path (string, 7-265 chars) | concatenated_notebook (string, 46-17M chars)
---|---
notebooks/sampler.ipynb | ###Markdown
Monte Carlo Markov Chain example
###Code
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
import seaborn as sns
names = ['Alicudi','Filicudi','Salina','Lipari','Vulcano','Panarea','Stromboli']
sizes = [8,10,100,500,50,40,5]
def jump(p):
    a = np.random.uniform(0, 1, 1)
    return p >= a
start = np.random.choice(names)
i0 = names.index(start)
print('We start from %s' %start)
visited = []
rejected = []
for i in range(1000):
    if i0 == 6:
        i1 = 5
    elif i0 == 0:
        i1 = 1
    else:
        i1 = int(np.random.choice([i0-1, i0+1], 1))
    #print(i1)
    #print('we propose to jump to %s' %(names[i1]))
    p_jump = sizes[i1]/sizes[i0]
    if jump(p_jump):
        i0 = i1
        visited.append(i0)
    else:
        rejected.append(i1)
    #print('We are now in %s' %(names[i0]))
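# Added sanity check (not in the original notebook): with enough steps the chain
# should visit each island roughly in proportion to its population size.
visit_freq = np.array([visited.count(k) for k in range(len(names))]) / len(visited)
target = np.array(sizes) / np.sum(sizes)
for name, emp, tgt in zip(names, visit_freq, target):
    print('%-10s empirical %.3f vs. target %.3f' % (name, emp, tgt))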
f,ax = plt.subplots(1,2,figsize=(10,5))
g0=sns.countplot(visited, ax= ax[0])
labels = [names[i] for i in g0.get_xticks()]
ax[0].set_xticklabels(labels, rotation = 45)
ax[0].set_title('visited')
g1=sns.countplot(rejected, ax= ax[1])
labels = [names[int(i.get_text())] for i in g1.get_xticklabels()]
ax[1].set_xticklabels(labels, rotation = 45)
ax[1].set_title('rejected')
f,ax = plt.subplots(1, figsize=(5,5))
ax.plot(visited)
ax.set_yticks([0,1,2,3,4,5,6])
ax.set_yticklabels(names)
ax.set_xlabel('Days')
###Output
_____no_output_____
###Markdown
MCMC Metropolis-Hastings sampler. Adapted from: https://towardsdatascience.com/from-scratch-bayesian-inference-markov-chain-monte-carlo-and-metropolis-hastings-in-python-ef21a29e25a
###Code
mod1=lambda t:np.random.normal(10,3,t)
#Form a population of 30,000 individuals, with mean=10 and scale=3
population = mod1(30000)
#Assume we are only able to observe 1,000 of these individuals.
observation = population[np.random.randint(0, 30000, 1000)]
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(1,1,1)
ax.hist( observation,bins=35 ,)
ax.set_xlabel("Value")
ax.set_ylabel("Frequency")
ax.set_title("Figure 1: Distribution of 1000 observations sampled from a population of 30,000 with mu=10, sigma=3")
mu_obs=observation.mean()
#The transition model defines how to move from sigma_current to sigma_new
transition_model = lambda x: [x[0],np.random.normal(x[1],1,(1,))]
def prior(x):
    #x[0] = mu, x[1] = sigma (new or current)
    #returns 1 for all valid values of sigma. Log(1) = 0, so it does not affect the summation.
    #returns 0 for all invalid values of sigma (<= 0). Log(0) = -infinity, and Log(negative number) is undefined.
    #It makes the new sigma infinitely unlikely.
    if x[1] <= 0:
        return 0
    return 1

#Computes the log-likelihood of the data given (mu, sigma): the log of the normal density summed over the data
def manual_log_like_normal(x, data):
    #x[0] = mu, x[1] = sigma (new or current)
    #data = the observation
    return np.sum(-np.log(x[1] * np.sqrt(2 * np.pi)) - ((data - x[0])**2) / (2 * x[1]**2))

#Same as manual_log_like_normal(x, data), but using the scipy implementation. It's pretty slow.
def log_lik_normal(x, data):
    #x[0] = mu, x[1] = sigma (new or current)
    #data = the observation
    return np.sum(np.log(scipy.stats.norm(x[0], x[1]).pdf(data)))
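# Added sanity check (not in the original): the hand-written and scipy-based
# log-likelihoods should agree up to floating-point error.
_check_x = [10.0, 3.0]
_check_data = np.random.normal(10, 3, 50)
print(np.isclose(manual_log_like_normal(_check_x, _check_data), log_lik_normal(_check_x, _check_data)))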
#Defines whether to accept or reject the new sample
def acceptance(x, x_new):
    if x_new > x:
        return True
    else:
        accept = np.random.uniform(0, 1)
        # Since we work with log-likelihoods, we exponentiate the difference to compare it
        # with the uniform draw; the less likely x_new is, the less likely it is to be accepted.
        return accept < np.exp(x_new - x)
def metropolis_hastings(likelihood_computer, prior, transition_model, param_init, iterations, data, acceptance_rule):
    # likelihood_computer(x, data): returns the (log-)likelihood that these parameters generated the data
    # transition_model(x): a function that draws a sample from a symmetric distribution and returns it
    # param_init: a starting sample
    # iterations: number of proposals to generate
    # data: the data that we wish to model
    # acceptance_rule(x, x_new): decides whether to accept or reject the new sample
    x = param_init
    i_accepted = []
    i_rejected = []
    accepted = []
    rejected = []
    for i in range(iterations):
        x_new = transition_model(x)
        x_lik = likelihood_computer(x, data)
        x_new_lik = likelihood_computer(x_new, data)
        if acceptance_rule(x_lik + np.log(prior(x)), x_new_lik + np.log(prior(x_new))):
            x = x_new
            accepted.append(x_new)
            i_accepted.append(i)
        else:
            rejected.append(x_new)
            i_rejected.append(i)
    return np.array(accepted), np.array(rejected), np.array(i_accepted), np.array(i_rejected)
accepted, rejected, ia, ir = metropolis_hastings(manual_log_like_normal,prior,transition_model,[mu_obs,0.1], 1000,observation,acceptance)
print(len(accepted), len(rejected))
print(accepted)
f,ax=plt.subplots(2,1, figsize=(10,10))
ax[0].plot(ia[0:50], accepted[:,1][0:50], 'rd')
ax[0].plot(ir[0:50],rejected[:,1][0:50], 'k*')
ax[1].plot(ir,rejected[0:,1], 'k')
ax[1].plot(ia,accepted[:,1], 'r')
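# Added summary (an illustrative check, treating roughly the first quarter of the
# accepted draws as burn-in): the posterior mean of sigma should land near the
# sample standard deviation of the observations.
burn = len(accepted) // 4
post_sigma = float(np.mean(accepted[burn:, 1]))
print('posterior mean of sigma (post burn-in): %.3f' % post_sigma)
print('sample standard deviation of the data : %.3f' % observation.std())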
###Output
_____no_output_____
###Markdown
SGLD
###Code
from pybnn.sampler import SGLD
import torch.utils.data as data_utils
from itertools import islice
from pybnn.util.infinite_dataloader import infinite_dataloader
theta = [torch.autograd.Variable(torch.DoubleTensor([0]), requires_grad=True),
torch.autograd.Variable(torch.DoubleTensor([0]), requires_grad=True)]
n_burnin = 100
num_steps = n_burnin + 9900
batch_size = 10
sampler = SGLD(theta, lr=1e-2, scale_grad=data.size(0))
burnin_samples = []
samples = []
batch_generator = infinite_dataloader(
data_utils.DataLoader(
data,
batch_size=batch_size,
shuffle=True
))
lr = []
sigma = []
for step, x_batch in islice(enumerate(batch_generator), num_steps):
    sampler.zero_grad()
    loss = nll(x_batch, theta)
    loss.backward()
    sampler.step()
    if step > n_burnin:
        samples.append([theta[0].data.numpy()[0], theta[1].data.numpy()[0]])
    else:
        burnin_samples.append([theta[0].data.numpy()[0], theta[1].data.numpy()[0]])
    sigma.append(sampler.state[theta[1]]["sigma"])
samples = np.array(samples)
burnin_samples = np.array(burnin_samples)
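# Added summary (illustrative): the post-burn-in SGLD draws give a Monte Carlo
# estimate of the posterior mean of (theta_0, theta_1).
print('SGLD posterior mean estimate:', samples.mean(axis=0))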
plt.contour(x_grid, y_grid, densities, 10, cmap="viridis")
plt.grid(True)
plt.scatter(samples[:, 0], samples[:, 1], color="k")
plt.scatter(burnin_samples[:, 0], burnin_samples[:, 1], color="r")
plt.title("SGLD", fontsize=50)
plt.xlabel(r"$\theta_0$", fontsize=50)
plt.ylabel(r"$\theta_1$", fontsize=50)
plt.show()
###Output
_____no_output_____
###Markdown
Preconditioned SGLD
###Code
from pybnn.sampler.preconditioned_sgld import PreconditionedSGLD
import torch.utils.data as data_utils
from itertools import islice
from pybnn.util.infinite_dataloader import infinite_dataloader
theta = [torch.autograd.Variable(torch.DoubleTensor([0]), requires_grad=True),
torch.autograd.Variable(torch.DoubleTensor([0]), requires_grad=True)]
n_burnin = 100
num_steps = n_burnin + 9900
batch_size = 10
sampler = PreconditionedSGLD(theta, lr=1e-2, num_train_points=data.size(0))
burnin_samples = []
samples = []
batch_generator = infinite_dataloader(
data_utils.DataLoader(
data,
batch_size=batch_size,
shuffle=True
))
for step, x_batch in islice(enumerate(batch_generator), num_steps):
    sampler.zero_grad()
    loss = nll(x_batch, theta)
    loss.backward()
    sampler.step()
    if step > n_burnin:
        samples.append([theta[0].data.numpy()[0], theta[1].data.numpy()[0]])
    else:
        burnin_samples.append([theta[0].data.numpy()[0], theta[1].data.numpy()[0]])
samples = np.array(samples)
burnin_samples = np.array(burnin_samples)
print(samples.shape)
plt.contour(x_grid, y_grid, densities, 10, cmap="viridis")
plt.grid(True)
plt.scatter(samples[:, 0], samples[:, 1], color="k")
plt.scatter(burnin_samples[:, 0], burnin_samples[:, 1], color="r")
plt.title("Preconditioned SGLD", fontsize=50)
plt.xlabel(r"$\theta_0$", fontsize=50)
plt.ylabel(r"$\theta_1$", fontsize=50)
###Output
(9899, 2)
###Markdown
SGHMC
###Code
from pybnn.sampler.sghmc import SGHMC
import torch.utils.data as data_utils
from itertools import islice
from pybnn.util.infinite_dataloader import infinite_dataloader
theta = [torch.autograd.Variable(torch.DoubleTensor([0.0]), requires_grad=True),
torch.autograd.Variable(torch.DoubleTensor([0.0]), requires_grad=True)]
n_burnin = 100
num_steps = n_burnin + 9900
batch_size = 10
sampler = SGHMC(theta, lr=np.float64(1e-2), scale_grad=data.size(0), mdecay=0.01)
burnin_samples = []
samples = []
batch_generator = infinite_dataloader(
data_utils.DataLoader(
data,
batch_size=batch_size,
shuffle=True
))
for step, x_batch in islice(enumerate(batch_generator), num_steps):
    sampler.zero_grad()
    loss = nll(x_batch, theta)
    loss.backward()
    sampler.step()
    if step > n_burnin:
        samples.append([theta[0].data.numpy()[0], theta[1].data.numpy()[0]])
    else:
        burnin_samples.append([theta[0].data.numpy()[0], theta[1].data.numpy()[0]])
samples = np.array(samples)
burnin_samples = np.array(burnin_samples)
print(samples.shape)
plt.contour(x_grid, y_grid, densities, 10, cmap="viridis")
plt.scatter(samples[:, 0], samples[:, 1], color="k")
plt.scatter(burnin_samples[:, 0], burnin_samples[:, 1], color="r")
plt.title("SGHMC", fontsize=50)
plt.xlabel(r"$\theta_0$", fontsize=50)
plt.ylabel(r"$\theta_1$", fontsize=50)
###Output
(9899, 2)
###Markdown
Adaptive-SGHMC
###Code
from pybnn.sampler import AdaptiveSGHMC
import torch.utils.data as data_utils
from copy import deepcopy
from itertools import islice
from pybnn.util.infinite_dataloader import infinite_dataloader
theta = [torch.autograd.Variable(torch.DoubleTensor([0.0]), requires_grad=True),
torch.autograd.Variable(torch.DoubleTensor([0.0]), requires_grad=True)]
n_burnin = 100
num_steps = n_burnin + 9900
batch_size = 10
sampler = AdaptiveSGHMC(theta, num_burn_in_steps=n_burnin, lr=np.float64(1e-2), scale_grad=data.size(0), mdecay=0.01)
burnin_samples = []
samples = []
batch_generator = infinite_dataloader(
data_utils.DataLoader(
data,
batch_size=batch_size,
shuffle=True
))
var_theta0 = []
var_theta1 = []
for step, x_batch in islice(enumerate(batch_generator), num_steps):
    sampler.zero_grad()
    loss = nll(x_batch, theta)
    loss.backward()
    sampler.step()
    if step > n_burnin:
        samples.append([theta[0].data.numpy()[0], theta[1].data.numpy()[0]])
    else:
        burnin_samples.append([theta[0].data.numpy()[0], theta[1].data.numpy()[0]])
    var_theta0.append(deepcopy(sampler.state[theta[0]]["v_hat"]))
    var_theta1.append(deepcopy(sampler.state[theta[1]]["v_hat"]))
samples = np.array(samples)
burnin_samples = np.array(burnin_samples)
plt.contour(x_grid, y_grid, densities, 10, cmap="viridis")
plt.scatter(samples[:, 0], samples[:, 1], color="k")
plt.scatter(burnin_samples[:, 0], burnin_samples[:, 1], color="r")
plt.title("Adaptive-SGHMC", fontsize=50)
plt.xlabel(r"$\theta_0$", fontsize=50)
plt.ylabel(r"$\theta_1$", fontsize=50)
plt.show()
###Output
_____no_output_____
###Markdown
Reading multiple files. serpent-tools currently supports reading multiple depletion and detector files to obtain true expected quantities and uncertainties. This is done with the [`sampler`](http://serpent-tools.readthedocs.io/en/latest/api/sampler.html) module. The directories `models/sampled-bwr/dep` and `models/sampled-bwr/det` contain depletion and detector outputs from 100 repeated runs, i.e. runs that differ only by random seed. Only five of these files are tracked in this repository so as not to overburden the user, but the approach still applies. The readers follow the same data structure and retrieval as their serial counterparts.
###Code
%matplotlib inline
detGlob = '../models/sampled-bwr/det/bwr_*_det0.m'
depGlob = '../models/sampled-bwr/dep/bwr_*_dep.m'
###Output
_____no_output_____
###Markdown
Detector
###Code
from serpentTools.samplers.detector import DetectorSampler
%time
det = DetectorSampler(detGlob)
len(det.files), len(det.parsers)
det.detectors
###Output
_____no_output_____
###Markdown
New [`SampledDetector`](http://serpent-tools.readthedocs.io/en/latest/api/sampler.html#serpentTools.samplers.detector.SampledDetector) objects behave exactly like their standalone counterparts. The relative error attribute is replaced with the aggregation of the errors from all files read.
###Code
s = det.detectors['spectrum']
s.tallies
###Output
_____no_output_____
###Markdown
Information from all standalone detectors used to create this one is retained through the `allTallies` and `allErrors` attributes. These are identical to the `tallies` and `errors` arrays with an additional dimension for the parser index.
###Code
s.allTallies.shape
s.indexes
s.spectrumPlot();
s.spreadPlot('energy', loglog=True);
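# Added check of the claim above (hedged): allTallies/allErrors should carry one
# extra leading dimension -- the file/parser index -- relative to tallies/errors.
print(s.allTallies.shape, s.tallies.shape)
print(s.allErrors.shape, s.errors.shape)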
###Output
_____no_output_____
###Markdown
Depletion
###Code
from serpentTools.samplers.depletion import DepletionSampler
%time
dep = DepletionSampler(depGlob)
len(dep.parsers), len(dep.files)
dep.materials
dep.metadata.keys()
dep.metadata['names']
fuel = dep.materials['fue1']
fuel.getValues('days', 'adens')
ax = fuel.plot('burnup', 'adens', names='Xe135');
fuel.uncertainties.keys()
fuel.uncertainties['adens']
###Output
_____no_output_____
###Markdown
[`SampledDepletedMaterial`](http://serpent-tools.readthedocs.io/en/latest/api/sampler.html#serpentTools.samplers.depletion.SampledDepletedMaterial) can plot the mean value and show the spread of values from each unique reader.
###Code
ax = fuel.spreadPlot('burnup', 'adens', 'Xe135', logy=False)
###Output
_____no_output_____
###Markdown
Settings. A few settings control how the samplers behave and can improve analysis by: 1. Skipping a precheck that ensures all sampled parsers have the same data - [`sampler.skipPrecheck`](http://serpent-tools.readthedocs.io/en/latest/settingsTop.html#sampler-skipprecheck) 2. Not retaining individual samplers after final processing - [`sampler.freeAll`](http://serpent-tools.readthedocs.io/en/latest/settingsTop.html#sampler-freeall). The latter does eliminate some functionality, such as `spreadPlot`, which requires all parsers and sampled containers to be retained.
###Code
from serpentTools.settings import rc
from serpentTools.messages import SamplerError
rc.setValue('sampler.freeAll', True)
detF = DetectorSampler(detGlob)
detF.parsers
sF= detF.detectors['spectrum']
print(sF.allTallies is None)
try:
    # catch and print the error raised with this method
    sF.spreadPlot()
except SamplerError as se:
    print(se)
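# Added (hedged): restore the setting afterwards so later cells can build spread
# plots again; False is assumed to be the default value of sampler.freeAll here.
rc.setValue('sampler.freeAll', False)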
###Output
Data from all sampled files has been freed and cannot be used in this plot method
###Markdown
Tiny Dataset
###Code
tiny_dataset = final_dataset.select(range(100))
tiny_dataset = tiny_dataset.flatten_indices()
path = os.path.join(root_dir, "tiny_final_small_en_0.3")
tiny_dataset.save_to_disk(path)
###Output
_____no_output_____ |
tutorials/getting_started/1c_query_results.ipynb | ###Markdown
Introduction to FBL: Part 1(c): NeuroNLP Query Results. In this tutorial, we provide some details of what is returned by an NLP query or a NeuroArch query (see [this notebook](1b_nlp_queries.ipynb)), in addition to the neurons visualized in the NeuroNLP window. In the Launcher, click on the "Create FBL Workspace" button and choose adult(Hemibrain). Then change the kernel of this notebook (see [this notebook](1_introduction.ipynb) if you are not aware why and how to do it).
###Code
import seaborn as sns
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
###Output
_____no_output_____
###Markdown
Retrieve the client that was created:
###Code
my_client = fbl.get_client()
###Output
_____no_output_____
###Markdown
We execute two NLP queries, one to show PEG (PB-EB-LAL) neurons, another to add EPG (EB-LAL-PB) neurons.
###Code
result_peg = my_client.executeNLPquery('show peg')
result_epg = my_client.executeNLPquery('add epg')
###Output
[FBL NLP 2021-01-28 15:54:19,458] NLP successfully parsed query.
###Markdown
We inspect three objects, `result_peg`, `result_epg` and `my_client.NLP_result`
###Code
print('result_peg is an instance of:', type(result_peg))
print('result_epg is an instance of:', type(result_epg))
print('my_client.NLP_result is an instance of:', type(my_client.NLP_result))
###Output
result_peg is an instance of: <class 'flybrainlab.graph.NAqueryResult'>
result_epg is an instance of: <class 'flybrainlab.graph.NAqueryResult'>
my_client.NLP_result is an instance of: <class 'flybrainlab.graph.NeuroNLPResult'>
###Markdown
Note that the two objects returned by the `executeNLPquery` are both of type `NAqueryResult`. They store information about the particular query. For example, we can show the neurons returned by the query:
###Code
{k: v['uname'] for k, v in result_peg.neurons.items()}
###Output
_____no_output_____
###Markdown
Note that the keys of `result_peg.neurons` are record IDs of the NeuroArch database and are guaranteed to be unique.
###Code
{k: v['uname'] for k, v in result_epg.neurons.items()}
###Output
_____no_output_____
###Markdown
`NeuroNLPResult` is a subclass of `NAqueryResult` and stores a mirror of what is shown in the NeuroNLP window. Therefore, it contains all neurons of the two queries above since the second query uses `add`.
###Code
{k: v['uname'] for k, v in my_client.NLP_result.neurons.items()}
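# Added check of the claim above: the workspace mirror should contain the union
# of the record IDs returned by the two queries.
print(set(result_peg.neurons) | set(result_epg.neurons) == set(my_client.NLP_result.neurons))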
###Output
_____no_output_____
###Markdown
To obtain a graph representing the connections between neurons, where neurons are the nodes of the graph and edges are weighted by the number of synapses between two neurons, simply call
###Code
G = my_client.get_neuron_graph(query_result = result_peg,
synapse_threshold = 0,
complete_synapses = True)
###Output
_____no_output_____
###Markdown
Here, `synapse_threshold` filters out connections between neurons that have fewer than `synapse_threshold` synapses (we set it to 0 here to include all connections). `complete_synapses` is set to `True` to fetch all connections/synapses between the neurons in `result_peg`, even if those synapses are not present in `result_peg` itself. If `complete_synapses` is set to `False`, only the synapses present in `result_peg` are used to create the graph. We can show the adjacency matrix of this graph by:
###Code
plt.figure(figsize = (12, 10))
M, uname_order = G.adjacency_matrix()
sns.heatmap(M, xticklabels = uname_order, yticklabels = uname_order);
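# Added (hedged): total synapse counts per neuron, assuming rows of the
# adjacency matrix index the presynaptic neuron.
row_totals = np.asarray(M).sum(axis=1)
for name, total in zip(uname_order, row_totals):
    print(name, total)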
###Output
_____no_output_____
###Markdown
Similarly, we can retrieve the graph representing the connections between neurons in `my_client.NLP_result`, by
###Code
G = my_client.get_neuron_graph(synapse_threshold = 0,
complete_synapses = True)
###Output
_____no_output_____
###Markdown
Here, passing `my_client.NLP_result` is optional. We show the adjacency matrix in a logarithmic color range.
###Code
M, uname_order = G.adjacency_matrix()
plt.figure(figsize = (16, 14))
sns.heatmap(np.log10(M+1), xticklabels = uname_order, yticklabels = uname_order);
###Output
_____no_output_____ |
content/lessons/01/Class-Coding-Lab/CCL-walkthrough.ipynb | ###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print("Hello, Python!")
print("Testing to ensure all modules are installed...")
import requests
import json
import pandas as pd
import numpy as np
import matplotlib
import plotly
import cufflinks as cf
import folium
from IPython.display import display, HTML
display (HTML("This is <span style='color:green;'>Working</span>"))
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
N = 6
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
"""
===========
Random data
===========
An animation of random data.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
fig, ax = plt.subplots()
line, = ax.plot(np.random.rand(10))
ax.set_ylim(0, 1)
def update(data):
line.set_ydata(data)
return line,
def data_gen():
while True:
yield np.random.rand(10)
ani = animation.FuncAnimation(fig, update, data_gen, interval=100)
plt.show()
###Output
_____no_output_____
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print("Hello, Python!")
"""
Demo of bar plot on a polar axis.
"""
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
# Use custom colors and opacity
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
###Output
_____no_output_____
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print("Hello, Python!")
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
print("Testing to ensure all modules are installed...")
import requests
import json
import pandas as pd
import numpy as np
import matplotlib
import plotly
import cufflinks as cf
import folium
from IPython.display import display, HTML
display(HTML("This is <span style='color:green;'>Working</span>"))
###Output
Testing to ensure all modules are installed...
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print("heyo")
###Output
heyo
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print("Hello, Python!")
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
for i in range(0,5):
print("x")
###Output
x
x
x
x
x
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
("Hello Python!")
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
# Use custom colors and opacity
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
# Use custom colors and opacity
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
# Use custom colors and opacity
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
print("testing to ensure all modules are installed...")
import requests
import json
import pandas as pd
import numpy as np
import matplotlib
import plotly
import cufflinks as cf
import folium
from IPython.display import display, HTML
display(HTML("this is <span Style= 'color: green, '>Working</span>"))
pip install cufflinks
pip install cufflinks
pip install cufflinks
###Output
The following command must be run outside of the IPython shell:
$ pip install cufflinks
The Python package manager (pip) can only be used from outside of IPython.
Please reissue the `pip` command in a separate terminal or command prompt.
See the Python documentation for more informations on how to install packages:
https://docs.python.org/3/installing/
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print ("Testing to ensure all modules are installed...")
import requests
import json
import pandas as pd
import numpy as np
import matplotlib
import plotly
import cufflinks as cf
import folium
from IPython.display import display, HTML
display(HTML("This is <span style='color: green;'>Working</span>"))
print ("Testing to ensure all modules are installed...")
import requests
import json
import pandas as pd
import numpy as np
import matplotlib
import plotly
import cufflinks as cf
import folium
from IPython.display import display, HTML
display(HTML("This is <span style='color: green;'>Working</span>"))
print ("Testing to ensure all modules are installed...")
import requests
import json
import pandas as pd
import numpy as np
import matplotlib
import plotly
import cufflinks as cf
import folium
from IPython.display import display, HTML
display(HTML("This is <span style='color: green;'>Working</span>"))
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
# Use custom colors and opacity
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
###Output
_____no_output_____
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print("Hello, Python!")
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
print("Testing to ensure all modules are installed...")
import requests
import json
import pandas as pd
import numpy as np
import matplotlib
import plotly
import cufflinks as cf
import folium
from IPython.display import display, HTML
display(HTML("This is <span style='color: green;'>working</span>"))
print("Testing to ensure all modules are installed...")
import requests
import json
import pandas as pd
import numpy as np
import matplotlib
import plotly
import cufflinks as cf
import folium
from IPython.display import display, HTML
display(HTML("This is <span style='color: green;'>working</span>"))
###Output
Testing to ensure all modules are installed...
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print("Hello python")
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
# Use custom colors and opacity
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
print ("Testing to make sure all modules are installed")
import requests
import json
import pandas as pd
import numpy as np
import matplotlib
import plotly
import cufflinks as cf
import folium
from IPython.display import display , HTML
display (HTML("This is <span style='color:green;' >working</span>"))
###Output
_____no_output_____
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print("Hello, Python!")
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
###Output
_____no_output_____
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print("Hello, Python!")
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
print("Testing to ensure all modules are installed...")
import requests
import json
import pandas as pd
import numpy as np
import matplotlib
import plotly
import cufflinks as cf
import folium
from IPython.display import display, HTML
display(HTML("This is <span style='colo: green;'>Working</span"))
print("Testing to ensure all modules are installed...")
import requests
import json
import pandas as pd
import numpy as np
import matplotlib
import plotly
import cufflinks as cf
import folium
from IPython.display import display, HTML
display(HTML("This is <span style='color: green;'>Working</span"))
###Output
Testing to ensure all modules are installed...
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print ('Hello Python!')
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
print('Testing to ensure all modules are installed ...')
import requests
import json
import pandas as pd
import numpy as np
import matplotlib
import plotly
import cufflinks as cf
import folium
from IPython.display import display, HTML
display (HTML("This is <span style='color:green;'>WORKING </span>"))
x = int (input())
if x < 0:
print("a")
elif x < 10:
print("b")
else:
print("c")
y=10
z=20
y+z
###Output
_____no_output_____
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print ("Hello,Python")
"""
Demo of bar plot on a polar axis.
"""
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
# Use custom colors and opacity
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
###Output
/home/nbuser/anaconda3_420/lib/python3.5/site-packages/matplotlib/font_manager.py:281: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment.
'Matplotlib is building the font cache using fc-list. '
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print("hey eddie")
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
print("Testing to ensure all modules are installed...")
import requests
import json
import pandas as pd
import numpy as np
import matplotlib
import plotly
from IPython.display import display, HTML
display(HTML("This is <span style='color: green;'>Working</span>"))
###Output
Testing to ensure all modules are installed...
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print("hello, python!")
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
print("Testing to insure all modules are installed . . . ")
import requests
import json
import pandas as pd
import numpy as np
import matplotlib
import plotly
import cufflinks as cf
import folium
from IPython.display import display, HTML
display(HTML("This is <span style='color: green;'>Working</span>"))
###Output
Testing to insure all modules are installed . . .
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print("Hello, Python!")
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
print ("Testing to ensure all modules are installed...")
import requests
import json
import pandas as pd
import numpy as np
import matplotlib
import plotly
import cufflinks as cf
import folium
from IPython.display import display, HTML
display(HTML("This is <span style='color: green;'>Working</span>"))
!pip3 install --user folium
print ("Testing to ensure all modules are installed...")
import requests
import json
import pandas as pd
import numpy as np
import matplotlib
import plotly
import cufflinks as cf
import folium
from IPython.display import display, HTML
display(HTML("This is <span style='color: green;'>Working</span>"))
###Output
Testing to ensure all modules are installed...
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print ("Hello, Python!")
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
    bar.set_facecolor(plt.cm.jet(r / 10.))
    bar.set_alpha(0.5)
plt.show()
print("Testing to ensure all modules are installed...")
import requests
import json
import pandas as pd
import numpy as np
import matplotlib
import plotly
import cufflinks as cf
import folium
from IPython.display import display, HTML
display(HTML("This is <span style='color: green;'>Working</span>"))
###Output
Testing to ensure all modules are installed...
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print("Hello, Python")
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
# Use custom colors and opacity
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
print("Testing to ensure all modules are installed...")
import requests
import json
import pandas as pd
import numpy as np
import matplotlib
import plotly
import cufflinks as cf
import folium
from IPython.display import display, HTML
display(HTML("This is <span style='color: green;'>working</span>"))
print("Python Check Divider")
people = int(input("How many people are dining? "))
check_total = float(input("What is the total amount of the check? "))
contribution = check_total / people
print("Each person should contribute", contribution, "dollars.")
###Output
_____no_output_____
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print ("Hello Python")
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
    bar.set_facecolor(plt.cm.jet(r / 10.))
    bar.set_alpha(0.5)
plt.show()
###Output
_____no_output_____
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print("Hello, Python!")
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
print("Testing to ensure all modules are installed...")
import requests
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import plotly
import folium
from IPython.display import display, HTML
display(HTML("this is <span style='color:green;'>working</span>"))
!pip3 install --user matplotlib
###Output
_____no_output_____
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print("Hello, Python!")
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
# Use custom colors and opacity
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
###Output
_____no_output_____
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print("Hello, Python!")
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
# Use custom colors and opacity
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
print("Testing to ensure all modules are installed...")
import requests
import json
import pandas as pd
import numpy as np
import matplotlib
import plotly
import cufflinks as cf
import folium
from IPython.display import display, HTML
display(HTML("This is <span style='color: green;'>Working</span>"))
###Output
Testing to ensure all modules are installed...
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print("Testing to ensure all modudles are installed...")
import requests
import json
import pandas as pd
import numpy as np
import matplotlib
import plotly
import cufflinks as cf
import folium
from IPython.display import display, HTML
display(HTML("This is <span style='color: blue;'>Working</span>"))
###Output
Testing to ensure all modudles are installed...
###Markdown
Class Coding Lab: Walkthrough. For this class coding lab, paste the code provided by the walk through into the cell below. Execute the cell to verify the program runs correctly and also saves the notebook output.
###Code
print ("shut yo face")
"""
Demo of bar plot on a polar axis.
"""
import numpy as np
import matplotlib.pyplot as plt
N = 20
theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
radii = 10 * np.random.rand(N)
width = np.pi / 4 * np.random.rand(N)
ax = plt.subplot(111, polar=True)
bars = ax.bar(theta, radii, width=width, bottom=0.0)
# Use custom colors and opacity
for r, bar in zip(radii, bars):
bar.set_facecolor(plt.cm.jet(r / 10.))
bar.set_alpha(0.5)
plt.show()
###Output
_____no_output_____ |
Fine_tuning_Bert.ipynb | ###Markdown
###Code
#@title Activating the GPU
# Main menu->Runtime->Change Runtime Type
import tensorflow as tf
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
#@title Installing the Hugging Face PyTorch Interface for Bert
# !pip install pytorch-pretrained-bert pytorch-nlp
!pip install -q transformers
#@title Importing the modules
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from transformers import BertTokenizer, BertConfig
from transformers import AdamW, BertForSequenceClassification, get_linear_schedule_with_warmup
from tqdm import tqdm, trange
import pandas as pd
import io
import numpy as np
import matplotlib.pyplot as plt
% matplotlib inline
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
n_gpu = torch.cuda.device_count()
torch.cuda.get_device_name(0)
df = pd.read_csv('in_domain_train.tsv', delimiter='\t', header=None,
names = ['sentence_source', 'label', 'label_notes', 'sentence'])
df.shape
df.sample(10)
sentences = df.sentence.values
sentences = ['[CLS] ' + sentence + ' [SEP]' for sentence in sentences]
labels = df.label.values
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
tokenized_texts = [tokenizer.tokenize(sent) for sent in sentences]
print('Tokenize the first sequence: ')
print(tokenized_texts[0])
max_len = 128
input_ids = [tokenizer.convert_tokens_to_ids(x) for x in tokenized_texts]
input_ids = pad_sequences(input_ids, maxlen=max_len, dtype='long', truncating ='post', padding='post')
print(input_ids[0])
attention_masks = []
for seq in input_ids:
    seq_mask = [float(i > 0) for i in seq]
    attention_masks.append(seq_mask)
train_inputs, validation_inputs, train_labels, validation_labels = train_test_split(input_ids,
labels,
random_state=2018,
test_size=0.1)
train_masks, validation_masks, _, _ = train_test_split(attention_masks,
input_ids,
random_state=2018,
test_size=0.1)
train_inputs = torch.tensor(train_inputs)
validation_inputs = torch.tensor(validation_inputs)
train_labels = torch.tensor(train_labels)
validation_labels = torch.tensor(validation_labels)
train_masks = torch.tensor(train_masks)
validation_masks = torch.tensor(validation_masks)
batch_size = 32
train_data = TensorDataset(train_inputs, train_masks, train_labels)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
validation_data = TensorDataset(validation_inputs, validation_masks ,validation_labels)
validation_sampler = SequentialSampler(validation_data)
validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size)
import transformers
from transformers import BertModel, BertConfig
configuration = BertConfig()
model = BertModel(configuration)
configuration = model.config
print(configuration)
model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels = 2)
model.cuda()
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_params = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.1},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.0}
]
epochs = 4
optimizer = AdamW(optimizer_grouped_params, lr=2e-5, eps=1e-8)
total_steps = len(train_dataloader) * epochs
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps= total_steps)
def flat_accuracy(preds, labels):
    pred_flat = np.argmax(preds, axis=1).flatten()
    labels_flat = labels.flatten()
    return np.sum(pred_flat == labels_flat) / len(labels_flat)
t = []
train_loss_set = []
for _ in trange(epochs, desc='Epoch'):
    model.train()  # all this does is set the model to training mode
                   # (adds dropout and things like that)
    tr_loss = 0
    nb_tr_examples, nb_tr_steps = 0, 0
    for step, batch in enumerate(train_dataloader):
        batch = tuple(t.to(device) for t in batch)
        b_input_ids, b_input_masks, b_labels = batch
        optimizer.zero_grad()
        outputs = model(b_input_ids, token_type_ids=None,
                        attention_mask=b_input_masks,
                        labels=b_labels)
        loss = outputs['loss']
        train_loss_set.append(loss.item())
        loss.backward()
        optimizer.step()
        scheduler.step()
        tr_loss += loss.item()
        nb_tr_examples += b_input_ids.size(0)
        nb_tr_steps += 1
    print(f'Train loss {tr_loss/nb_tr_steps}')
    model.eval()  # remove dropout
    eval_loss, eval_accuracy = 0, 0
    nb_eval_steps, nb_eval_examples = 0, 0
    for batch in validation_dataloader:
        batch = tuple(t.to(device) for t in batch)
        b_input_ids, b_input_mask, b_labels = batch
        with torch.no_grad():
            logits = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
        logits = logits['logits'].detach().cpu().numpy()
        label_ids = b_labels.to('cpu').numpy()
        tmp_eval_accuracy = flat_accuracy(logits, label_ids)
        eval_accuracy += tmp_eval_accuracy
        nb_eval_steps += 1
    print("Validation Accuracy: {}".format(eval_accuracy/nb_eval_steps))
plt.figure(figsize=(15, 8))
plt.title('Training loss')
plt.xlabel('Batch')
plt.ylabel('Loss')
plt.plot(train_loss_set)
plt.show()
#@title Predicting and Evaluating Using the Hold-out Dataset
df = pd.read_csv("out_of_domain_dev.tsv", delimiter='\t', header=None, names=['sentence_source', 'label', 'label_notes', 'sentence'])
# Create sentence and label lists
sentences = df.sentence.values
# We need to add special tokens at the beginning and end of each sentence for BERT to work properly
sentences = ["[CLS] " + sentence + " [SEP]" for sentence in sentences]
labels = df.label.values
tokenized_texts = [tokenizer.tokenize(sent) for sent in sentences]
MAX_LEN = 128
# Use the BERT tokenizer to convert the tokens to their index numbers in the BERT vocabulary
input_ids = [tokenizer.convert_tokens_to_ids(x) for x in tokenized_texts]
# Pad our input tokens
input_ids = pad_sequences(input_ids, maxlen=MAX_LEN, dtype="long", truncating="post", padding="post")
# Create attention masks
attention_masks = []
# Create a mask of 1s for each token followed by 0s for padding
for seq in input_ids:
    seq_mask = [float(i>0) for i in seq]
    attention_masks.append(seq_mask)
prediction_inputs = torch.tensor(input_ids)
prediction_masks = torch.tensor(attention_masks)
prediction_labels = torch.tensor(labels)
batch_size = 32
prediction_data = TensorDataset(prediction_inputs, prediction_masks, prediction_labels)
prediction_sampler = SequentialSampler(prediction_data)
prediction_dataloader = DataLoader(prediction_data, sampler=prediction_sampler, batch_size=batch_size)
# Prediction on test set
# Put model in evaluation mode
model.eval()
# Tracking variables
predictions , true_labels = [], []
# Predict
for batch in prediction_dataloader:
    # Add batch to GPU
    batch = tuple(t.to(device) for t in batch)
    # Unpack the inputs from our dataloader
    b_input_ids, b_input_mask, b_labels = batch
    # Telling the model not to compute or store gradients, saving memory and speeding up prediction
    with torch.no_grad():
        # Forward pass, calculate logit predictions
        logits = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)
    # Move logits and labels to CPU
    logits = logits['logits'].detach().cpu().numpy()
    label_ids = b_labels.to('cpu').numpy()
    # Store predictions and true labels
    predictions.append(logits)
    true_labels.append(label_ids)
#@title Evaluating Using Matthew's Correlation Coefficient
# Import and evaluate each test batch using Matthew's correlation coefficient
from sklearn.metrics import matthews_corrcoef
matthews_set = []
for i in range(len(true_labels)):
    matthews = matthews_corrcoef(true_labels[i],
                                 np.argmax(predictions[i], axis=1).flatten())
    matthews_set.append(matthews)
matthews_set
flat_predictions = [item for sublist in predictions for item in sublist]
flat_predictions = np.argmax(flat_predictions, axis=1).flatten()
flat_true_labels = [item for sublist in true_labels for item in sublist]
matthews_corrcoef(flat_true_labels, flat_predictions)
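# Added for comparison (not in the original): plain accuracy alongside the
# Matthews correlation coefficient over the hold-out set.
from sklearn.metrics import accuracy_score
print("accuracy:", accuracy_score(flat_true_labels, flat_predictions))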
###Output
_____no_output_____ |
site/assignments/assignment5/hm.ipynb | ###Markdown
Homework 05 - Instructions**WARNING!!! If you see this icon on the top of your COLAB session, your work is not saved automatically.****When you are working on homeworks, make sure that you save often. You may find it easier to save intermittent copies in Google drive. If you save your working file in Google drive all changes will be saved as you work. MAKE SURE that your final version is saved to GitHub.** Before you turn this problem in, make sure everything runs as expected. First, restart the kernel (in the menubar, select Kernel → Restart) and then run all cells (in the menubar, select Cell → Run All). You can speak with others regarding the assignment but all work must be your own. This is a 30-point assignment graded from answers to questions and automated tests that should be run at the bottom. Be sure to clearly label all of your answers and commit final tests at the end.
###Code
files = "https://github.com/rpi-techfundamentals/introml_website_fall_2020/raw/master/files/assignment5.zip"
!pip install otter-grader && wget $files && unzip -o assignment5.zip
#Run this. It initiates autograding.
import otter
grader = otter.Notebook()
###Output
_____no_output_____
###Markdown
Load DataWe have our titanic dataset that is a bit different from what we have had previously. Load the train-new.csv and test-new.csv into dataframes train and test.
###Code
# Load the data here
###Output
_____no_output_____
###Markdown
Question 1(1) Investigate the data a little bit. What is different from some of the titanic datasets we have used in the past? (For example, compare against the data in the Kaggle Baseline notebook).
###Code
man1="""
"""
###Output
_____no_output_____
###Markdown
Generating Dummy Variables. Before we do analysis of the Titanic dataset, we have to select our features for the train and the test set, which we shall label `X_train` and `X_test`. As part of this we need to generate `n-1` dummy variables for each of our categorical columns. The resulting dataframes should be all numeric and have all of the columns below (in the correct order). Follow the example above to generate new values for `X_train` and `X_test` utilizing all the data.```['Age', 'SibSp', 'Parch', 'Fare', 'family_size', 'Pclass_2', 'Pclass_3', 'Sex_male', 'Cabin_B', 'Cabin_C', 'Cabin_D', 'Cabin_E', 'Cabin_F', 'Cabin_G', 'Cabin_H', 'Embarked_Q', 'Embarked_S']```*Hint, try: `help(pd.get_dummies)`.* You should also set `y` to the Survived column.
###Code
#Answer Here
grader.check('q01')
###Output
_____no_output_____
###Markdown
Split Training Set For Cross Validation. (2.) We want to split up our training set `X` so that we can do some cross validation. Specifically, we will start to use the term validation set for the set we will use to validate our model. To do so below, use the sklearn methods to do a train/test (i.e., validation) split. From the `X` and `y` data, generate the following dataframes by drawing the data **randomly**, with 80% of the data going to the training set and 20% to the validation set. So that you get repeatable results, set `random_state=100`. This will set a "seed" so that your random selection will be the same as mine and you will pass the internal tests: train_X, val_X, train_y, val_y
###Code
#Answer Here
grader.check('q02')
###Output
_____no_output_____
###Markdown
Perform Nearest Neighbor Classification (KNeighborsClassifier)(3.) Using the default options (i.e., all default hyperparameters), perform nearest neighbor classification. Calculate the accuracy measure using `metrics.accuracy_score`. Train your model using the training data and assess the accuracy of both the training and validation data. *Note: You only train the model once...on the training data. You then assess the performance on both the training and validation data.* Assign the following variables: `knn0_train_y` = The KNN prediction for the `train_X` data. `knn0_val_y` = The KNN prediction for the `val_X` data. `knn0_train_accuracy` = The accuracy for the `knn0_train_y` prediction. `knn0_val_accuracy` = The accuracy for the `knn0_val_y` prediction.
###Code
#Answer Here
grader.check('q03')
###Output
_____no_output_____
###Markdown
Confusion Matrix. We can utilize a confusion matrix to understand misclassifications a bit more. This will give us a full picture of the true positives, true negatives, false positives, and false negatives. See the documentation [here](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html). You can utilize the syntax below to generate knn_mat1_train and knn_mat1_test. ```from sklearn.metrics import confusion_matrix; confusion_matrix(y_true, y_pred)```**(4.) Explain what each of the four quadrants of the confusion matrix means.**
###Code
#Answer here
man4= """
"""
###Output
_____no_output_____
###Markdown
Create Confusion Matrix for the Training and Validation Predictions(5) Create a confusion matrix for each of the training and validation predictions. `knn0_con_train` A confusion matrix for the training data. `knn0_con_val` A confusion matrix for the validation data.
###Code
#Answers
grader.check('q05')
###Output
_____no_output_____
###Markdown
Hyperparameter Tuning(6) You created a single model using the default parameters. However, we want to adjust the parameters to try to improve the model. Examine the documentation on KNN and see some of the different parameters that you can adjust. [Scikit Learn Documentation](http://scikit-learn.org/stable/supervised_learning.html#supervised-learning). Assign the following variables: `knn1_train_y` = The KNN prediction for the `train_X` data for your improved model. `knn1_val_y` = The KNN prediction for the `val_X` data for your improved model. `knn1_train_accuracy` = The accuracy for the `knn1_train_y` prediction. `knn1_val_accuracy` = The accuracy for the `knn1_val_y` prediction.
###Code
#Answers
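# A possible sketch, not the graded solution. The choice of n_neighbors=10 is only
# illustrative; any hyperparameter documented for KNeighborsClassifier could be tuned.
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
knn1 = KNeighborsClassifier(n_neighbors=10)
knn1.fit(train_X, train_y)
knn1_train_y = knn1.predict(train_X)
knn1_val_y = knn1.predict(val_X)
knn1_train_accuracy = metrics.accuracy_score(train_y, knn1_train_y)
knn1_val_accuracy = metrics.accuracy_score(val_y, knn1_val_y)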
grader.check('q06')
###Output
_____no_output_____
###Markdown
Other Models(7.) Test Logistic regression and 1 other algorithm/model (your choice). Provide a summary of the best performance below. Use any of the available classification models. You should show and comment your code. [Scikit Learn Documentation](http://scikit-learn.org/stable/supervised_learning.html#supervised-learning).*Make sure you clearly indicate the accuracy of the Logistic regression model and your other model. Assess which model worked best considering all your efforts.*
###Code
#Answer here
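# A possible sketch, not the graded solution: the logistic regression part only,
# scored on the same validation split as above. A second model of your choice
# would follow the same fit/predict/score pattern.
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
logreg = LogisticRegression(max_iter=1000)
logreg.fit(train_X, train_y)
logreg_val_accuracy = metrics.accuracy_score(val_y, logreg.predict(val_X))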
man7= """
"""
#This runs all tests.
grader.check_all()
###Output
_____no_output_____ |
notebooks/Reducer.ipynb | ###Markdown
Reducing Failure-Inducing InputsBy construction, fuzzers create inputs that may be hard to read. This causes issues during _debugging_, when a human has to analyze the exact cause of the failure. In this chapter, we present techniques that _automatically reduce and simplify failure-inducing inputs to a minimum_ in order to ease debugging. **Prerequisites*** The simple "delta debugging" technique for reduction has no specific prerequisites.* As reduction is typically used together with fuzzing, reading the [chapter on basic fuzzing](Fuzzer.ipynb) is a good idea.* The later grammar-based techniques require knowledge on [derivation trees](GrammarFuzzer.ipynb) and [parsing](Parser.ipynb). SynopsisTo [use the code provided in this chapter](Importing.ipynb), write```python>>> from fuzzingbook.Reducer import ```and then make use of the following features.A _reducer_ takes a failure-inducing input and reduces it to the minimum that still reproduces the failure. This chapter provides `Reducer` classes that implement such reducers.Here is a simple example: An arithmetic expression causes an error in the Python interpreter:```python>>> !python -c 'x = 1 + 2 * 3 / 0'```Can we reduce this input to a minimum? To use a `Reducer`, one first has to build a `Runner` whose outcome is `FAIL` if the precise error occurs. We therefore build a `ZeroDivisionRunner` whose `run()` method will specifically return a `FAIL` outcome if a `ZeroDivisionError` occurs.```python>>> from Fuzzer import ProgramRunner>>> class ZeroDivisionRunner(ProgramRunner):>>> """Make outcome 'FAIL' if ZeroDivisionError occurs""">>> def run(self, inp=""):>>> result, outcome = super().run(inp)>>> if result.stderr.find('ZeroDivisionError') >= 0:>>> outcome = 'FAIL'>>> return result, outcome```If we feed this expression into a `ZeroDivisionRunner`, it will produce an outcome of `FAIL` as designed.```python>>> python_input = "x = 1 + 2 * 3 / 0">>> python_runner = ZeroDivisionRunner("python")>>> result, outcome = python_runner.run(python_input)>>> outcome```Delta Debugging is a simple and robust reduction algorithm. We can tie a `DeltaDebuggingReducer` to this runner, and have it determine the substring that causes the `python` program to fail:```python>>> dd = DeltaDebuggingReducer(python_runner)>>> dd.reduce(python_input)```The input is reduced to the maximum: We get the essence of the division by zero. Why Reducing?At this point, we have seen a number of test generation techniques that all in some form produce inputs in order to trigger failures. If they are successful – that is, the program actually fails – we must find out why the failure occurred and how to fix it. Here's an example of such a situation. We have a class `MysteryRunner` with a `run()` method that – given its code – can occasionally fail. But under which circumstances does this actually happen? We have deliberately obscured the exact condition in order to make this non-obvious.
###Code
import bookutils
from Fuzzer import RandomFuzzer, Runner
import re
class MysteryRunner(Runner):
def run(self, inp):
x = inp.find(chr(0o17 + 0o31))
y = inp.find(chr(0o27 + 0o22))
if x >= 0 and y >= 0 and x < y:
return (inp, Runner.FAIL)
else:
return (inp, Runner.PASS)
###Output
_____no_output_____
###Markdown
Let us fuzz the function until we find a failing input.
###Code
mystery = MysteryRunner()
random_fuzzer = RandomFuzzer()
while True:
inp = random_fuzzer.fuzz()
result, outcome = mystery.run(inp)
if outcome == mystery.FAIL:
break
failing_input = result
failing_input
###Output
_____no_output_____
###Markdown
Something in this input causes `MysteryRunner` to fail. But what is it? Manual Input ReductionOne important step in the debugging process is _reduction_ – that is, to identify those circumstances of a failure that are relevant for the failure to occur, and to _omit_ (if possible) those parts that are not. As Kernighan and Pike \cite{Kernighan1999} put it:> For every circumstance of the problem, check whether it is relevant for the problem to occur. If it is not, remove it from the problem report or the test case in question. Specifically for inputs, they suggest a _divide and conquer_ process:> Proceed by binary search. Throw away half the input and see if the output is still wrong; if not, go back to the previous state and discard the other half of the input.This is something we can easily try out, using our last generated input:
###Code
failing_input
###Output
_____no_output_____
###Markdown
For instance, we can see whether the error still occurs if we only feed in the first half:
###Code
half_length = len(failing_input) // 2 # // is integer division
first_half = failing_input[:half_length]
mystery.run(first_half)
###Output
_____no_output_____
###Markdown
Nope – the first half alone does not suffice. Maybe the second half?
###Code
second_half = failing_input[half_length:]
mystery.run(second_half)
###Output
_____no_output_____
###Markdown
This did not go so well either. We may still proceed by cutting away _smaller chunks_ – say, one character after another. If our test is deterministic and easily repeated, it is clear that this process eventually will yield a reduced input. But still, it is a rather inefficient process, especially for long inputs. What we need is a _strategy_ that effectively minimizes a failure-inducing input – a strategy that can be automated. Delta Debugging One strategy to effectively reduce failure-inducing inputs is _delta debugging_ \cite{Zeller2002}. Delta Debugging implements the "binary search" strategy, as listed above, but with a twist: If neither half fails (also as above), it keeps on cutting away smaller and smaller chunks from the input, until it eliminates individual characters. Thus, after cutting away the first half, we cut away the first quarter, the second quarter, and so on. Let us illustrate this on our example, and see what happens if we cut away the first quarter.
###Code
quarter_length = len(failing_input) // 4
input_without_first_quarter = failing_input[quarter_length:]
mystery.run(input_without_first_quarter)
###Output
_____no_output_____
###Markdown
Ah! This has failed, and reduced our failing input by 25%. Let's remove another quarter.
###Code
input_without_first_and_second_quarter = failing_input[quarter_length * 2:]
mystery.run(input_without_first_and_second_quarter)
###Output
_____no_output_____
###Markdown
This is not too surprising, as we had that one before:
###Code
second_half
input_without_first_and_second_quarter
###Output
_____no_output_____
###Markdown
How about removing the third quarter, then?
###Code
input_without_first_and_third_quarter = failing_input[quarter_length:
quarter_length * 2] + failing_input[quarter_length * 3:]
mystery.run(input_without_first_and_third_quarter)
###Output
_____no_output_____
###Markdown
Ok. Let us remove the fourth quarter.
###Code
input_without_first_and_fourth_quarter = failing_input[quarter_length:quarter_length * 3]
mystery.run(input_without_first_and_fourth_quarter)
###Output
_____no_output_____
###Markdown
Yes! This has succeeded. Our input is now 50% smaller. We have now tried to remove pieces that make up $\frac{1}{2}$ and $\frac{1}{4}$ of the original failing string. In the next iteration, we would go and remove even smaller pieces – $\frac{1}{8}$, $\frac{1}{16}$ and so on. We continue until we are down to $\frac{1}{97}$ – that is, individual characters. However, this is something we happily let a computer do for us. We first introduce a `Reducer` class as an abstract superclass for all kinds of reducers. The `test()` method runs a single test (with logging, if wanted); the `reduce()` method will eventually reduce an input to the minimum.
###Code
class Reducer:
def __init__(self, runner, log_test=False):
"""Attach reducer to the given `runner`"""
self.runner = runner
self.log_test = log_test
self.reset()
def reset(self):
self.tests = 0
def test(self, inp):
result, outcome = self.runner.run(inp)
self.tests += 1
if self.log_test:
print("Test #%d" % self.tests, repr(inp), repr(len(inp)), outcome)
return outcome
def reduce(self, inp):
self.reset()
# Default: Don't reduce
return inp
###Output
_____no_output_____
###Markdown
The `CachingReducer` variant saves test results, such that we don't have to run the same tests again and again:
###Code
class CachingReducer(Reducer):
def reset(self):
super().reset()
self.cache = {}
def test(self, inp):
if inp in self.cache:
return self.cache[inp]
outcome = super().test(inp)
self.cache[inp] = outcome
return outcome
###Output
_____no_output_____
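###Markdown
A quick illustration (added as a sketch, using the `mystery` runner from above): thanks to the cache, running the same input twice triggers only one actual test run.
###Code
caching_demo = CachingReducer(mystery)
caching_demo.test("()")
caching_demo.test("()")  # second call is answered from the cache
caching_demo.tests       # only one real test run was recorded
###Output
_____no_output_____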
###Markdown
Here comes the _Delta Debugging_ reducer. Delta Debugging implements the strategy sketched above: It first removes larger chunks of size $\frac{1}{2}$; if this does not fail, then we proceed to chunks of size $\frac{1}{4}$, then $\frac{1}{8}$ and so on. Our implementation uses almost the same Python code as Zeller in \cite{Zeller2002}; the only difference is that it has been adapted to work on Python 3 and our `Runner` framework. The variable `n` (initially 2) indicates the granularity – in each step, chunks of size $\frac{1}{n}$ are cut away. If none of the tests fails (`some_complement_is_failing` is False), then `n` is doubled – until it reaches the length of the input.
###Code
class DeltaDebuggingReducer(CachingReducer):
def reduce(self, inp):
self.reset()
assert self.test(inp) != Runner.PASS
n = 2 # Initial granularity
while len(inp) >= 2:
start = 0
subset_length = len(inp) / n
some_complement_is_failing = False
while start < len(inp):
complement = inp[:int(start)] + \
inp[int(start + subset_length):]
if self.test(complement) == Runner.FAIL:
inp = complement
n = max(n - 1, 2)
some_complement_is_failing = True
break
start += subset_length
if not some_complement_is_failing:
if n == len(inp):
break
n = min(n * 2, len(inp))
return inp
###Output
_____no_output_____
###Markdown
To see how the `DeltaDebuggingReducer` works, let us run it on our failing input. With each step, we see how the remaining input gets smaller and smaller, until only two characters remain:
###Code
dd_reducer = DeltaDebuggingReducer(mystery, log_test=True)
dd_reducer.reduce(failing_input)
###Output
_____no_output_____
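###Markdown
As a small addition, the `tests` attribute of the reducer records how many test runs were needed; this is the step count referred to below.
###Code
dd_reducer.tests
###Output
_____no_output_____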
###Markdown
Now we know why `MysteryRunner` fails – it suffices that the input contains two matching parentheses. Delta Debugging determines this in 29 steps. Its result is _1-minimal_, meaning that every character contained is required to produce the error; removing any (as seen in tests `27` and `29`, above) no longer makes the test fail. This property is guaranteed by the delta debugging algorithm, which in its last stage always tries to delete characters one by one. A reduced test case such as the one above has many advantages:* A reduced test case __reduces the _cognitive load_ of the programmer__. The test case is shorter and focused, and thus does not burden the programmer with irrelevant details. A reduced input typically leads to shorter executions and smaller program states, both of which reduce the search space when it comes to understanding the bug. In our case, we have eliminated lots of irrelevant input – only the two characters the reduced input contains are relevant.* A reduced test case __is easier to communicate__. All one needs here is the summary: `MysteryRunner fails on "()"`, which is much better than `MysteryRunner fails on a 4100-character input (attached)`.* A reduced test case helps in __identifying duplicates__. If similar bugs have been reported already, and all of them have been reduced to the same cause (namely that the input contains matching parentheses), then it becomes obvious that all these bugs are different symptoms of the same underlying cause – and would all be resolved at once with one code fix. How effective is delta debugging? In the best case (when the left half or the right half fails), the number of tests is logarithmically proportional to the length $n$ of an input (i.e., $O(\log_2 n)$); this is the same complexity as binary search. In the worst case, though, delta debugging can require a number of tests proportional to $n^2$ (i.e., $O(n^2)$) – this happens when we are down to character granularity and repeatedly have to try to delete all characters, only to find that deleting the last character results in a failure \cite{Zeller2002}. (This is a pretty pathological situation, though.) In general, delta debugging is a robust algorithm that is easy to implement, easy to deploy, and easy to use – provided that the underlying test case is deterministic and runs quickly enough to warrant a number of experiments. As these are the same prerequisites that make fuzzing effective, delta debugging makes an excellent companion to fuzzing. Grammar-Based Input ReductionIf the input language is syntactically complex, delta debugging may take several attempts at reduction, and may not be able to reduce inputs at all. In the second half of this chapter, we thus introduce an algorithm named _Grammar-Based Reduction_ (or GRABR for short) that makes use of _grammars_ to reduce syntactically complex inputs. Lexical Reduction vs. Syntactic RulesDespite its general robustness, there are situations in which delta debugging might be inefficient or outright fail. As an example, consider some _expression input_ such as `1 + (2 * 3)`. Delta debugging requires a number of tests to simplify the failure-inducing input, but it eventually returns a minimal input:
###Code
expr_input = "1 + (2 * 3)"
dd_reducer = DeltaDebuggingReducer(mystery, log_test=True)
dd_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
Looking at the tests, above, though, only a few of them actually represent syntactically valid arithmetic expressions. In a practical setting, we may want to test a program which actually _parses_ such expressions, and which would _reject_ all invalid inputs. We define a class `EvalMysteryRunner` which first _parses_ the given input (according to the rules of our expression grammar), and _only_ if it fits would it be passed to our original `MysteryRunner`. This simulates a setting in which we test an expression interpreter, and in which only valid inputs can trigger the bug.
###Code
from Grammars import EXPR_GRAMMAR
from Parser import EarleyParser # minor dependency
class EvalMysteryRunner(MysteryRunner):
def __init__(self):
self.parser = EarleyParser(EXPR_GRAMMAR)
def run(self, inp):
try:
tree, *_ = self.parser.parse(inp)
except SyntaxError as exc:
return (inp, Runner.UNRESOLVED)
return super().run(inp)
eval_mystery = EvalMysteryRunner()
###Output
_____no_output_____
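###Markdown
A quick sanity check (added as an illustration): a syntactically invalid input such as `1 +` now comes back as `UNRESOLVED`, which is exactly what will trip up delta debugging below.
###Code
eval_mystery.run("1 +")
###Output
_____no_output_____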
###Markdown
Under these circumstances, it turns out that delta debugging utterly fails. None of the reductions it applies yield a syntactically valid input, so the input as a whole remains as complex as it was before.
###Code
dd_reducer = DeltaDebuggingReducer(eval_mystery, log_test=True)
dd_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
This behavior is possible if the program under test has several constraints regarding input validity. Delta debugging is not aware of these constraints (nor of the input structure in general), so it might violate these constraints again and again. A Grammar-Based Reduction ApproachTo reduce inputs with high syntactical complexity, we use another approach: Rather than reducing the input string, we reduce the _tree_ representing its structure. The general idea is to start with a _derivation tree_ coming from parsing the input, and then _substitute subtrees by smaller subtrees of the same type_. These alternate subtrees can either come 1. From the tree itself, or 2. By applying an alternate grammar expansion using elements from the tree. Let us show these two strategies using an example. We start with a derivation tree from an arithmetic expression:
###Code
from GrammarFuzzer import all_terminals, expansion_to_children, display_tree
derivation_tree, *_ = EarleyParser(EXPR_GRAMMAR).parse(expr_input)
display_tree(derivation_tree)
###Output
_____no_output_____
###Markdown
Simplifying by Replacing Subtrees To simplify this tree, we could replace any `<expr>` symbol up in the tree with some `<expr>` subtree down in the tree. For instance, we could replace the uppermost `<expr>` with its right `<expr>` subtree, yielding the string `(2 * 3)`:
###Code
import copy
new_derivation_tree = copy.deepcopy(derivation_tree)
# We really should have some query language
sub_expr_tree = new_derivation_tree[1][0][1][2]
display_tree(sub_expr_tree)
new_derivation_tree[1][0] = sub_expr_tree
display_tree(new_derivation_tree)
all_terminals(new_derivation_tree)
###Output
_____no_output_____
###Markdown
Replacing one subtree by another only works as long as individual elements such as `<expr>` occur multiple times in our tree. In the reduced `new_derivation_tree`, above, we could replace further `<expr>` trees only once more. Simplifying by Alternative Expansions A second means to simplify this tree is to apply _alternative expansions_. That is, for a symbol, we check whether there is an alternative expansion with a smaller number of children. Then, we replace the symbol with the alternative expansion, filling in needed symbols from the tree. As an example, consider the `new_derivation_tree` above. The applied expansion for `<term>` has been `<term> ::= <factor> * <term>`. Let us replace this with the alternative expansion: `<term> ::= <factor>`:
###Code
term_tree = new_derivation_tree[1][0][1][0][1][0][1][1][1][0]
display_tree(term_tree)
shorter_term_tree = term_tree[1][2]
display_tree(shorter_term_tree)
new_derivation_tree[1][0][1][0][1][0][1][1][1][0] = shorter_term_tree
display_tree(new_derivation_tree)
all_terminals(new_derivation_tree)
###Output
_____no_output_____
###Markdown
If we replace derivation subtrees by (smaller) subtrees, and if we search for alternate expansions that again yield smaller subtrees, we can systematically simplify the input. This could be much faster than delta debugging, as our inputs would always be syntactically valid. However, we need a strategy for when to apply which simplification rule. This is what we develop in the remainder of this section. A Class for Reducing with GrammarsWe introduce the `GrammarReducer` class, which is again a `Reducer`. Note that we derive from `CachingReducer`, as the strategy will produce several duplicates.
###Code
class GrammarReducer(CachingReducer):
def __init__(self, runner, parser, log_test=False, log_reduce=False):
super().__init__(runner, log_test=log_test)
self.parser = parser
self.grammar = parser.grammar()
self.start_symbol = parser.start_symbol()
self.log_reduce = log_reduce
self.try_all_combinations = False
###Output
_____no_output_____
###Markdown
A Few HelpersWe define a number of helper functions, which we will need for our strategy. `tree_list_to_string()` does what the name suggests, creating a string from a list of derivation trees:
###Code
def tree_list_to_string(q):
return "[" + ", ".join([all_terminals(tree) for tree in q]) + "]"
tree_list_to_string([derivation_tree, derivation_tree])
###Output
_____no_output_____
###Markdown
The function `possible_combinations()` takes a list of lists $[[x_1, x_2], [y_1, y_2], \dots]$ and creates a list of combinations $[[x_1, y_1], [x_1, y_2], [x_2, y_1], [x_2, y_2], \dots]$.
###Code
def possible_combinations(list_of_lists):
if len(list_of_lists) == 0:
return []
ret = []
for e in list_of_lists[0]:
if len(list_of_lists) == 1:
ret.append([e])
else:
for c in possible_combinations(list_of_lists[1:]):
new_combo = [e] + c
ret.append(new_combo)
return ret
possible_combinations([[1, 2], ['a', 'b']])
###Output
_____no_output_____
###Markdown
The functions `number_of_nodes()` and `max_height()` return the number of nodes and the maximum height of the given tree, respectively.
###Code
def number_of_nodes(tree):
(symbol, children) = tree
return 1 + sum([number_of_nodes(c) for c in children])
number_of_nodes(derivation_tree)
def max_height(tree):
(symbol, children) = tree
if len(children) == 0:
return 1
return 1 + max([max_height(c) for c in children])
max_height(derivation_tree)
###Output
_____no_output_____
###Markdown
Simplification StrategiesLet us now implement our two simplification strategies – replacing subtrees and alternate expansions. Finding SubtreesThe method `subtrees_with_symbol()` returns all subtrees in the given tree whose root is equal to the given symbol. If `ignore_root` is set (default), then the root node of `tree` is not compared against. (The `depth` parameter will be discussed below.)
###Code
class GrammarReducer(GrammarReducer):
def subtrees_with_symbol(self, tree, symbol, depth=-1, ignore_root=True):
# Find all subtrees in TREE whose root is SYMBOL.
        # If IGNORE_ROOT is true, ignore the root node of TREE.
ret = []
(child_symbol, children) = tree
if depth <= 0 and not ignore_root and child_symbol == symbol:
ret.append(tree)
# Search across all children
if depth != 0 and children is not None:
for c in children:
ret += self.subtrees_with_symbol(c,
symbol,
depth=depth - 1,
ignore_root=False)
return ret
###Output
_____no_output_____
###Markdown
Here's an example: These are all subtrees with `<term>` in our derivation tree `derivation_tree`.
###Code
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>")]
###Output
_____no_output_____
###Markdown
If we want to replace `<term>` subtrees to simplify the tree, these are the subtrees we could replace them with. Alternate Expansions Our second strategy, simplifying by alternate expansions, is a bit more complex. We first fetch the possible expansions for the given symbol (starting with the ones with the fewest children). For each expansion, we fill in values for the symbols from the subtree (using `subtrees_with_symbol()`, above). We then pick the first possible combination (or _all_ combinations, if the attribute `try_all_combinations` is set).
###Code
class GrammarReducer(GrammarReducer):
def alternate_reductions(self, tree, symbol, depth=-1):
reductions = []
expansions = self.grammar.get(symbol, [])
expansions.sort(
key=lambda expansion: len(
expansion_to_children(expansion)))
for expansion in expansions:
expansion_children = expansion_to_children(expansion)
match = True
new_children_reductions = []
for (alt_symbol, _) in expansion_children:
child_reductions = self.subtrees_with_symbol(
tree, alt_symbol, depth=depth)
if len(child_reductions) == 0:
match = False # Child not found; cannot apply rule
break
new_children_reductions.append(child_reductions)
if not match:
continue # Try next alternative
# Use the first suitable combination
for new_children in possible_combinations(new_children_reductions):
new_tree = (symbol, new_children)
if number_of_nodes(new_tree) < number_of_nodes(tree):
reductions.append(new_tree)
if not self.try_all_combinations:
break
# Sort by number of nodes
reductions.sort(key=number_of_nodes)
return reductions
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
###Output
_____no_output_____
###Markdown
Here are _all_ combinations for `<term>`:
###Code
grammar_reducer.try_all_combinations = True
print([all_terminals(t)
for t in grammar_reducer.alternate_reductions(derivation_tree, "<term>")])
###Output
_____no_output_____
###Markdown
The default, though, is simply to return the first of these:
###Code
grammar_reducer.try_all_combinations = False
[all_terminals(t) for t in grammar_reducer.alternate_reductions(
derivation_tree, "<term>")]
###Output
_____no_output_____
###Markdown
Both Strategies Together Let us now merge both strategies. To replace a subtree with a given symbol, we first search for already existing subtrees (using `subtrees_with_symbol()`); then we go for alternate expansions (using `alternate_reductions()`).
###Code
class GrammarReducer(GrammarReducer):
def symbol_reductions(self, tree, symbol, depth=-1):
"""Find all expansion alternatives for the given symbol"""
reductions = (self.subtrees_with_symbol(tree, symbol, depth=depth)
+ self.alternate_reductions(tree, symbol, depth=depth))
# Filter duplicates
unique_reductions = []
for r in reductions:
if r not in unique_reductions:
unique_reductions.append(r)
return unique_reductions
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
###Output
_____no_output_____
###Markdown
These are the possible reductions for `<expr>` nodes. Note how we first return subtrees (`1 + (2 * 3)`, `(2 * 3)`, `2 * 3`) before going for alternate expansions of `<expr>` (`1`).
###Code
reductions = grammar_reducer.symbol_reductions(derivation_tree, "<expr>")
tree_list_to_string([r for r in reductions])
###Output
_____no_output_____
###Markdown
These are the possible reductions for `<term>` nodes. Again, we first have subtrees of the derivation tree, followed by the alternate expansion `1 * 1`.
###Code
reductions = grammar_reducer.symbol_reductions(derivation_tree, "<term>")
tree_list_to_string([r for r in reductions])
###Output
_____no_output_____
###Markdown
The Reduction StrategyWe are now able to return a number of alternatives for each symbol in the tree. This is what we apply in the core function of our reduction strategy, `reduce_subtree()`. Starting with `subtree`, for every child, we find possible reductions. For every reduction, we replace the child with the reduction and test the resulting (full) tree. If it fails, our reduction was successful; otherwise, we put the child back into place and try out the next reduction. Eventually, we apply `reduce_subtree()` on all children, reducing these as well.
###Code
class GrammarReducer(GrammarReducer):
def reduce_subtree(self, tree, subtree, depth=-1):
symbol, children = subtree
if len(children) == 0:
return False
if self.log_reduce:
print("Reducing", all_terminals(subtree), "with depth", depth)
reduced = False
while True:
reduced_child = False
for i, child in enumerate(children):
(child_symbol, _) = child
for reduction in self.symbol_reductions(
child, child_symbol, depth):
if number_of_nodes(reduction) >= number_of_nodes(child):
continue
# Try this reduction
if self.log_reduce:
print(
"Replacing",
all_terminals(
children[i]),
"by",
all_terminals(reduction))
children[i] = reduction
if self.test(all_terminals(tree)) == Runner.FAIL:
# Success
if self.log_reduce:
print("New tree:", all_terminals(tree))
reduced = reduced_child = True
break
else:
# Didn't work out - restore
children[i] = child
if not reduced_child:
if self.log_reduce:
print("Tried all alternatives for", all_terminals(subtree))
break
# Run recursively
for c in children:
if self.reduce_subtree(tree, c, depth):
reduced = True
return reduced
###Output
_____no_output_____
###Markdown
All we now need is a few drivers. The method `reduce_tree()` is the main entry point into `reduce_subtree()`:
###Code
class GrammarReducer(GrammarReducer):
def reduce_tree(self, tree):
return self.reduce_subtree(tree, tree)
###Output
_____no_output_____
###Markdown
The custom method `parse()` turns a given input into a derivation tree:
###Code
class GrammarReducer(GrammarReducer):
def parse(self, inp):
tree, *_ = self.parser.parse(inp)
if self.log_reduce:
print(all_terminals(tree))
return tree
###Output
_____no_output_____
###Markdown
The method `reduce()` is the one single entry point, parsing the input and then reducing it.
###Code
class GrammarReducer(GrammarReducer):
def reduce(self, inp):
tree = self.parse(inp)
self.reduce_tree(tree)
return all_terminals(tree)
###Output
_____no_output_____
###Markdown
Let us try this out in practice on our input `expr_input` and the `mystery()` function. How quickly can we reduce it?
###Code
expr_input
grammar_reducer = GrammarReducer(
eval_mystery,
EarleyParser(EXPR_GRAMMAR),
log_test=True)
grammar_reducer.reduce(expr_input)
###Output
_____no_output_____
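###Markdown
As an added check, the test counter confirms how few runs were needed:
###Code
grammar_reducer.tests
###Output
_____no_output_____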
###Markdown
Success! In only five steps, our `GrammarReducer` reduces the input to the minimum that causes the failure. Note how all tests are syntactically valid by construction, avoiding the `UNRESOLVED` outcomes that cause delta debugging to stall. A Depth-Oriented Strategy Even if five steps are already good, we can still do better. If we look at the log above, we see that after test `2`, where the input (tree) is reduced to `2 * 3`, our `GrammarReducer` first tries to replace the tree with `2` and `3`, which are the alternate `<term>` subtrees. This may work, of course; but if there are many possible subtrees, our strategy will spend quite some time trying one after the other. Delta debugging, as introduced above, follows the idea of trying to cut inputs approximately in half, and thus quickly proceeds towards a minimal input. By replacing a tree with much smaller subtrees, we _could_ possibly reduce a tree significantly, but may need several attempts to do so. A better strategy is to only consider _large_ subtrees first – both for the subtree replacement as well as for alternate expansions. To find such _large_ subtrees, we limit the _depth_ by which we search for possible replacements in the subtree – first, by looking at the direct descendants, later at lower descendants. This is the role of the `depth` parameter used in `subtrees_with_symbol()` and passed through the invoking functions. If set, _only_ symbols at the given depth are returned. Here's an example, starting again with our derivation tree `derivation_tree`:
###Code
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
display_tree(derivation_tree)
###Output
_____no_output_____
###Markdown
At a depth of 1, there is no `<term>` symbol:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=1)]
###Output
_____no_output_____
###Markdown
At a depth of 2, we have the `<term>` subtree on the left hand side:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=2)]
###Output
_____no_output_____
###Markdown
At a depth of 3, we have the `<term>` subtree on the right hand side:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=3)]
###Output
_____no_output_____
###Markdown
The idea is now to start with a depth of 0, subsequently increasing it as we proceed:
###Code
class GrammarReducer(GrammarReducer):
def reduce_tree(self, tree):
depth = 0
while depth < max_height(tree):
reduced = self.reduce_subtree(tree, tree, depth)
if reduced:
depth = 0 # Start with new tree
else:
depth += 1 # Extend search for subtrees
return tree
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_test=True)
grammar_reducer.reduce(expr_input)
###Output
_____no_output_____
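###Markdown
For comparison (again an added check), here is how many test runs the depth-oriented strategy needed:
###Code
grammar_reducer.tests
###Output
_____no_output_____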
###Markdown
We see that a depth-oriented strategy needs even fewer steps in our setting. Comparing StrategiesWe close by demonstrating the difference between text-based delta debugging and our grammar-based reduction. We build a very long expression:
###Code
from GrammarFuzzer import GrammarFuzzer
long_expr_input = GrammarFuzzer(EXPR_GRAMMAR, min_nonterminals=100).fuzz()
long_expr_input
###Output
_____no_output_____
###Markdown
With grammars, we need only a handful of tests to find the failure-inducing input:
###Code
from Timer import Timer
grammar_reducer = GrammarReducer(eval_mystery, EarleyParser(EXPR_GRAMMAR))
with Timer() as grammar_time:
print(grammar_reducer.reduce(long_expr_input))
grammar_reducer.tests
grammar_time.elapsed_time()
###Output
_____no_output_____
###Markdown
Delta debugging, in contrast, requires orders of magnitude more tests (and consequently, time). Again, the reduction is not nearly as perfect as it is with the grammar-based reducer.
###Code
dd_reducer = DeltaDebuggingReducer(eval_mystery)
with Timer() as dd_time:
print(dd_reducer.reduce(long_expr_input))
dd_reducer.tests
dd_time.elapsed_time()
###Output
_____no_output_____
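###Markdown
Putting the two counters side by side gives a rough feel for the difference (added as an illustration; the exact numbers vary from run to run):
###Code
dd_reducer.tests / grammar_reducer.tests
###Output
_____no_output_____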
###Markdown
We see that if an input is syntactically complex, using a grammar to reduce inputs is the best way to go. SynopsisA _reducer_ takes a failure-inducing input and reduces it to the minimum that still reproduces the failure. This chapter provides `Reducer` classes that implement such reducers. Here is a simple example: An arithmetic expression causes an error in the Python interpreter:
###Code
!python -c 'x = 1 + 2 * 3 / 0'
###Output
_____no_output_____
###Markdown
Can we reduce this input to a minimum? To use a `Reducer`, one first has to build a `Runner` whose outcome is `FAIL` if the precise error occurs. We therefore build a `ZeroDivisionRunner` whose `run()` method will specifically return a `FAIL` outcome if a `ZeroDivisionError` occurs.
###Code
from Fuzzer import ProgramRunner
class ZeroDivisionRunner(ProgramRunner):
"""Make outcome 'FAIL' if ZeroDivisionError occurs"""
def run(self, inp=""):
result, outcome = super().run(inp)
if result.stderr.find('ZeroDivisionError') >= 0:
outcome = 'FAIL'
return result, outcome
###Output
_____no_output_____
###Markdown
If we feed this expression into a `ZeroDivisionRunner`, it will produce an outcome of `FAIL` as designed.
###Code
python_input = "x = 1 + 2 * 3 / 0"
python_runner = ZeroDivisionRunner("python")
result, outcome = python_runner.run(python_input)
outcome
###Output
_____no_output_____
###Markdown
Delta Debugging is a simple and robust reduction algorithm. We can tie a `DeltaDebuggingReducer` to this runner, and have it determine the substring that causes the `python` program to fail:
###Code
dd = DeltaDebuggingReducer(python_runner)
dd.reduce(python_input)
###Output
_____no_output_____
###Markdown
Reducing Failure-Inducing InputsBy construction, fuzzers create inputs that may be hard to read. This causes issues during _debugging_, when a human has to analyze the exact cause of the failure. In this chapter, we present techniques that _automatically reduce and simplify failure-inducing inputs to a minimum_ in order to ease debugging. **Prerequisites*** The simple "delta debugging" technique for reduction has no specific prerequisites.* As reduction is typically used together with fuzzing, reading the [chapter on basic fuzzing](Fuzzer.ipynb) is a good idea.* The later grammar-based techniques require knowledge on [derivation trees](GrammarFuzzer.ipynb) and [parsing](Parser.ipynb). SynopsisTo [use the code provided in this chapter](Importing.ipynb), write```python>>> from fuzzingbook.Reducer import ```and then make use of the following features.A _reducer_ takes a failure-inducing input and reduces it to the minimum that still reproduces the failure. This chapter provides `Reducer` classes that implement such reducers.Here is a simple example: An arithmetic expression causes an error in the Python interpreter:```python>>> !python -c 'x = 1 + 2 * 3 / 0'Traceback (most recent call last): File "", line 1, in ZeroDivisionError: division by zero```Can we reduce this input to a minimum? To use a `Reducer`, one first has to build a `Runner` whose outcome is `FAIL` if the precise error occurs. We therefore build a `ZeroDivisionRunner` whose `run()` method will specifically return a `FAIL` outcome if a `ZeroDivisionError` occurs.```python>>> from Fuzzer import ProgramRunner>>> class ZeroDivisionRunner(ProgramRunner):>>> """Make outcome 'FAIL' if ZeroDivisionError occurs""">>> def run(self, inp=""):>>> result, outcome = super().run(inp)>>> if result.stderr.find('ZeroDivisionError') >= 0:>>> outcome = 'FAIL'>>> return result, outcome```If we feed this expression into a `ZeroDivisionRunner`, it will produce an outcome of `FAIL` as designed.```python>>> python_input = "x = 1 + 2 * 3 / 0">>> python_runner = ZeroDivisionRunner("python")>>> result, outcome = python_runner.run(python_input)>>> outcome'FAIL'```Delta Debugging is a simple and robust reduction algorithm. We can tie a `DeltaDebuggingReducer` to this runner, and have it determine the substring that causes the `python` program to fail:```python>>> dd = DeltaDebuggingReducer(python_runner)>>> dd.reduce(python_input)'3/0'```The input is reduced to the maximum: We get the essence of the division by zero. Why Reducing?At this point, we have seen a number of test generation techniques that all in some form produce inputs in order to trigger failures. If they are successful – that is, the program actually fails – we must find out why the failure occurred and how to fix it. Here's an example of such a situation. We have a class `MysteryRunner` class with a `run()` method that – given its code – can occasionally fail. But under which circumstances does this actually happen? We have deliberately obscured the exact condition in order to make this non-obvious.
###Code
import fuzzingbook_utils
from Fuzzer import RandomFuzzer, Runner
import re
class MysteryRunner(Runner):
def run(self, inp):
x = inp.find(chr(0o17 + 0o31))
y = inp.find(chr(0o27 + 0o22))
if x >= 0 and y >= 0 and x < y:
return (inp, Runner.FAIL)
else:
return (inp, Runner.PASS)
###Output
_____no_output_____
###Markdown
Let us fuzz the function until we find a failing input.
###Code
mystery = MysteryRunner()
random_fuzzer = RandomFuzzer()
while True:
inp = random_fuzzer.fuzz()
result, outcome = mystery.run(inp)
if outcome == mystery.FAIL:
break
failing_input = result
failing_input
###Output
_____no_output_____
###Markdown
Something in this input causes `MysteryRunner` to fail. But what is it? Manual Input ReductionOne important step in the debugging process is _reduction_ – that is, to identify those circumstances of a failure that are relevant for the failure to occur, and to _omit_ (if possible) those parts that are not. As Kernighan and Pike \cite{Kernighan1999} put it:> For every circumstance of the problem, check whether it is relevant for the problem to occur. If it is not, remove it from the problem report or the test case in question. Specifically for inputs, they suggest a _divide and conquer_ process:> Proceed by binary search. Throw away half the input and see if the output is still wrong; if not, go back to the previous state and discard the other half of the input.This is something we can easily try out, using our last generated input:
###Code
failing_input
###Output
_____no_output_____
###Markdown
For instance, we can see whether the error still occurs if we only feed in the first half:
###Code
half_length = len(failing_input) // 2 # // is integer division
first_half = failing_input[:half_length]
mystery.run(first_half)
###Output
_____no_output_____
###Markdown
Nope – the first half alone does not suffice. Maybe the second half?
###Code
second_half = failing_input[half_length:]
mystery.run(second_half)
###Output
_____no_output_____
###Markdown
This did not go so well either. We may still proceed by cutting away _smaller chunks_ – say, one character after another. If our test is deterministic and easily repeated, it is clear that this process eventually will yield a reduced input. But still, it is a rather inefficient process, especially for long inputs. What we need is a _strategy_ that effectively minimizes a failure-inducing input – a strategy that can be automated. Delta Debugging One strategy to effectively reduce failure-inducing inputs is _delta debugging_ \cite{Zeller2002}. Delta Debugging implements the "binary search" strategy, as listed above, but with a twist: If neither half fails (also as above), it keeps on cutting away smaller and smaller chunks from the input, until it eliminates individual characters. Thus, after cutting away the first half, we cut awaythe first quarter, the second quarter, and so on. Let us illustrate this on our example, and see what happens if we cut away the first quarter.
###Code
quarter_length = len(failing_input) // 4
input_without_first_quarter = failing_input[quarter_length:]
mystery.run(input_without_first_quarter)
###Output
_____no_output_____
###Markdown
Ah! This has failed, and reduced our failing input by 25%. Let's remove another quarter.
###Code
input_without_first_and_second_quarter = failing_input[quarter_length * 2:]
mystery.run(input_without_first_and_second_quarter)
###Output
_____no_output_____
###Markdown
This is not too surprising, as we had that one before:
###Code
second_half
input_without_first_and_second_quarter
###Output
_____no_output_____
###Markdown
How about removing the third quarter, then?
###Code
input_without_first_and_third_quarter = failing_input[quarter_length:
quarter_length * 2] + failing_input[quarter_length * 3:]
mystery.run(input_without_first_and_third_quarter)
###Output
_____no_output_____
###Markdown
Ok. Let us remove the fourth quarter.
###Code
input_without_first_and_fourth_quarter = failing_input[quarter_length:quarter_length * 3]
mystery.run(input_without_first_and_fourth_quarter)
###Output
_____no_output_____
###Markdown
Yes! This has succeeded. Our input is now 50% smaller. We have now tried to remove pieces that make up $\frac{1}{2}$ and $\frac{1}{4}$ of the original failing string. In the next iteration, we would go and remove even smaller pieces – $\frac{1}{8}$, $\frac{1}{16}$ and so on. We continue until we are down to $\frac{1}{97}$ – that is, individual characters. However, this is comething we happily let a computer do for us. We first introduce a `Reducer` class as an abstract superclass for all kinds of reducers. The `test()` methods runs a single test (with logging, if so wanted); the `reduce()` method will eventually reduce an input to the minimum.
###Code
class Reducer(object):
def __init__(self, runner, log_test=False):
"""Attach reducer to the given `runner`"""
self.runner = runner
self.log_test = log_test
self.reset()
def reset(self):
self.tests = 0
def test(self, inp):
result, outcome = self.runner.run(inp)
self.tests += 1
if self.log_test:
print("Test #%d" % self.tests, repr(inp), repr(len(inp)), outcome)
return outcome
def reduce(self, inp):
self.reset()
# Default: Don't reduce
return inp
###Output
_____no_output_____
###Markdown
The `CachingReducer` variant saves test results, such that we don't have to run the same tests again and again:
###Code
class CachingReducer(Reducer):
def reset(self):
super().reset()
self.cache = {}
def test(self, inp):
if inp in self.cache:
return self.cache[inp]
outcome = super().test(inp)
self.cache[inp] = outcome
return outcome
###Output
_____no_output_____
###Markdown
Here comes the _Delta Debugging_ reducer. Delta Debugging implements the strategy sketched above: It first removes larger chunks of size $\frac{1}{2}$; if this does not fail, then we proceed to chunks of size $\frac{1}{4}$, then $\frac{1}{8}$ and so on. Our implementation uses almost the same Python code as Zeller in \cite{Zeller2002}; the only difference is that it has been adapted to work on Python 3 and our `Runner` framework. The variable `n` (initially 2) indicates the granularity – in each step, chunks of size $\frac{1}{n}$ are cut away. If none of the test fails (`some_complement_is_failing` is False), then `n` is doubled – until it reaches the length of the input.
###Code
class DeltaDebuggingReducer(CachingReducer):
def reduce(self, inp):
self.reset()
assert self.test(inp) != Runner.PASS
n = 2 # Initial granularity
while len(inp) >= 2:
start = 0
subset_length = len(inp) / n
some_complement_is_failing = False
while start < len(inp):
complement = inp[:int(start)] + \
inp[int(start + subset_length):]
if self.test(complement) == Runner.FAIL:
inp = complement
n = max(n - 1, 2)
some_complement_is_failing = True
break
start += subset_length
if not some_complement_is_failing:
if n == len(inp):
break
n = min(n * 2, len(inp))
return inp
###Output
_____no_output_____
###Markdown
To see how the `DeltaDebuggingReducer` works, let us run it on our failing input. With each step, we see how the remaining input gets smaller and smaller, until only three characters remain:
###Code
dd_reducer = DeltaDebuggingReducer(mystery, log_test=True)
dd_reducer.reduce(failing_input)
###Output
_____no_output_____
###Markdown
Now we know why `MysteryRunner` fails – it suffices that the input contains two matching parentheses. Delta Debugging determines this in 29 steps. Its result is _1-minimal_, meaning that every character contained is required to produce the error; removing any (as seen in tests `27` and `29`, above) no longer makes the test fail. This property is guaranteed by the delta debugging algorithm, which in its last stage always tries to delete characters one by one. A reduced test case such as the one above has many advantages:* A reduced test case __reduces the _cognitive load_ of the programmer__. The test case is shorter and focused, and thus does not burden the programmer with irrelevant details. A reduced input typically leads to shorter executions and smaller program states, both of which reduce the search space as it comes to understanding the bug. In our case, we have eliminated lots of irrelevant input – only the two characters the reduced input contains are relevant.* A reduced test case __is easier to communicate__. All one needs here is the summary: `MysteryRunner fails on "()"`, which is much better than `MysteryRunner fails on a 4100-character input (attached)`.* A reduced test case helps in __identifying duplicates__. If similar bugs have been reported already, and all of them have been reduced to the same cause (namely that the input contains matching parentheses), then it becomes obvious that all these bugs are different symptoms of the same underlying cause – and would all be resolved at once with one code fix. How effective is delta debugging? In the best case (when the left half or the right half fails), the number of tests is logarithmic proportional to the length $n$ of an input (i.e., $O(\log_2 n)$); this is the same complexity as binary search. In the worst case, though, delta debugging can require a number of tests proportional to $n^2$ (i.e., $O(n^2)$) – this happens in the case when we are down to character granularity, and we have to repeatedly tried to delete all characters, only to find that deleting the last character results in a failure \cite{Zeller2002}. (This is a pretty pathological situation, though.) In general, delta debugging is a robust algorithm that is easy to implement, easy to deploy, and easy to use – provided that the underlying test case is deterministic and runs quickly enough to warrant a number of experiments. As these are the same prerequisites that make fuzzing effective, delta debugging makes an excellent companion to fuzzing. Grammar-Based Input ReductionIf the input language is syntactically complex, delta debugging may take several attempts at reduction, and may not be able to reduce inputs at all. In the second half of this chapter, we thus introduce an algorithm named _Grammar-Based Reduction_ (or GRABR for short) that makes use of _grammars_ to reduce syntactically complex inputs. Lexical Reduction vs. Syntactic RulesDespite its general robustness, there are situations in which delta debugging might be inefficient or outright fail. As an example, consider some _expression input_ such as `1 + (2 * 3)`. Delta debugging requires a number of tests to simplify the failure-inducing input, but it eventually returns a minimal input
###Code
expr_input = "1 + (2 * 3)"
dd_reducer = DeltaDebuggingReducer(mystery, log_test=True)
dd_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
Looking at the tests, above, though, only few of them actually represent syntactically valid arithmetic expressions. In a practical setting, we may want to test a program which actually _parses_ such expressions, and which would _reject_ all invalid inputs. We define a class `EvalMysteryRunner` which first _parses_ the given input (according to the rules of our expression grammar), and _only_ if it fits would it be passed to our original `MysteryRunner`. This simulates a setting in which we test an expression interpreter, and in which only valid inputs can trigger the bug.
###Code
from Grammars import EXPR_GRAMMAR
from Parser import EarleyParser # minor dependency
class EvalMysteryRunner(MysteryRunner):
def __init__(self):
self.parser = EarleyParser(EXPR_GRAMMAR)
def run(self, inp):
try:
tree, *_ = self.parser.parse(inp)
except SyntaxError as exc:
return (inp, Runner.UNRESOLVED)
return super().run(inp)
eval_mystery = EvalMysteryRunner()
###Output
_____no_output_____
###Markdown
Under these circumstances, it turns out that delta debugging utterly fails. None of the reductions it applies yield a syntactically valid input, so the input as a whole remains as complex as it was before.
###Code
dd_reducer = DeltaDebuggingReducer(eval_mystery, log_test=True)
dd_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
This behavior is possible if the program under test has several constraints regarding input validity. Delta debugging is not aware of these constraints (nor of the input structure in general), so it might violate these constraints again and again. A Grammmar-Based Reduction ApproachTo reduce inputs with high syntactical complexity, we use another approach: Rather than reducing the input string, we reduce the _tree_ representing its structure. The general idea is to start with a _derivation tree_ coming from parsing the input, and then _substitute subtrees by smaller subtrees of the same type_. These alternate subtrees can either come1. From the tree itself, or2. By applying an alternate grammar expansion using elements from the tree. Let us show these two strategies using an example. We start with a derivation tree from an arithmetic expression:
###Code
from GrammarFuzzer import all_terminals, expansion_to_children, display_tree
derivation_tree, *_ = EarleyParser(EXPR_GRAMMAR).parse(expr_input)
display_tree(derivation_tree)
###Output
_____no_output_____
###Markdown
Simplifying by Replacing Subtrees To simplify this tree, we could replace any `` symbol up in the tree with some `` subtree down in the tree. For instance, we could replace the uppermost `` with its right `` subtree, yielding the string `(2 + 3)`:
###Code
import copy
new_derivation_tree = copy.deepcopy(derivation_tree)
# We really should have some query language
sub_expr_tree = new_derivation_tree[1][0][1][2]
display_tree(sub_expr_tree)
new_derivation_tree[1][0] = sub_expr_tree
display_tree(new_derivation_tree)
all_terminals(new_derivation_tree)
###Output
_____no_output_____
###Markdown
Replacing one subtree by another only works as long as individual elements such as `` occur multiple times in our tree. In the reduced `new_derivation_tree`, above, we could replace further `` trees only once more. Simplifying by Alternative Expansions A second means to simplify this tree is to apply _alternative expansions_. That is, for a symbol, we check whether there is an alternative expansion with a smaller number of children. Then, we replace the symbol with the alternative expansion, filling in needed symbols from the tree. As an example, consider the `new_derivation_tree` above. The applied expansion for `` has been ::= * Lat us replace this with the alternative expansion: ::=
###Code
term_tree = new_derivation_tree[1][0][1][0][1][0][1][1][1][0]
display_tree(term_tree)
shorter_term_tree = term_tree[1][2]
display_tree(shorter_term_tree)
new_derivation_tree[1][0][1][0][1][0][1][1][1][0] = shorter_term_tree
display_tree(new_derivation_tree)
all_terminals(new_derivation_tree)
###Output
_____no_output_____
###Markdown
If we replace derivation subtrees by (smaller) subtrees, and if we search for alternate expansions that again yield smaller subtrees, we can systematically simplify the input. This could be much faster than delta debugging, as our inputs would always be syntactically valid. However, we need a strategy for when to apply which simplification rule. This is what we develop in the remainder of this section. A Class for Reducing with GrammarsWe introduce the `GrammarReducer` class, which is again a `Reducer`. Note that we derive from `CachingReducer`, as the strategy will produce several duplicates.
###Code
class GrammarReducer(CachingReducer):
def __init__(self, runner, parser, log_test=False, log_reduce=False):
super().__init__(runner, log_test=log_test)
self.parser = parser
self.grammar = parser.grammar()
self.start_symbol = parser.start_symbol()
self.log_reduce = log_reduce
self.try_all_combinations = False
###Output
_____no_output_____
###Markdown
A Few HelpersWe define a number of helper functions, which we will need for our strategy. `tree_list_to_string()` does what the name suggest, creating a string from a list of derivation trees:
###Code
def tree_list_to_string(q):
return "[" + ", ".join([all_terminals(tree) for tree in q]) + "]"
tree_list_to_string([derivation_tree, derivation_tree])
###Output
_____no_output_____
###Markdown
The function `possible_combinations()` takes a list of lists $[[x_1, x_2], [y_1, y_2], \dots]$ and creates a list of combinations $[[x_1, y_1], [x_1, y_2], [x_2, y_1], [x_2, y_2], \dots]$.
###Code
def possible_combinations(list_of_lists):
if len(list_of_lists) == 0:
return []
ret = []
for e in list_of_lists[0]:
if len(list_of_lists) == 1:
ret.append([e])
else:
for c in possible_combinations(list_of_lists[1:]):
new_combo = [e] + c
ret.append(new_combo)
return ret
possible_combinations([[1, 2], ['a', 'b']])
###Output
_____no_output_____
###Markdown
The functions `number_of_nodes()` and `max_height()` return the number of nodes and the maximum height of the given tree, respectively.
###Code
def number_of_nodes(tree):
(symbol, children) = tree
return 1 + sum([number_of_nodes(c) for c in children])
number_of_nodes(derivation_tree)
def max_height(tree):
(symbol, children) = tree
if len(children) == 0:
return 1
return 1 + max([max_height(c) for c in children])
max_height(derivation_tree)
###Output
_____no_output_____
###Markdown
Simplification StrategiesLet us now implement our two simplification strategies – replacing subtrees and alternate expansions. Finding SubtreesThe method `subtrees_with_symbol()` returns all subtrees in the given tree whose root is equal to the given symbol. If `ignore_root` is set (default), then the root node of `tree` is not compared against. (The `depth` parameter will be discussed below.)
###Code
class GrammarReducer(GrammarReducer):
def subtrees_with_symbol(self, tree, symbol, depth=-1, ignore_root=True):
# Find all subtrees in TREE whose root is SYMBOL.
# If IGNORE_ROOT is true, ignore the root note of TREE.
ret = []
(child_symbol, children) = tree
if depth <= 0 and not ignore_root and child_symbol == symbol:
ret.append(tree)
# Search across all children
if depth != 0 and children is not None:
for c in children:
ret += self.subtrees_with_symbol(c,
symbol,
depth=depth - 1,
ignore_root=False)
return ret
###Output
_____no_output_____
###Markdown
Here's an example: These are all subtrees with `` in our derivation tree `derivation_tree`.
###Code
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>")]
###Output
_____no_output_____
###Markdown
If we want to replace `` subtrees to simplify the tree, these are the subtrees we could replace them with. Alternate Expansions Our second strategy, simplifying by alternate expansions, is a bit more complex. We first fetch the possible expansions for the given symbol (starting with the ones with the fewest children). For each expansion, we fill in values for the symbols from the subtree (using `subtrees_with_symbols()`, above). We then pick the first possible combination (or _all_ combinations, if the attribute `try_all_combinations` is set).
###Code
class GrammarReducer(GrammarReducer):
def alternate_reductions(self, tree, symbol, depth=-1):
reductions = []
expansions = self.grammar.get(symbol, [])
expansions.sort(
key=lambda expansion: len(
expansion_to_children(expansion)))
for expansion in expansions:
expansion_children = expansion_to_children(expansion)
match = True
new_children_reductions = []
for (alt_symbol, _) in expansion_children:
child_reductions = self.subtrees_with_symbol(
tree, alt_symbol, depth=depth)
if len(child_reductions) == 0:
match = False # Child not found; cannot apply rule
break
new_children_reductions.append(child_reductions)
if not match:
continue # Try next alternative
# Use the first suitable combination
for new_children in possible_combinations(new_children_reductions):
new_tree = (symbol, new_children)
if number_of_nodes(new_tree) < number_of_nodes(tree):
reductions.append(new_tree)
if not self.try_all_combinations:
break
# Sort by number of nodes
reductions.sort(key=number_of_nodes)
return reductions
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
###Output
_____no_output_____
###Markdown
Here are _all_ combinations for `<term>`:
###Code
grammar_reducer.try_all_combinations = True
print([all_terminals(t)
for t in grammar_reducer.alternate_reductions(derivation_tree, "<term>")])
###Output
_____no_output_____
###Markdown
The default, though, is simply to return the first of these:
###Code
grammar_reducer.try_all_combinations = False
[all_terminals(t) for t in grammar_reducer.alternate_reductions(
derivation_tree, "<term>")]
###Output
_____no_output_____
###Markdown
Both Strategies Together Let us now merge both strategies. To replace a subtree with a given symbol, we first search for already existing subtrees (using `subtrees_with_symbol()`); then we go for alternate expansions (using `alternate_reductions()`).
###Code
class GrammarReducer(GrammarReducer):
def symbol_reductions(self, tree, symbol, depth=-1):
"""Find all expansion alternatives for the given symbol"""
reductions = (self.subtrees_with_symbol(tree, symbol, depth=depth)
+ self.alternate_reductions(tree, symbol, depth=depth))
# Filter duplicates
unique_reductions = []
for r in reductions:
if r not in unique_reductions:
unique_reductions.append(r)
return unique_reductions
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
###Output
_____no_output_____
###Markdown
These are the possible reductions for `<expr>` nodes. Note how we first return subtrees (`1 + (2 * 3)`, `(2 * 3)`, `2 * 3`) before going for alternate expansions of `<expr>` (`1`).
###Code
reductions = grammar_reducer.symbol_reductions(derivation_tree, "<expr>")
tree_list_to_string([r for r in reductions])
###Output
_____no_output_____
###Markdown
These are the possible reductions for `<term>` nodes. Again, we first have subtrees of the derivation tree, followed by the alternate expansion `1 * 1`.
###Code
reductions = grammar_reducer.symbol_reductions(derivation_tree, "<term>")
tree_list_to_string([r for r in reductions])
###Output
_____no_output_____
###Markdown
The Reduction StrategyWe are now able to return a number of alternatives for each symbol in the tree. This is what we apply in the core function of our reduction strategy, `reduce_subtree()`. Starting with `subtree`, for every child, we find possible reductions. For every reduction, we replace the child with the reduction and test the resulting (full) tree. If it fails, our reduction was successful; otherwise, we put the child back into place and try out the next reduction. Eventually, we apply `reduce_subtree()` on all children, reducing these as well.
###Code
class GrammarReducer(GrammarReducer):
def reduce_subtree(self, tree, subtree, depth=-1):
symbol, children = subtree
if len(children) == 0:
return False
if self.log_reduce:
print("Reducing", all_terminals(subtree), "with depth", depth)
reduced = False
while True:
reduced_child = False
for i, child in enumerate(children):
(child_symbol, _) = child
for reduction in self.symbol_reductions(
child, child_symbol, depth):
if number_of_nodes(reduction) >= number_of_nodes(child):
continue
# Try this reduction
if self.log_reduce:
print(
"Replacing",
all_terminals(
children[i]),
"by",
all_terminals(reduction))
children[i] = reduction
if self.test(all_terminals(tree)) == Runner.FAIL:
# Success
if self.log_reduce:
print("New tree:", all_terminals(tree))
reduced = reduced_child = True
break
else:
# Didn't work out - restore
children[i] = child
if not reduced_child:
if self.log_reduce:
print("Tried all alternatives for", all_terminals(subtree))
break
# Run recursively
for c in children:
if self.reduce_subtree(tree, c, depth):
reduced = True
return reduced
###Output
_____no_output_____
###Markdown
All we now need is a few drivers. The method `reduce_tree()` is the main entry point into `reduce_subtree()`:
###Code
class GrammarReducer(GrammarReducer):
def reduce_tree(self, tree):
return self.reduce_subtree(tree, tree)
###Output
_____no_output_____
###Markdown
The custom method `parse()` turns a given input into a derivation tree:
###Code
class GrammarReducer(GrammarReducer):
def parse(self, inp):
tree, *_ = self.parser.parse(inp)
if self.log_reduce:
print(all_terminals(tree))
return tree
###Output
_____no_output_____
###Markdown
The method `reduce()` is the one single entry point, parsing the input and then reducing it.
###Code
class GrammarReducer(GrammarReducer):
def reduce(self, inp):
tree = self.parse(inp)
self.reduce_tree(tree)
return all_terminals(tree)
###Output
_____no_output_____
###Markdown
Let us try this out in practice on our input `expr_input` and the `mystery()` function. How quickly can we reduce it?
###Code
expr_input
grammar_reducer = GrammarReducer(
eval_mystery,
EarleyParser(EXPR_GRAMMAR),
log_test=True)
grammar_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
Success! In only five steps, our `GrammarReducer` reduces the input to the minimum that causes the failure. Note how all tests are syntactically valid by construction, avoiding the `UNRESOLVED` outcomes that cause delta debugging to stall. A Depth-Oriented Strategy Even if five steps are already good, we can still do better. If we look at the log, above, we see that after test `2`, where the input (tree) is reduced to `2 * 3`, our `GrammarReducer` first tries to replace the tree with `2` and `3`, which are the alternate `<term>` subtrees. This may work, of course; but if there are many possible subtrees, our strategy will spend quite some time trying one after the other. Delta debugging, as introduced above, follows the idea of trying to cut inputs approximately in half, and thus quickly proceeds towards a minimal input. By replacing a tree with much smaller subtrees, we _could_ possibly reduce a tree significantly, but may need several attempts to do so. A better strategy is to only consider _large_ subtrees first – both for the subtree replacement as well as for alternate expansions. To find such _large_ subtrees, we limit the _depth_ by which we search for possible replacements in the subtree – first, by looking at the direct descendants, later at lower descendants. This is the role of the `depth` parameter used in `subtrees_with_symbol()` and passed through the invoking functions. If set, _only_ symbols at the given depth are returned. Here's an example, starting again with our derivation tree `derivation_tree`:
###Code
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
display_tree(derivation_tree)
###Output
_____no_output_____
###Markdown
At a depth of 1, there is no `<term>` symbol:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=1)]
###Output
_____no_output_____
###Markdown
At a depth of 2, we have the `<term>` subtree on the left hand side:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=2)]
###Output
_____no_output_____
###Markdown
At a depth of 3, we have the `<term>` subtree on the right hand side:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=3)]
###Output
_____no_output_____
###Markdown
The idea is now to start with a depth of 0, subsequently increasing it as we proceed:
###Code
class GrammarReducer(GrammarReducer):
def reduce_tree(self, tree):
depth = 0
while depth < max_height(tree):
reduced = self.reduce_subtree(tree, tree, depth)
if reduced:
depth = 0 # Start with new tree
else:
depth += 1 # Extend search for subtrees
return tree
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_test=True)
grammar_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
We see that a depth-oriented strategy needs even fewer steps in our setting. Comparing StrategiesWe close by demonstrating the difference between text-based delta debugging and our grammar-based reduction. We build a very long expression:
###Code
from GrammarFuzzer import GrammarFuzzer
long_expr_input = GrammarFuzzer(EXPR_GRAMMAR, min_nonterminals=100).fuzz()
long_expr_input
###Output
_____no_output_____
###Markdown
With grammars, we need only a handful of tests to find the failure-inducing input:
###Code
from Timer import Timer
grammar_reducer = GrammarReducer(eval_mystery, EarleyParser(EXPR_GRAMMAR))
with Timer() as grammar_time:
print(grammar_reducer.reduce(long_expr_input))
grammar_reducer.tests
grammar_time.elapsed_time()
###Output
_____no_output_____
###Markdown
Delta debugging, in contrast, requires orders of magnitude more tests (and consequently, time). Again, the reduction is not nearly as perfect as it is with the grammar-based reducer.
###Code
dd_reducer = DeltaDebuggingReducer(eval_mystery)
with Timer() as dd_time:
print(dd_reducer.reduce(long_expr_input))
dd_reducer.tests
dd_time.elapsed_time()
###Output
_____no_output_____
###Markdown
We see that if an input is syntactically complex, using a grammar to reduce inputs is the best way to go. SynopsisA _reducer_ takes a failure-inducing input and reduces it to the minimum that still reproduces the failure. This chapter provides `Reducer` classes that implement such reducers. Here is a simple example: An arithmetic expression causes an error in the Python interpreter:
###Code
!python -c 'x = 1 + 2 * 3 / 0'
###Output
_____no_output_____
###Markdown
Can we reduce this input to a minimum? To use a `Reducer`, one first has to build a `Runner` whose outcome is `FAIL` if the precise error occurs. We therefore build a `ZeroDivisionRunner` whose `run()` method will specifically return a `FAIL` outcome if a `ZeroDivisionError` occurs.
###Code
from Fuzzer import ProgramRunner
class ZeroDivisionRunner(ProgramRunner):
"""Make outcome 'FAIL' if ZeroDivisionError occurs"""
def run(self, inp=""):
result, outcome = super().run(inp)
if result.stderr.find('ZeroDivisionError') >= 0:
outcome = 'FAIL'
return result, outcome
###Output
_____no_output_____
###Markdown
If we feed this expression into a `ZeroDivisionRunner`, it will produce an outcome of `FAIL` as designed.
###Code
python_input = "x = 1 + 2 * 3 / 0"
python_runner = ZeroDivisionRunner("python")
result, outcome = python_runner.run(python_input)
outcome
###Output
_____no_output_____
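###Markdown
Conversely, an input that does not divide by zero should not come back as `FAIL` (a quick sketch; the exact non-failing outcome depends on the `ProgramRunner` defaults):
###Code
# Sketch: an input without a ZeroDivisionError should not be marked 'FAIL'
result, outcome = python_runner.run("x = 1 + 2 * 3")
outcome
###Output
_____no_output_____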
###Markdown
Delta Debugging is a simple and robust reduction algorithm. We can tie a `DeltaDebuggingReducer` to this runner, and have it determine the substring that causes the `python` program to fail:
###Code
dd = DeltaDebuggingReducer(python_runner)
dd.reduce(python_input)
###Output
_____no_output_____
###Markdown
Reducing Failure-Inducing InputsBy construction, fuzzers create inputs that may be hard to read. This causes issues during _debugging_, when a human has to analyze the exact cause of the failure. In this chapter, we present techniques that _automatically reduce and simplify failure-inducing inputs to a minimum_ in order to ease debugging. **Prerequisites*** The simple "delta debugging" technique for reduction has no specific prerequisites.* As reduction is typically used together with fuzzing, reading the [chapter on basic fuzzing](Fuzzer.ipynb) is a good idea.* The later grammar-based techniques require knowledge on [derivation trees](GrammarFuzzer.ipynb) and [parsing](Parser.ipynb). SynopsisTo [use the code provided in this chapter](Importing.ipynb), write```python>>> from fuzzingbook.Reducer import <identifier>```and then make use of the following features.A _reducer_ takes a failure-inducing input and reduces it to the minimum that still reproduces the failure. This chapter provides `Reducer` classes that implement such reducers.Here is a simple example: An arithmetic expression causes an error in the Python interpreter:```python>>> !python -c 'x = 1 + 2 * 3 / 0'Traceback (most recent call last): File "<string>", line 1, in <module>ZeroDivisionError: division by zero```Can we reduce this input to a minimum? To use a `Reducer`, one first has to build a `Runner` whose outcome is `FAIL` if the precise error occurs. We therefore build a `ZeroDivisionRunner` whose `run()` method will specifically return a `FAIL` outcome if a `ZeroDivisionError` occurs.```python>>> from Fuzzer import ProgramRunner>>> class ZeroDivisionRunner(ProgramRunner):>>> """Make outcome 'FAIL' if ZeroDivisionError occurs""">>> def run(self, inp=""):>>> result, outcome = super().run(inp)>>> if result.stderr.find('ZeroDivisionError') >= 0:>>> outcome = 'FAIL'>>> return result, outcome```If we feed this expression into a `ZeroDivisionRunner`, it will produce an outcome of `FAIL` as designed.```python>>> python_input = "x = 1 + 2 * 3 / 0">>> python_runner = ZeroDivisionRunner("python")>>> result, outcome = python_runner.run(python_input)>>> outcome'FAIL'```Delta Debugging is a simple and robust reduction algorithm. We can tie a `DeltaDebuggingReducer` to this runner, and have it determine the substring that causes the `python` program to fail:```python>>> dd = DeltaDebuggingReducer(python_runner)>>> dd.reduce(python_input)'3/0'```The input is reduced to the maximum: We get the essence of the division by zero. Why Reducing?At this point, we have seen a number of test generation techniques that all in some form produce inputs in order to trigger failures. If they are successful – that is, the program actually fails – we must find out why the failure occurred and how to fix it. Here's an example of such a situation. We have a class `MysteryRunner` with a `run()` method that – given its code – can occasionally fail. But under which circumstances does this actually happen? We have deliberately obscured the exact condition in order to make this non-obvious.
###Code
import fuzzingbook_utils
from Fuzzer import RandomFuzzer, Runner
import re
class MysteryRunner(Runner):
def run(self, inp):
x = inp.find(chr(0o17 + 0o31))
y = inp.find(chr(0o27 + 0o22))
if x >= 0 and y >= 0 and x < y:
return (inp, Runner.FAIL)
else:
return (inp, Runner.PASS)
###Output
_____no_output_____
###Markdown
Let us fuzz the function until we find a failing input.
###Code
mystery = MysteryRunner()
random_fuzzer = RandomFuzzer()
while True:
inp = random_fuzzer.fuzz()
result, outcome = mystery.run(inp)
if outcome == mystery.FAIL:
break
failing_input = result
failing_input
###Output
_____no_output_____
###Markdown
Something in this input causes `MysteryRunner` to fail. But what is it? Manual Input ReductionOne important step in the debugging process is _reduction_ – that is, to identify those circumstances of a failure that are relevant for the failure to occur, and to _omit_ (if possible) those parts that are not. As Kernighan and Pike \cite{Kernighan1999} put it:> For every circumstance of the problem, check whether it is relevant for the problem to occur. If it is not, remove it from the problem report or the test case in question. Specifically for inputs, they suggest a _divide and conquer_ process:> Proceed by binary search. Throw away half the input and see if the output is still wrong; if not, go back to the previous state and discard the other half of the input.This is something we can easily try out, using our last generated input:
###Code
failing_input
###Output
_____no_output_____
###Markdown
For instance, we can see whether the error still occurs if we only feed in the first half:
###Code
half_length = len(failing_input) // 2 # // is integer division
first_half = failing_input[:half_length]
mystery.run(first_half)
###Output
_____no_output_____
###Markdown
Nope – the first half alone does not suffice. Maybe the second half?
###Code
second_half = failing_input[half_length:]
mystery.run(second_half)
###Output
_____no_output_____
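###Markdown
Automating this halving by hand is straightforward, but as the following sketch (a hypothetical `naive_halving()` helper, not part of the chapter's code) shows, it gets stuck as soon as neither half fails on its own – which is exactly our situation here:
###Code
# Sketch of the naive "throw away half" loop; it stops as soon as
# neither half fails by itself, returning the input unchanged.
def naive_halving(inp, runner):
    while len(inp) >= 2:
        half = len(inp) // 2
        if runner.run(inp[:half])[1] == Runner.FAIL:
            inp = inp[:half]
        elif runner.run(inp[half:])[1] == Runner.FAIL:
            inp = inp[half:]
        else:
            break  # neither half fails on its own
    return inp

len(naive_halving(failing_input, mystery)) == len(failing_input)
###Output
_____no_output_____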
###Markdown
This did not go so well either. We may still proceed by cutting away _smaller chunks_ – say, one character after another. If our test is deterministic and easily repeated, it is clear that this process eventually will yield a reduced input. But still, it is a rather inefficient process, especially for long inputs. What we need is a _strategy_ that effectively minimizes a failure-inducing input – a strategy that can be automated. Delta Debugging One strategy to effectively reduce failure-inducing inputs is _delta debugging_ \cite{Zeller2002}. Delta Debugging implements the "binary search" strategy, as listed above, but with a twist: If neither half fails (also as above), it keeps on cutting away smaller and smaller chunks from the input, until it eliminates individual characters. Thus, after cutting away the first half, we cut away the first quarter, the second quarter, and so on. Let us illustrate this on our example, and see what happens if we cut away the first quarter.
###Code
quarter_length = len(failing_input) // 4
input_without_first_quarter = failing_input[quarter_length:]
mystery.run(input_without_first_quarter)
###Output
_____no_output_____
###Markdown
Ah! This has failed, and reduced our failing input by 25%. Let's remove another quarter.
###Code
input_without_first_and_second_quarter = failing_input[quarter_length * 2:]
mystery.run(input_without_first_and_second_quarter)
###Output
_____no_output_____
###Markdown
This is not too surprising, as we had that one before:
###Code
second_half
input_without_first_and_second_quarter
###Output
_____no_output_____
###Markdown
How about removing the third quarter, then?
###Code
input_without_first_and_third_quarter = failing_input[quarter_length:
quarter_length * 2] + failing_input[quarter_length * 3:]
mystery.run(input_without_first_and_third_quarter)
###Output
_____no_output_____
###Markdown
Ok. Let us remove the fourth quarter.
###Code
input_without_first_and_fourth_quarter = failing_input[quarter_length:quarter_length * 3]
mystery.run(input_without_first_and_fourth_quarter)
###Output
_____no_output_____
###Markdown
Yes! This has succeeded. Our input is now 50% smaller. We have now tried to remove pieces that make up $\frac{1}{2}$ and $\frac{1}{4}$ of the original failing string. In the next iteration, we would go and remove even smaller pieces – $\frac{1}{8}$, $\frac{1}{16}$ and so on. We continue until we are down to $\frac{1}{97}$ – that is, individual characters. However, this is something we happily let a computer do for us. We first introduce a `Reducer` class as an abstract superclass for all kinds of reducers. The `test()` method runs a single test (with logging, if wanted); the `reduce()` method will eventually reduce an input to the minimum.
###Code
class Reducer(object):
def __init__(self, runner, log_test=False):
"""Attach reducer to the given `runner`"""
self.runner = runner
self.log_test = log_test
self.reset()
def reset(self):
self.tests = 0
def test(self, inp):
result, outcome = self.runner.run(inp)
self.tests += 1
if self.log_test:
print("Test #%d" % self.tests, repr(inp), repr(len(inp)), outcome)
return outcome
def reduce(self, inp):
self.reset()
# Default: Don't reduce
return inp
###Output
_____no_output_____
###Markdown
The `CachingReducer` variant saves test results, such that we don't have to run the same tests again and again:
###Code
class CachingReducer(Reducer):
def reset(self):
super().reset()
self.cache = {}
def test(self, inp):
if inp in self.cache:
return self.cache[inp]
outcome = super().test(inp)
self.cache[inp] = outcome
return outcome
###Output
_____no_output_____
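###Markdown
A quick sketch of the caching effect: testing the same input twice invokes the underlying runner only once.
###Code
# Sketch: the second test() call on the same input is answered from the cache
caching_demo = CachingReducer(mystery)
caching_demo.test("no parentheses here")
caching_demo.test("no parentheses here")
caching_demo.tests  # still 1 - the runner was invoked only once
###Output
_____no_output_____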
###Markdown
Here comes the _Delta Debugging_ reducer. Delta Debugging implements the strategy sketched above: It first removes larger chunks of size $\frac{1}{2}$; if this does not fail, then we proceed to chunks of size $\frac{1}{4}$, then $\frac{1}{8}$ and so on. Our implementation uses almost the same Python code as Zeller in \cite{Zeller2002}; the only difference is that it has been adapted to work on Python 3 and our `Runner` framework. The variable `n` (initially 2) indicates the granularity – in each step, chunks of size $\frac{1}{n}$ are cut away. If none of the tests fails (`some_complement_is_failing` is False), then `n` is doubled – until it reaches the length of the input.
###Code
class DeltaDebuggingReducer(CachingReducer):
def reduce(self, inp):
self.reset()
assert self.test(inp) != Runner.PASS
n = 2 # Initial granularity
while len(inp) >= 2:
start = 0
subset_length = len(inp) / n
some_complement_is_failing = False
while start < len(inp):
complement = inp[:int(start)] + \
inp[int(start + subset_length):]
if self.test(complement) == Runner.FAIL:
inp = complement
n = max(n - 1, 2)
some_complement_is_failing = True
break
start += subset_length
if not some_complement_is_failing:
if n == len(inp):
break
n = min(n * 2, len(inp))
return inp
###Output
_____no_output_____
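###Markdown
To illustrate which complements are tried at a given granularity, here is a small sketch (a hypothetical helper mirroring the inner loop above): at granularity $n$, each candidate drops one chunk of size $\frac{1}{n}$ of the input.
###Code
# Sketch: list the complements delta debugging tries at granularity n,
# mirroring the inner loop of DeltaDebuggingReducer.reduce()
def complements_at_granularity(inp, n):
    complements = []
    subset_length = len(inp) / n
    start = 0.0
    while start < len(inp):
        complements.append(inp[:int(start)] + inp[int(start + subset_length):])
        start += subset_length
    return complements

complements_at_granularity("abcdefgh", 4)
###Output
_____no_output_____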
###Markdown
To see how the `DeltaDebuggingReducer` works, let us run it on our failing input. With each step, we see how the remaining input gets smaller and smaller, until only two characters remain:
###Code
dd_reducer = DeltaDebuggingReducer(mystery, log_test=True)
dd_reducer.reduce(failing_input)
###Output
_____no_output_____
###Markdown
Now we know why `MysteryRunner` fails – it suffices that the input contains two matching parentheses. Delta Debugging determines this in 29 steps. Its result is _1-minimal_, meaning that every character contained is required to produce the error; removing any (as seen in tests `27` and `29`, above) no longer makes the test fail. This property is guaranteed by the delta debugging algorithm, which in its last stage always tries to delete characters one by one. A reduced test case such as the one above has many advantages:* A reduced test case __reduces the _cognitive load_ of the programmer__. The test case is shorter and focused, and thus does not burden the programmer with irrelevant details. A reduced input typically leads to shorter executions and smaller program states, both of which reduce the search space as it comes to understanding the bug. In our case, we have eliminated lots of irrelevant input – only the two characters the reduced input contains are relevant.* A reduced test case __is easier to communicate__. All one needs here is the summary: `MysteryRunner fails on "()"`, which is much better than `MysteryRunner fails on a 4100-character input (attached)`.* A reduced test case helps in __identifying duplicates__. If similar bugs have been reported already, and all of them have been reduced to the same cause (namely that the input contains matching parentheses), then it becomes obvious that all these bugs are different symptoms of the same underlying cause – and would all be resolved at once with one code fix. How effective is delta debugging? In the best case (when the left half or the right half fails), the number of tests is logarithmic proportional to the length $n$ of an input (i.e., $O(\log_2 n)$); this is the same complexity as binary search. In the worst case, though, delta debugging can require a number of tests proportional to $n^2$ (i.e., $O(n^2)$) – this happens in the case when we are down to character granularity, and we have to repeatedly tried to delete all characters, only to find that deleting the last character results in a failure \cite{Zeller2002}. (This is a pretty pathological situation, though.) In general, delta debugging is a robust algorithm that is easy to implement, easy to deploy, and easy to use – provided that the underlying test case is deterministic and runs quickly enough to warrant a number of experiments. As these are the same prerequisites that make fuzzing effective, delta debugging makes an excellent companion to fuzzing. Grammar-Based Input ReductionIf the input language is syntactically complex, delta debugging may take several attempts at reduction, and may not be able to reduce inputs at all. In the second half of this chapter, we thus introduce an algorithm named _Grammar-Based Reduction_ (or GRABR for short) that makes use of _grammars_ to reduce syntactically complex inputs. Lexical Reduction vs. Syntactic RulesDespite its general robustness, there are situations in which delta debugging might be inefficient or outright fail. As an example, consider some _expression input_ such as `1 + (2 * 3)`. Delta debugging requires a number of tests to simplify the failure-inducing input, but it eventually returns a minimal input
###Code
expr_input = "1 + (2 * 3)"
dd_reducer = DeltaDebuggingReducer(mystery, log_test=True)
dd_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
Looking at the tests, above, though, only few of them actually represent syntactically valid arithmetic expressions. In a practical setting, we may want to test a program which actually _parses_ such expressions, and which would _reject_ all invalid inputs. We define a class `EvalMysteryRunner` which first _parses_ the given input (according to the rules of our expression grammar), and _only_ if it fits would it be passed to our original `MysteryRunner`. This simulates a setting in which we test an expression interpreter, and in which only valid inputs can trigger the bug.
###Code
from Grammars import EXPR_GRAMMAR
from Parser import EarleyParser # minor dependency
class EvalMysteryRunner(MysteryRunner):
def __init__(self):
self.parser = EarleyParser(EXPR_GRAMMAR)
def run(self, inp):
try:
tree, *_ = self.parser.parse(inp)
except SyntaxError as exc:
return (inp, Runner.UNRESOLVED)
return super().run(inp)
eval_mystery = EvalMysteryRunner()
###Output
_____no_output_____
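###Markdown
A quick sketch of the effect (assuming the parser rejects the incomplete expression): a syntactically invalid input now yields an `UNRESOLVED` outcome rather than `PASS` or `FAIL`.
###Code
# Sketch: an input that does not parse should come back as UNRESOLVED
eval_mystery.run("1 +")
###Output
_____no_output_____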
###Markdown
Under these circumstances, it turns out that delta debugging utterly fails. None of the reductions it applies yield a syntactically valid input, so the input as a whole remains as complex as it was before.
###Code
dd_reducer = DeltaDebuggingReducer(eval_mystery, log_test=True)
dd_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
This behavior is possible if the program under test has several constraints regarding input validity. Delta debugging is not aware of these constraints (nor of the input structure in general), so it might violate these constraints again and again. A Grammar-Based Reduction ApproachTo reduce inputs with high syntactical complexity, we use another approach: Rather than reducing the input string, we reduce the _tree_ representing its structure. The general idea is to start with a _derivation tree_ coming from parsing the input, and then _substitute subtrees by smaller subtrees of the same type_. These alternate subtrees can either come (1) from the tree itself, or (2) by applying an alternate grammar expansion using elements from the tree. Let us show these two strategies using an example. We start with a derivation tree from an arithmetic expression:
###Code
from GrammarFuzzer import all_terminals, expansion_to_children, display_tree
derivation_tree, *_ = EarleyParser(EXPR_GRAMMAR).parse(expr_input)
display_tree(derivation_tree)
###Output
_____no_output_____
###Markdown
Simplifying by Replacing Subtrees To simplify this tree, we could replace any `<expr>` symbol up in the tree with some `<expr>` subtree down in the tree. For instance, we could replace the uppermost `<expr>` with its right `<expr>` subtree, yielding the string `(2 * 3)`:
###Code
import copy
new_derivation_tree = copy.deepcopy(derivation_tree)
# We really should have some query language
sub_expr_tree = new_derivation_tree[1][0][1][2]
display_tree(sub_expr_tree)
new_derivation_tree[1][0] = sub_expr_tree
display_tree(new_derivation_tree)
all_terminals(new_derivation_tree)
###Output
_____no_output_____
###Markdown
Replacing one subtree by another only works as long as individual elements such as `<expr>` occur multiple times in our tree. In the reduced `new_derivation_tree`, above, we could replace further `<expr>` trees only once more. Simplifying by Alternative Expansions A second means to simplify this tree is to apply _alternative expansions_. That is, for a symbol, we check whether there is an alternative expansion with a smaller number of children. Then, we replace the symbol with the alternative expansion, filling in needed symbols from the tree. As an example, consider the `new_derivation_tree` above. The applied expansion for `<term>` has been `<term> ::= <term> * <factor>`. Let us replace this with the alternative expansion: `<term> ::= <factor>`
###Code
term_tree = new_derivation_tree[1][0][1][0][1][0][1][1][1][0]
display_tree(term_tree)
shorter_term_tree = term_tree[1][2]
display_tree(shorter_term_tree)
new_derivation_tree[1][0][1][0][1][0][1][1][1][0] = shorter_term_tree
display_tree(new_derivation_tree)
all_terminals(new_derivation_tree)
###Output
_____no_output_____
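###Markdown
As a quick sketch, we can confirm that the manually simplified input still triggers the failure – it still contains a pair of matching parentheses:
###Code
# Sketch: the simplified input should still make the mystery runner fail
mystery.run(all_terminals(new_derivation_tree))
###Output
_____no_output_____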
###Markdown
If we replace derivation subtrees by (smaller) subtrees, and if we search for alternate expansions that again yield smaller subtrees, we can systematically simplify the input. This could be much faster than delta debugging, as our inputs would always be syntactically valid. However, we need a strategy for when to apply which simplification rule. This is what we develop in the remainder of this section. A Class for Reducing with GrammarsWe introduce the `GrammarReducer` class, which is again a `Reducer`. Note that we derive from `CachingReducer`, as the strategy will produce several duplicates.
###Code
class GrammarReducer(CachingReducer):
def __init__(self, runner, parser, log_test=False, log_reduce=False):
super().__init__(runner, log_test=log_test)
self.parser = parser
self.grammar = parser.grammar()
self.start_symbol = parser.start_symbol()
self.log_reduce = log_reduce
self.try_all_combinations = False
###Output
_____no_output_____
###Markdown
A Few HelpersWe define a number of helper functions, which we will need for our strategy. `tree_list_to_string()` does what the name suggests, creating a string from a list of derivation trees:
###Code
def tree_list_to_string(q):
return "[" + ", ".join([all_terminals(tree) for tree in q]) + "]"
tree_list_to_string([derivation_tree, derivation_tree])
###Output
_____no_output_____
###Markdown
The function `possible_combinations()` takes a list of lists $[[x_1, x_2], [y_1, y_2], \dots]$ and creates a list of combinations $[[x_1, y_1], [x_1, y_2], [x_2, y_1], [x_2, y_2], \dots]$.
###Code
def possible_combinations(list_of_lists):
if len(list_of_lists) == 0:
return []
ret = []
for e in list_of_lists[0]:
if len(list_of_lists) == 1:
ret.append([e])
else:
for c in possible_combinations(list_of_lists[1:]):
new_combo = [e] + c
ret.append(new_combo)
return ret
possible_combinations([[1, 2], ['a', 'b']])
###Output
_____no_output_____
###Markdown
The functions `number_of_nodes()` and `max_height()` return the number of nodes and the maximum height of the given tree, respectively.
###Code
def number_of_nodes(tree):
(symbol, children) = tree
return 1 + sum([number_of_nodes(c) for c in children])
number_of_nodes(derivation_tree)
def max_height(tree):
(symbol, children) = tree
if len(children) == 0:
return 1
return 1 + max([max_height(c) for c in children])
max_height(derivation_tree)
###Output
_____no_output_____
###Markdown
Simplification StrategiesLet us now implement our two simplification strategies – replacing subtrees and alternate expansions. Finding SubtreesThe method `subtrees_with_symbol()` returns all subtrees in the given tree whose root is equal to the given symbol. If `ignore_root` is set (default), then the root node of `tree` is not compared against. (The `depth` parameter will be discussed below.)
###Code
class GrammarReducer(GrammarReducer):
def subtrees_with_symbol(self, tree, symbol, depth=-1, ignore_root=True):
# Find all subtrees in TREE whose root is SYMBOL.
        # If IGNORE_ROOT is true, ignore the root node of TREE.
ret = []
(child_symbol, children) = tree
if depth <= 0 and not ignore_root and child_symbol == symbol:
ret.append(tree)
# Search across all children
if depth != 0 and children is not None:
for c in children:
ret += self.subtrees_with_symbol(c,
symbol,
depth=depth - 1,
ignore_root=False)
return ret
###Output
_____no_output_____
###Markdown
Here's an example: These are all subtrees with `<term>` in our derivation tree `derivation_tree`.
###Code
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>")]
###Output
_____no_output_____
###Markdown
If we want to replace `<term>` subtrees to simplify the tree, these are the subtrees we could replace them with. Alternate Expansions Our second strategy, simplifying by alternate expansions, is a bit more complex. We first fetch the possible expansions for the given symbol (starting with the ones with the fewest children). For each expansion, we fill in values for the symbols from the subtree (using `subtrees_with_symbol()`, above). We then pick the first possible combination (or _all_ combinations, if the attribute `try_all_combinations` is set).
###Code
class GrammarReducer(GrammarReducer):
def alternate_reductions(self, tree, symbol, depth=-1):
reductions = []
expansions = self.grammar.get(symbol, [])
expansions.sort(
key=lambda expansion: len(
expansion_to_children(expansion)))
for expansion in expansions:
expansion_children = expansion_to_children(expansion)
match = True
new_children_reductions = []
for (alt_symbol, _) in expansion_children:
child_reductions = self.subtrees_with_symbol(
tree, alt_symbol, depth=depth)
if len(child_reductions) == 0:
match = False # Child not found; cannot apply rule
break
new_children_reductions.append(child_reductions)
if not match:
continue # Try next alternative
# Use the first suitable combination
for new_children in possible_combinations(new_children_reductions):
new_tree = (symbol, new_children)
if number_of_nodes(new_tree) < number_of_nodes(tree):
reductions.append(new_tree)
if not self.try_all_combinations:
break
# Sort by number of nodes
reductions.sort(key=number_of_nodes)
return reductions
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
###Output
_____no_output_____
###Markdown
Here are _all_ combinations for `<term>`:
###Code
grammar_reducer.try_all_combinations = True
print([all_terminals(t)
for t in grammar_reducer.alternate_reductions(derivation_tree, "<term>")])
###Output
_____no_output_____
###Markdown
The default, though, is simply to return the first of these:
###Code
grammar_reducer.try_all_combinations = False
[all_terminals(t) for t in grammar_reducer.alternate_reductions(
derivation_tree, "<term>")]
###Output
_____no_output_____
###Markdown
Both Strategies Together Let us now merge both strategies. To replace a subtree with a given symbol, we first search for already existing subtrees (using `subtrees_with_symbol()`); then we go for alternate expansions (using `alternate_reductions()`).
###Code
class GrammarReducer(GrammarReducer):
def symbol_reductions(self, tree, symbol, depth=-1):
"""Find all expansion alternatives for the given symbol"""
reductions = (self.subtrees_with_symbol(tree, symbol, depth=depth)
+ self.alternate_reductions(tree, symbol, depth=depth))
# Filter duplicates
unique_reductions = []
for r in reductions:
if r not in unique_reductions:
unique_reductions.append(r)
return unique_reductions
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
###Output
_____no_output_____
###Markdown
These are the possible reductions for `<expr>` nodes. Note how we first return subtrees (`1 + (2 * 3)`, `(2 * 3)`, `2 * 3`) before going for alternate expansions of `<expr>` (`1`).
###Code
reductions = grammar_reducer.symbol_reductions(derivation_tree, "<expr>")
tree_list_to_string([r for r in reductions])
###Output
_____no_output_____
###Markdown
These are the possible reductions for `<term>` nodes. Again, we first have subtrees of the derivation tree, followed by the alternate expansion `1 * 1`.
###Code
reductions = grammar_reducer.symbol_reductions(derivation_tree, "<term>")
tree_list_to_string([r for r in reductions])
###Output
_____no_output_____
###Markdown
The Reduction StrategyWe are now able to return a number of alternatives for each symbol in the tree. This is what we apply in the core function of our reduction strategy, `reduce_subtree()`. Starting with `subtree`, for every child, we find possible reductions. For every reduction, we replace the child with the reduction and test the resulting (full) tree. If it fails, our reduction was successful; otherwise, we put the child back into place and try out the next reduction. Eventually, we apply `reduce_subtree()` on all children, reducing these as well.
###Code
class GrammarReducer(GrammarReducer):
def reduce_subtree(self, tree, subtree, depth=-1):
symbol, children = subtree
if len(children) == 0:
return False
if self.log_reduce:
print("Reducing", all_terminals(subtree), "with depth", depth)
reduced = False
while True:
reduced_child = False
for i, child in enumerate(children):
(child_symbol, _) = child
for reduction in self.symbol_reductions(
child, child_symbol, depth):
if number_of_nodes(reduction) >= number_of_nodes(child):
continue
# Try this reduction
if self.log_reduce:
print(
"Replacing",
all_terminals(
children[i]),
"by",
all_terminals(reduction))
children[i] = reduction
if self.test(all_terminals(tree)) == Runner.FAIL:
# Success
if self.log_reduce:
print("New tree:", all_terminals(tree))
reduced = reduced_child = True
break
else:
# Didn't work out - restore
children[i] = child
if not reduced_child:
if self.log_reduce:
print("Tried all alternatives for", all_terminals(subtree))
break
# Run recursively
for c in children:
if self.reduce_subtree(tree, c, depth):
reduced = True
return reduced
###Output
_____no_output_____
###Markdown
All we now need is a few drivers. The method `reduce_tree()` is the main entry point into `reduce_subtree()`:
###Code
class GrammarReducer(GrammarReducer):
def reduce_tree(self, tree):
return self.reduce_subtree(tree, tree)
###Output
_____no_output_____
###Markdown
The custom method `parse()` turns a given input into a derivation tree:
###Code
class GrammarReducer(GrammarReducer):
def parse(self, inp):
tree, *_ = self.parser.parse(inp)
if self.log_reduce:
print(all_terminals(tree))
return tree
###Output
_____no_output_____
###Markdown
The method `reduce()` is the one single entry point, parsing the input and then reducing it.
###Code
class GrammarReducer(GrammarReducer):
def reduce(self, inp):
tree = self.parse(inp)
self.reduce_tree(tree)
return all_terminals(tree)
###Output
_____no_output_____
###Markdown
Let us try this out in practice on our input `expr_input` and the `mystery()` function. How quickly can we reduce it?
###Code
expr_input
grammar_reducer = GrammarReducer(
eval_mystery,
EarleyParser(EXPR_GRAMMAR),
log_test=True)
grammar_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
Success! In only five steps, our `GrammarReducer` reduces the input to the minimum that causes the failure. Note how all tests are syntactically valid by construction, avoiding the `UNRESOLVED` outcomes that cause delta debugging to stall. A Depth-Oriented Strategy Even if five steps are already good, we can still do better. If we look at the log, above, we see that after test `2`, where the input (tree) is reduced to `2 * 3`, our `GrammarReducer` first tries to replace the tree with `2` and `3`, which are the alternate `<term>` subtrees. This may work, of course; but if there are many possible subtrees, our strategy will spend quite some time trying one after the other. Delta debugging, as introduced above, follows the idea of trying to cut inputs approximately in half, and thus quickly proceeds towards a minimal input. By replacing a tree with much smaller subtrees, we _could_ possibly reduce a tree significantly, but may need several attempts to do so. A better strategy is to only consider _large_ subtrees first – both for the subtree replacement as well as for alternate expansions. To find such _large_ subtrees, we limit the _depth_ by which we search for possible replacements in the subtree – first, by looking at the direct descendants, later at lower descendants. This is the role of the `depth` parameter used in `subtrees_with_symbol()` and passed through the invoking functions. If set, _only_ symbols at the given depth are returned. Here's an example, starting again with our derivation tree `derivation_tree`:
###Code
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
display_tree(derivation_tree)
###Output
_____no_output_____
###Markdown
At a depth of 1, there is no `<term>` symbol:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=1)]
###Output
_____no_output_____
###Markdown
At a depth of 2, we have the `<term>` subtree on the left hand side:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=2)]
###Output
_____no_output_____
###Markdown
At a depth of 3, we have the `<term>` subtree on the right hand side:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=3)]
###Output
_____no_output_____
###Markdown
The idea is now to start with a depth of 0, subsequently increasing it as we proceed:
###Code
class GrammarReducer(GrammarReducer):
def reduce_tree(self, tree):
depth = 0
while depth < max_height(tree):
reduced = self.reduce_subtree(tree, tree, depth)
if reduced:
depth = 0 # Start with new tree
else:
depth += 1 # Extend search for subtrees
return tree
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_test=True)
grammar_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
We see that a depth-oriented strategy needs even fewer steps in our setting. Comparing StrategiesWe close by demonstrating the difference between text-based delta debugging and our grammar-based reduction. We build a very long expression:
###Code
from GrammarFuzzer import GrammarFuzzer
long_expr_input = GrammarFuzzer(EXPR_GRAMMAR, min_nonterminals=100).fuzz()
long_expr_input
###Output
_____no_output_____
###Markdown
With grammars, we need only a handful of tests to find the failure-inducing input:
###Code
from Timer import Timer
grammar_reducer = GrammarReducer(eval_mystery, EarleyParser(EXPR_GRAMMAR))
with Timer() as grammar_time:
print(grammar_reducer.reduce(long_expr_input))
grammar_reducer.tests
grammar_time.elapsed_time()
###Output
_____no_output_____
###Markdown
Delta debugging, in contrast, requires orders of magnitude more tests (and consequently, time). Again, the reduction is not nearly as perfect as it is with the grammar-based reducer.
###Code
dd_reducer = DeltaDebuggingReducer(eval_mystery)
with Timer() as dd_time:
print(dd_reducer.reduce(long_expr_input))
dd_reducer.tests
dd_time.elapsed_time()
###Output
_____no_output_____
###Markdown
We see that if an input is syntactically complex, using a grammar to reduce inputs is the best way to go. SynopsisA _reducer_ takes a failure-inducing input and reduces it to the minimum that still reproduces the failure. This chapter provides `Reducer` classes that implement such reducers. Here is a simple example: An arithmetic expression causes an error in the Python interpreter:
###Code
!python -c 'x = 1 + 2 * 3 / 0'
###Output
_____no_output_____
###Markdown
Can we reduce this input to a minimum? To use a `Reducer`, one first has to build a `Runner` whose outcome is `FAIL` if the precise error occurs. We therefore build a `ZeroDivisionRunner` whose `run()` method will specifically return a `FAIL` outcome if a `ZeroDivisionError` occurs.
###Code
from Fuzzer import ProgramRunner
class ZeroDivisionRunner(ProgramRunner):
"""Make outcome 'FAIL' if ZeroDivisionError occurs"""
def run(self, inp=""):
result, outcome = super().run(inp)
if result.stderr.find('ZeroDivisionError') >= 0:
outcome = 'FAIL'
return result, outcome
###Output
_____no_output_____
###Markdown
If we feed this expression into a `ZeroDivisionRunner`, it will produce an outcome of `FAIL` as designed.
###Code
python_input = "x = 1 + 2 * 3 / 0"
python_runner = ZeroDivisionRunner("python")
result, outcome = python_runner.run(python_input)
outcome
###Output
_____no_output_____
###Markdown
Delta Debugging is a simple and robust reduction algorithm. We can tie a `DeltaDebuggingReducer` to this runner, and have it determine the substring that causes the `python` program to fail:
###Code
dd = DeltaDebuggingReducer(python_runner)
dd.reduce(python_input)
###Output
_____no_output_____
###Markdown
Reducing Failure-Inducing InputsBy construction, fuzzers create inputs that may be hard to read. This causes issues during _debugging_, when a human has to analyze the exact cause of the failure. In this chapter, we present techniques that _automatically reduce and simplify failure-inducing inputs to a minimum_ in order to ease debugging.
###Code
from bookutils import YouTubeVideo
YouTubeVideo('JOv1xGVdXAU')
###Output
_____no_output_____
###Markdown
**Prerequisites*** The simple "delta debugging" technique for reduction has no specific prerequisites.* As reduction is typically used together with fuzzing, reading the [chapter on basic fuzzing](Fuzzer.ipynb) is a good idea.* The later grammar-based techniques require knowledge on [derivation trees](GrammarFuzzer.ipynb) and [parsing](Parser.ipynb). SynopsisTo [use the code provided in this chapter](Importing.ipynb), write```python>>> from fuzzingbook.Reducer import ```and then make use of the following features.A _reducer_ takes a failure-inducing input and reduces it to the minimum that still reproduces the failure. This chapter provides `Reducer` classes that implement such reducers.Here is a simple example: An arithmetic expression causes an error in the Python interpreter:```python>>> !python -c 'x = 1 + 2 * 3 / 0'```Can we reduce this input to a minimum? To use a `Reducer`, one first has to build a `Runner` whose outcome is `FAIL` if the precise error occurs. We therefore build a `ZeroDivisionRunner` whose `run()` method will specifically return a `FAIL` outcome if a `ZeroDivisionError` occurs.```python>>> from Fuzzer import ProgramRunner>>> class ZeroDivisionRunner(ProgramRunner):>>> """Make outcome 'FAIL' if ZeroDivisionError occurs""">>> def run(self, inp=""):>>> result, outcome = super().run(inp)>>> if result.stderr.find('ZeroDivisionError') >= 0:>>> outcome = 'FAIL'>>> return result, outcome```If we feed this expression into a `ZeroDivisionRunner`, it will produce an outcome of `FAIL` as designed.```python>>> python_input = "x = 1 + 2 * 3 / 0">>> python_runner = ZeroDivisionRunner("python")>>> result, outcome = python_runner.run(python_input)>>> outcome```Delta Debugging is a simple and robust reduction algorithm. We can tie a `DeltaDebuggingReducer` to this runner, and have it determine the substring that causes the `python` program to fail:```python>>> dd = DeltaDebuggingReducer(python_runner)>>> dd.reduce(python_input)```The input is reduced to the minimum: We get the essence of the division by zero. Why Reducing?At this point, we have seen a number of test generation techniques that all in some form produce inputs in order to trigger failures. If they are successful – that is, the program actually fails – we must find out why the failure occurred and how to fix it. Here's an example of such a situation. We have a class `MysteryRunner` with a `run()` method that – given its code – can occasionally fail. But under which circumstances does this actually happen? We have deliberately obscured the exact condition in order to make this non-obvious.
###Code
import bookutils
from bookutils import quiz
from typing import Tuple, List, Sequence, Any, Optional
from ExpectError import ExpectError
from Fuzzer import RandomFuzzer, Runner, Outcome
import re
class MysteryRunner(Runner):
def run(self, inp: str) -> Tuple[str, Outcome]:
x = inp.find(chr(0o17 + 0o31))
y = inp.find(chr(0o27 + 0o22))
if x >= 0 and y >= 0 and x < y:
return (inp, Runner.FAIL)
else:
return (inp, Runner.PASS)
###Output
_____no_output_____
###Markdown
Let us fuzz the function until we find a failing input.
###Code
mystery = MysteryRunner()
random_fuzzer = RandomFuzzer()
while True:
inp = random_fuzzer.fuzz()
result, outcome = mystery.run(inp)
if outcome == mystery.FAIL:
break
failing_input = result
failing_input
###Output
_____no_output_____
###Markdown
Something in this input causes `MysteryRunner` to fail. But what is it? Manual Input ReductionOne important step in the debugging process is _reduction_ – that is, to identify those circumstances of a failure that are relevant for the failure to occur, and to _omit_ (if possible) those parts that are not. As Kernighan and Pike \cite{Kernighan1999} put it:> For every circumstance of the problem, check whether it is relevant for the problem to occur. If it is not, remove it from the problem report or the test case in question. Specifically for inputs, they suggest a _divide and conquer_ process:> Proceed by binary search. Throw away half the input and see if the output is still wrong; if not, go back to the previous state and discard the other half of the input.This is something we can easily try out, using our last generated input:
###Code
failing_input
###Output
_____no_output_____
###Markdown
For instance, we can see whether the error still occurs if we only feed in the first half:
###Code
half_length = len(failing_input) // 2 # // is integer division
first_half = failing_input[:half_length]
mystery.run(first_half)
###Output
_____no_output_____
###Markdown
Nope – the first half alone does not suffice. Maybe the second half?
###Code
second_half = failing_input[half_length:]
mystery.run(second_half)
###Output
_____no_output_____
###Markdown
This did not go so well either. We may still proceed by cutting away _smaller chunks_ – say, one character after another. If our test is deterministic and easily repeated, it is clear that this process eventually will yield a reduced input. But still, it is a rather inefficient process, especially for long inputs. What we need is a _strategy_ that effectively minimizes a failure-inducing input – a strategy that can be automated. Delta Debugging One strategy to effectively reduce failure-inducing inputs is _delta debugging_ \cite{Zeller2002}. Delta Debugging implements the "binary search" strategy, as listed above, but with a twist: If neither half fails (also as above), it keeps on cutting away smaller and smaller chunks from the input, until it eliminates individual characters. Thus, after cutting away the first half, we cut away the first quarter, the second quarter, and so on. Let us illustrate this on our example, and see what happens if we cut away the first quarter.
###Code
quarter_length = len(failing_input) // 4
input_without_first_quarter = failing_input[quarter_length:]
mystery.run(input_without_first_quarter)
###Output
_____no_output_____
###Markdown
Ah! This has failed, and reduced our failing input by 25%. Let's remove another quarter.
###Code
input_without_first_and_second_quarter = failing_input[quarter_length * 2:]
mystery.run(input_without_first_and_second_quarter)
###Output
_____no_output_____
###Markdown
This is not too surprising, as we had that one before:
###Code
second_half
input_without_first_and_second_quarter
###Output
_____no_output_____
###Markdown
How about removing the third quarter, then?
###Code
input_without_first_and_third_quarter = failing_input[quarter_length:
quarter_length * 2] + failing_input[quarter_length * 3:]
mystery.run(input_without_first_and_third_quarter)
###Output
_____no_output_____
###Markdown
Ok. Let us remove the fourth quarter.
###Code
input_without_first_and_fourth_quarter = failing_input[quarter_length:quarter_length * 3]
mystery.run(input_without_first_and_fourth_quarter)
###Output
_____no_output_____
###Markdown
Yes! This has succeeded. Our input is now 50% smaller. We have now tried to remove pieces that make up $\frac{1}{2}$ and $\frac{1}{4}$ of the original failing string. In the next iteration, we would go and remove even smaller pieces – $\frac{1}{8}$, $\frac{1}{16}$ and so on. We continue until we are down to $\frac{1}{97}$ – that is, individual characters. However, this is something we happily let a computer do for us. We first introduce a `Reducer` class as an abstract superclass for all kinds of reducers. The `test()` method runs a single test (with logging, if wanted); the `reduce()` method will eventually reduce an input to the minimum.
###Code
class Reducer:
"""Base class for reducers."""
def __init__(self, runner: Runner, log_test: bool = False) -> None:
"""Attach reducer to the given `runner`"""
self.runner = runner
self.log_test = log_test
self.reset()
def reset(self) -> None:
"""Reset the test counter to zero. To be extended in subclasses."""
self.tests = 0
def test(self, inp: str) -> Outcome:
"""Test with input `inp`. Return outcome.
To be extended in subclasses."""
result, outcome = self.runner.run(inp)
self.tests += 1
if self.log_test:
print("Test #%d" % self.tests, repr(inp), repr(len(inp)), outcome)
return outcome
def reduce(self, inp: str) -> str:
"""Reduce input `inp`. Return reduced input.
To be defined in subclasses."""
self.reset()
# Default: Don't reduce
return inp
###Output
_____no_output_____
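###Markdown
As a quick sketch, the base class by itself does not reduce anything – its default `reduce()` simply returns the input unchanged:
###Code
# Sketch: the default reduce() leaves the input unchanged (and runs no tests)
base_reducer = Reducer(mystery)
base_reducer.reduce("1 + (2 * 3)")
###Output
_____no_output_____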
###Markdown
The `CachingReducer` variant saves test results, such that we don't have to run the same tests again and again:
###Code
class CachingReducer(Reducer):
"""A reducer that also caches test outcomes"""
def reset(self):
super().reset()
self.cache = {}
def test(self, inp):
if inp in self.cache:
return self.cache[inp]
outcome = super().test(inp)
self.cache[inp] = outcome
return outcome
###Output
_____no_output_____
###Markdown
Here comes the _Delta Debugging_ reducer. Delta Debugging implements the strategy sketched above: It first removes larger chunks of size $\frac{1}{2}$; if this does not fail, then we proceed to chunks of size $\frac{1}{4}$, then $\frac{1}{8}$ and so on. Our implementation uses almost the same Python code as Zeller in \cite{Zeller2002}; the only difference is that it has been adapted to work on Python 3 and our `Runner` framework. The variable `n` (initially 2) indicates the granularity – in each step, chunks of size $\frac{1}{n}$ are cut away. If none of the tests fails (`some_complement_is_failing` is False), then `n` is doubled – until it reaches the length of the input.
###Code
class DeltaDebuggingReducer(CachingReducer):
"""Reduce inputs using delta debugging."""
def reduce(self, inp: str) -> str:
"""Reduce input `inp` using delta debugging. Return reduced input."""
self.reset()
assert self.test(inp) != Runner.PASS
n = 2 # Initial granularity
while len(inp) >= 2:
start = 0.0
subset_length = len(inp) / n
some_complement_is_failing = False
while start < len(inp):
complement = inp[:int(start)] + \
inp[int(start + subset_length):]
if self.test(complement) == Runner.FAIL:
inp = complement
n = max(n - 1, 2)
some_complement_is_failing = True
break
start += subset_length
if not some_complement_is_failing:
if n == len(inp):
break
n = min(n * 2, len(inp))
return inp
###Output
_____no_output_____
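###Markdown
Before applying it to our mystery, here is a small sketch with a hypothetical `SubstringRunner` that fails whenever its input contains the substring `bug`; since the result of delta debugging is 1-minimal, such an input should be reduced to exactly `bug`:
###Code
# Sketch: a hypothetical runner that fails on inputs containing "bug"
class SubstringRunner(Runner):
    def run(self, inp: str) -> Tuple[str, Outcome]:
        if "bug" in inp:
            return (inp, Runner.FAIL)
        return (inp, Runner.PASS)

substring_reducer = DeltaDebuggingReducer(SubstringRunner())
substring_reducer.reduce("foo bar bug baz")
###Output
_____no_output_____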
###Markdown
To see how the `DeltaDebuggingReducer` works, let us run it on our failing input. With each step, we see how the remaining input gets smaller and smaller, until only two characters remain:
###Code
dd_reducer = DeltaDebuggingReducer(mystery, log_test=True)
dd_reducer.reduce(failing_input)
###Output
_____no_output_____
###Markdown
Now we know why `MysteryRunner` fails – it suffices that the input contains two matching parentheses. Delta Debugging determines this in 29 steps. Its result is _1-minimal_, meaning that every character contained is required to produce the error; removing any (as seen in tests `27` and `29`, above) no longer makes the test fail. This property is guaranteed by the delta debugging algorithm, which in its last stage always tries to delete characters one by one. A reduced test case such as the one above has many advantages:* A reduced test case __reduces the _cognitive load_ of the programmer__. The test case is shorter and focused, and thus does not burden the programmer with irrelevant details. A reduced input typically leads to shorter executions and smaller program states, both of which reduce the search space as it comes to understanding the bug. In our case, we have eliminated lots of irrelevant input – only the two characters the reduced input contains are relevant.* A reduced test case __is easier to communicate__. All one needs here is the summary: `MysteryRunner fails on "()"`, which is much better than `MysteryRunner fails on a 4100-character input (attached)`.* A reduced test case helps in __identifying duplicates__. If similar bugs have been reported already, and all of them have been reduced to the same cause (namely that the input contains matching parentheses), then it becomes obvious that all these bugs are different symptoms of the same underlying cause – and would all be resolved at once with one code fix. How effective is delta debugging? In the best case (when the left half or the right half fails), the number of tests is logarithmically proportional to the length $n$ of an input (i.e., $O(\log_2 n)$); this is the same complexity as binary search. In the worst case, though, delta debugging can require a number of tests proportional to $n^2$ (i.e., $O(n^2)$) – this happens in the case when we are down to character granularity, and we have repeatedly tried to delete all characters, only to find that deleting the last character results in a failure \cite{Zeller2002}. (This is a pretty pathological situation, though.) In general, delta debugging is a robust algorithm that is easy to implement, easy to deploy, and easy to use – provided that the underlying test case is deterministic and runs quickly enough to warrant a number of experiments. As these are the same prerequisites that make fuzzing effective, delta debugging makes an excellent companion to fuzzing.
###Code
quiz("What happens if the function under test does not fail?",
[
"Delta debugging searches for the minimal input"
" that produces the same result",
"Delta debugging starts a fuzzer to find a failure",
"Delta debugging raises an AssertionError",
"Delta debugging runs forever in a loop",
], '0 ** 0 + 1 ** 0 + 0 ** 1 + 1 ** 1')
###Output
_____no_output_____
###Markdown
Indeed, the `DeltaDebuggingReducer` checks if its assumptions hold. If not, an assertion fails.
###Code
with ExpectError():
dd_reducer.reduce("I am a passing input")
###Output
_____no_output_____
###Markdown
Grammar-Based Input ReductionIf the input language is syntactically complex, delta debugging may take several attempts at reduction, and may not be able to reduce inputs at all. In the second half of this chapter, we thus introduce an algorithm named _Grammar-Based Reduction_ (or GRABR for short) that makes use of _grammars_ to reduce syntactically complex inputs. Lexical Reduction vs. Syntactic RulesDespite its general robustness, there are situations in which delta debugging might be inefficient or outright fail. As an example, consider some _expression input_ such as `1 + (2 * 3)`. Delta debugging requires a number of tests to simplify the failure-inducing input, but it eventually returns a minimal input
###Code
expr_input = "1 + (2 * 3)"
dd_reducer = DeltaDebuggingReducer(mystery, log_test=True)
dd_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
Looking at the tests, above, though, only a few of them actually represent syntactically valid arithmetic expressions. In a practical setting, we may want to test a program which actually _parses_ such expressions, and which would _reject_ all invalid inputs. We define a class `EvalMysteryRunner` which first _parses_ the given input (according to the rules of our expression grammar), and _only_ if it fits would it be passed to our original `MysteryRunner`. This simulates a setting in which we test an expression interpreter, and in which only valid inputs can trigger the bug.
###Code
from Grammars import EXPR_GRAMMAR
from Parser import EarleyParser, Parser # minor dependency
class EvalMysteryRunner(MysteryRunner):
def __init__(self) -> None:
self.parser = EarleyParser(EXPR_GRAMMAR)
def run(self, inp: str) -> Tuple[str, Outcome]:
try:
tree, *_ = self.parser.parse(inp)
except SyntaxError:
return (inp, Runner.UNRESOLVED)
return super().run(inp)
eval_mystery = EvalMysteryRunner()
###Output
_____no_output_____
###Markdown
Under these circumstances, it turns out that delta debugging utterly fails. None of the reductions it applies yield a syntactically valid input, so the input as a whole remains as complex as it was before.
###Code
dd_reducer = DeltaDebuggingReducer(eval_mystery, log_test=True)
dd_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
This behavior is possible if the program under test has several constraints regarding input validity. Delta debugging is not aware of these constraints (nor of the input structure in general), so it might violate these constraints again and again. A Grammar-Based Reduction ApproachTo reduce inputs with high syntactical complexity, we use another approach: Rather than reducing the input string, we reduce the _tree_ representing its structure. The general idea is to start with a _derivation tree_ coming from parsing the input, and then _substitute subtrees by smaller subtrees of the same type_. These alternate subtrees can either come1. From the tree itself, or2. By applying an alternate grammar expansion using elements from the tree. Let us show these two strategies using an example. We start with a derivation tree from an arithmetic expression:
###Code
from Grammars import Grammar
from GrammarFuzzer import all_terminals, expansion_to_children, display_tree
derivation_tree, *_ = EarleyParser(EXPR_GRAMMAR).parse(expr_input)
display_tree(derivation_tree)
###Output
_____no_output_____
###Markdown
Simplifying by Replacing Subtrees To simplify this tree, we could replace any `<expr>` symbol up in the tree with some `<expr>` subtree down in the tree. For instance, we could replace the uppermost `<expr>` with its right `<expr>` subtree, yielding the string `(2 * 3)`:
###Code
import copy
new_derivation_tree = copy.deepcopy(derivation_tree)
# We really should have some query language
sub_expr_tree = new_derivation_tree[1][0][1][2]
display_tree(sub_expr_tree)
new_derivation_tree[1][0] = sub_expr_tree
display_tree(new_derivation_tree)
all_terminals(new_derivation_tree)
###Output
_____no_output_____
###Markdown
Replacing one subtree by another only works as long as individual elements such as `` occur multiple times in our tree. In the reduced `new_derivation_tree`, above, we could replace further `` trees only once more. Simplifying by Alternative Expansions A second means to simplify this tree is to apply _alternative expansions_. That is, for a symbol, we check whether there is an alternative expansion with a smaller number of children. Then, we replace the symbol with the alternative expansion, filling in needed symbols from the tree. As an example, consider the `new_derivation_tree` above. The applied expansion for `` has been ::= * Let us replace this with the alternative expansion: ::=
###Code
term_tree = new_derivation_tree[1][0][1][0][1][0][1][1][1][0]
display_tree(term_tree)
shorter_term_tree = term_tree[1][2]
display_tree(shorter_term_tree)
new_derivation_tree[1][0][1][0][1][0][1][1][1][0] = shorter_term_tree
display_tree(new_derivation_tree)
all_terminals(new_derivation_tree)
###Output
_____no_output_____
###Markdown
If we replace derivation subtrees by (smaller) subtrees, and if we search for alternate expansions that again yield smaller subtrees, we can systematically simplify the input. This could be much faster than delta debugging, as our inputs would always be syntactically valid. However, we need a strategy for when to apply which simplification rule. This is what we develop in the remainder of this section. Excursion: A Class for Reducing with Grammars We introduce the `GrammarReducer` class, which is again a `Reducer`. Note that we derive from `CachingReducer`, as the strategy will produce several duplicates.
###Code
class GrammarReducer(CachingReducer):
"""Reduce inputs using grammars"""
def __init__(self, runner: Runner, parser: Parser, *,
log_test: bool = False, log_reduce: bool = False):
"""Constructor.
`runner` is the runner to be used.
`parser` is the parser to be used.
`log_test` - if set, show tests and results.
`log_reduce` - if set, show reduction steps.
"""
super().__init__(runner, log_test=log_test)
self.parser = parser
self.grammar = parser.grammar()
self.start_symbol = parser.start_symbol()
self.log_reduce = log_reduce
self.try_all_combinations = False
###Output
_____no_output_____
###Markdown
A Few HelpersWe define a number of helper functions, which we will need for our strategy. `tree_list_to_string()` does what the name suggests, creating a string from a list of derivation trees:
###Code
from GrammarFuzzer import DerivationTree
def tree_list_to_string(q: List[DerivationTree]) -> str:
return "[" + ", ".join([all_terminals(tree) for tree in q]) + "]"
tree_list_to_string([derivation_tree, derivation_tree])
###Output
_____no_output_____
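###Markdown
As a reminder of the data structure we operate on: a derivation tree is a `(symbol, children)` pair, where `children` is a list of subtrees, an empty list for terminals, or `None` for yet-unexpanded symbols. Here is a small hand-built sketch (an unexpanded tree, not derived from any particular input):
###Code
# Sketch: a hand-built derivation tree -- (symbol, children) pairs,
# with None marking unexpanded nonterminals and [] marking terminals.
manual_tree: DerivationTree = ("<start>",
                               [("<expr>",
                                 [("<expr>", None),
                                  (" + ", []),
                                  ("<term>", None)])])
tree_list_to_string([manual_tree])
###Output
_____no_output_____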
###Markdown
The function `possible_combinations()` takes a list of lists $[[x_1, x_2], [y_1, y_2], \dots]$ and creates a list of combinations $[[x_1, y_1], [x_1, y_2], [x_2, y_1], [x_2, y_2], \dots]$.
###Code
def possible_combinations(list_of_lists: List[List[Any]]) -> List[List[Any]]:
if len(list_of_lists) == 0:
return []
ret = []
for e in list_of_lists[0]:
if len(list_of_lists) == 1:
ret.append([e])
else:
for c in possible_combinations(list_of_lists[1:]):
new_combo = [e] + c
ret.append(new_combo)
return ret
possible_combinations([[1, 2], ['a', 'b']])
###Output
_____no_output_____
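###Markdown
As an aside, the same cross-product could also be obtained from the standard library; the chapter keeps its own recursive version for self-containedness. A minimal sketch using `itertools.product()`:
###Code
# Sketch (aside): itertools.product() yields the same combinations,
# as tuples rather than lists.
import itertools
[list(combo) for combo in itertools.product([1, 2], ['a', 'b'])]
###Output
_____no_output_____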
###Markdown
The functions `number_of_nodes()` and `max_height()` return the number of nodes and the maximum height of the given tree, respectively.
###Code
def number_of_nodes(tree: DerivationTree) -> int:
(symbol, children) = tree
if children is None:
return 1
return 1 + sum([number_of_nodes(c) for c in children])
number_of_nodes(derivation_tree)
def max_height(tree: DerivationTree) -> int:
(symbol, children) = tree
if children is None or len(children) == 0:
return 1
return 1 + max([max_height(c) for c in children])
max_height(derivation_tree)
###Output
_____no_output_____
###Markdown
Simplification StrategiesLet us now implement our two simplification strategies – replacing subtrees and alternate expansions. Finding SubtreesThe method `subtrees_with_symbol()` returns all subtrees in the given tree whose root is equal to the given symbol. If `ignore_root` is set (default), then the root node of `tree` is not compared against. (The `depth` parameter will be discussed below.)
###Code
class GrammarReducer(GrammarReducer):
def subtrees_with_symbol(self, tree: DerivationTree,
symbol: str, depth: int = -1,
ignore_root: bool = True) -> List[DerivationTree]:
"""Find all subtrees in `tree` whose root is `symbol`.
If `ignore_root` is true, ignore the root node of `tree`."""
ret = []
(child_symbol, children) = tree
if depth <= 0 and not ignore_root and child_symbol == symbol:
ret.append(tree)
# Search across all children
if depth != 0 and children is not None:
for c in children:
ret += self.subtrees_with_symbol(c,
symbol,
depth=depth - 1,
ignore_root=False)
return ret
###Output
_____no_output_____
###Markdown
Here's an example: These are all subtrees with `<term>` in our derivation tree `derivation_tree`.
###Code
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>")]
###Output
_____no_output_____
###Markdown
If we want to replace `<term>` subtrees to simplify the tree, these are the subtrees we could replace them with. Alternate Expansions Our second strategy, simplifying by alternate expansions, is a bit more complex. We first fetch the possible expansions for the given symbol (starting with the ones with the fewest children). For each expansion, we fill in values for the symbols from the subtree (using `subtrees_with_symbol()`, above). We then pick the first possible combination (or _all_ combinations, if the attribute `try_all_combinations` is set).
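The sorting step this relies on can be illustrated in isolation; here is a small sketch using `EXPR_GRAMMAR` and `expansion_to_children()` from above (an aside, not part of the reducer itself):
###Code
# Sketch: order the expansions of <term> by their number of children,
# just as alternate_reductions() below will do.
sorted(EXPR_GRAMMAR["<term>"],
       key=lambda expansion: len(expansion_to_children(expansion)))
###Output
_____no_output_____
###Markdown
Expansions with fewer children come first, so simpler alternatives are tried before more complex ones.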
###Code
class GrammarReducer(GrammarReducer):
def alternate_reductions(self, tree: DerivationTree, symbol: str,
depth: int = -1):
reductions = []
expansions = self.grammar.get(symbol, [])
expansions.sort(
key=lambda expansion: len(
expansion_to_children(expansion)))
for expansion in expansions:
expansion_children = expansion_to_children(expansion)
match = True
new_children_reductions = []
for (alt_symbol, _) in expansion_children:
child_reductions = self.subtrees_with_symbol(
tree, alt_symbol, depth=depth)
if len(child_reductions) == 0:
match = False # Child not found; cannot apply rule
break
new_children_reductions.append(child_reductions)
if not match:
continue # Try next alternative
# Use the first suitable combination
for new_children in possible_combinations(new_children_reductions):
new_tree = (symbol, new_children)
if number_of_nodes(new_tree) < number_of_nodes(tree):
reductions.append(new_tree)
if not self.try_all_combinations:
break
# Sort by number of nodes
reductions.sort(key=number_of_nodes)
return reductions
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
###Output
_____no_output_____
###Markdown
Here are _all_ combinations for `<term>`:
###Code
grammar_reducer.try_all_combinations = True
print([all_terminals(t)
for t in grammar_reducer.alternate_reductions(derivation_tree, "<term>")])
###Output
_____no_output_____
###Markdown
The default, though, is simply to return the first of these:
###Code
grammar_reducer.try_all_combinations = False
[all_terminals(t) for t in grammar_reducer.alternate_reductions(
derivation_tree, "<term>")]
###Output
_____no_output_____
###Markdown
Both Strategies Together Let us now merge both strategies. To replace a subtree with a given symbol, we first search for already existing subtrees (using `subtrees_with_symbol()`); then we go for alternate expansions (using `alternate_reductions()`).
###Code
class GrammarReducer(GrammarReducer):
def symbol_reductions(self, tree: DerivationTree, symbol: str,
depth: int = -1):
"""Find all expansion alternatives for the given symbol"""
reductions = (self.subtrees_with_symbol(tree, symbol, depth=depth)
+ self.alternate_reductions(tree, symbol, depth=depth))
# Filter duplicates
unique_reductions = []
for r in reductions:
if r not in unique_reductions:
unique_reductions.append(r)
return unique_reductions
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
###Output
_____no_output_____
###Markdown
These are the possible reductions for `<expr>` nodes. Note how we first return subtrees (`1 + (2 * 3)`, `(2 * 3)`, `2 * 3`) before going for alternate expansions of `<expr>` (`1`).
###Code
reductions = grammar_reducer.symbol_reductions(derivation_tree, "<expr>")
tree_list_to_string([r for r in reductions])
###Output
_____no_output_____
###Markdown
These are the possible reductions for `<term>` nodes. Again, we first have subtrees of the derivation tree, followed by the alternate expansion `1 * 1`.
###Code
reductions = grammar_reducer.symbol_reductions(derivation_tree, "<term>")
tree_list_to_string([r for r in reductions])
###Output
_____no_output_____
###Markdown
The Reduction StrategyWe are now able to return a number of alternatives for each symbol in the tree. This is what we apply in the core function of our reduction strategy, `reduce_subtree()`. Starting with `subtree`, for every child, we find possible reductions. For every reduction, we replace the child with the reduction and test the resulting (full) tree. If it fails, our reduction was successful; otherwise, we put the child back into place and try out the next reduction. Eventually, we apply `reduce_subtree()` on all children, reducing these as well.
###Code
class GrammarReducer(GrammarReducer):
def reduce_subtree(self, tree: DerivationTree,
subtree: DerivationTree, depth: int = -1):
symbol, children = subtree
if children is None or len(children) == 0:
return False
if self.log_reduce:
print("Reducing", all_terminals(subtree), "with depth", depth)
reduced = False
while True:
reduced_child = False
for i, child in enumerate(children):
if child is None:
continue
(child_symbol, _) = child
for reduction in self.symbol_reductions(
child, child_symbol, depth):
if number_of_nodes(reduction) >= number_of_nodes(child):
continue
# Try this reduction
if self.log_reduce:
print(
"Replacing",
all_terminals(
children[i]),
"by",
all_terminals(reduction))
children[i] = reduction
if self.test(all_terminals(tree)) == Runner.FAIL:
# Success
if self.log_reduce:
print("New tree:", all_terminals(tree))
reduced = reduced_child = True
break
else:
# Didn't work out - restore
children[i] = child
if not reduced_child:
if self.log_reduce:
print("Tried all alternatives for", all_terminals(subtree))
break
# Run recursively
for c in children:
if self.reduce_subtree(tree, c, depth):
reduced = True
return reduced
###Output
_____no_output_____
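###Markdown
As an illustrative aside before we add the drivers, we can already exercise `reduce_subtree()` directly on a copy of our derivation tree, using the (unrestricted) `mystery` runner from above; this sketch is not part of the chapter's own flow:
###Code
# Sketch (aside): run reduce_subtree() on a copy of the derivation tree.
# The tree is modified in place; the drivers defined below wrap this conveniently.
demo_reducer = GrammarReducer(mystery, EarleyParser(EXPR_GRAMMAR))
demo_tree = copy.deepcopy(derivation_tree)
demo_reducer.reduce_subtree(demo_tree, demo_tree)
all_terminals(demo_tree)
###Output
_____no_output_____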
###Markdown
All we now need is a few drivers. The method `reduce_tree()` is the main entry point into `reduce_subtree()`:
###Code
class GrammarReducer(GrammarReducer):
def reduce_tree(self, tree):
return self.reduce_subtree(tree, tree)
###Output
_____no_output_____
###Markdown
The custom method `parse()` turns a given input into a derivation tree:
###Code
class GrammarReducer(GrammarReducer):
def parse(self, inp):
tree, *_ = self.parser.parse(inp)
if self.log_reduce:
print(all_terminals(tree))
return tree
###Output
_____no_output_____
###Markdown
The method `reduce()` is the one single entry point, parsing the input and then reducing it.
###Code
class GrammarReducer(GrammarReducer):
def reduce(self, inp):
tree = self.parse(inp)
self.reduce_tree(tree)
return all_terminals(tree)
###Output
_____no_output_____
###Markdown
End of Excursion Let us try out our `GrammarReducer` class in practice on our input `expr_input` and the `eval_mystery` runner from above. How quickly can we reduce it?
###Code
expr_input
grammar_reducer = GrammarReducer(
eval_mystery,
EarleyParser(EXPR_GRAMMAR),
log_test=True)
grammar_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
Success! In only five steps, our `GrammarReducer` reduces the input to the minimum that causes the failure. Note how all tests are syntactically valid by construction, avoiding the `UNRESOLVED` outcomes that cause delta debugging to stall. A Depth-Oriented Strategy Even if five steps are already good, we can still do better. If we look at the log above, we see that after test `2`, where the input (tree) is reduced to `2 * 3`, our `GrammarReducer` first tries to replace the tree with `2` and `3`, which are the alternate `` subtrees. This may work, of course; but if there are many possible subtrees, our strategy will spend quite some time trying one after the other. Delta debugging, as introduced above, follows the idea of trying to cut inputs approximately in half, and thus quickly proceeds towards a minimal input. By replacing a tree with much smaller subtrees, we _could_ possibly reduce a tree significantly, but may need several attempts to do so. A better strategy is to only consider _large_ subtrees first – both for the subtree replacement as well as for alternate expansions. To find such _large_ subtrees, we limit the _depth_ by which we search for possible replacements in the subtree – first, by looking at the direct descendants, later at lower descendants. This is the role of the `depth` parameter used in `subtrees_with_symbol()` and passed through the invoking functions. If set, _only_ symbols at the given depth are returned. Here's an example, starting again with our derivation tree `derivation_tree`:
###Code
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
display_tree(derivation_tree)
###Output
_____no_output_____
###Markdown
At a depth of 1, there is no `<term>` symbol:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=1)]
###Output
_____no_output_____
###Markdown
At a depth of 2, we have the `<term>` subtree on the left-hand side:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=2)]
###Output
_____no_output_____
###Markdown
At a depth of 3, we have the `<term>` subtree on the right-hand side:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=3)]
###Output
_____no_output_____
###Markdown
The idea is now to start with a depth of 0, subsequently increasing it as we proceed:
###Code
class GrammarReducer(GrammarReducer):
def reduce_tree(self, tree):
depth = 0
while depth < max_height(tree):
reduced = self.reduce_subtree(tree, tree, depth)
if reduced:
depth = 0 # Start with new tree
else:
depth += 1 # Extend search for subtrees
return tree
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_test=True)
grammar_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
We see that a depth-oriented strategy needs even fewer steps in our setting. Comparing StrategiesWe close by demonstrating the difference between text-based delta debugging and our grammar-based reduction. We build a very long expression:
###Code
from GrammarFuzzer import GrammarFuzzer
long_expr_input = GrammarFuzzer(EXPR_GRAMMAR, min_nonterminals=100).fuzz()
long_expr_input
###Output
_____no_output_____
###Markdown
With grammars, we need only a handful of tests to find the failure-inducing input:
###Code
from Timer import Timer
grammar_reducer = GrammarReducer(eval_mystery, EarleyParser(EXPR_GRAMMAR))
with Timer() as grammar_time:
print(grammar_reducer.reduce(long_expr_input))
grammar_reducer.tests
grammar_time.elapsed_time()
###Output
_____no_output_____
###Markdown
Delta debugging, in contrast, requires orders of magnitude more tests (and consequently, time). Again, the reduction is nowhere near as precise as it is with the grammar-based reducer.
###Code
dd_reducer = DeltaDebuggingReducer(eval_mystery)
with Timer() as dd_time:
print(dd_reducer.reduce(long_expr_input))
dd_reducer.tests
dd_time.elapsed_time()
###Output
_____no_output_____
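###Markdown
To make the contrast explicit, we can also compare the raw number of tests both reducers needed, using the `tests` counters maintained by the `Reducer` base class (a small sketch, an aside to the timing measurements above):
###Code
# Sketch: number of tests each reducer needed on long_expr_input.
print("Grammar-based reduction:", grammar_reducer.tests, "tests")
print("Delta debugging:        ", dd_reducer.tests, "tests")
###Output
_____no_output_____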
###Markdown
We see that if an input is syntactically complex, using a grammar to reduce inputs is the best way to go. SynopsisA _reducer_ takes a failure-inducing input and reduces it to the minimum that still reproduces the failure. This chapter provides `Reducer` classes that implement such reducers. Here is a simple example: An arithmetic expression causes an error in the Python interpreter:
###Code
!python -c 'x = 1 + 2 * 3 / 0'
###Output
_____no_output_____
###Markdown
Can we reduce this input to a minimum? To use a `Reducer`, one first has to build a `Runner` whose outcome is `FAIL` if the precise error occurs. We therefore build a `ZeroDivisionRunner` whose `run()` method will specifically return a `FAIL` outcome if a `ZeroDivisionError` occurs.
###Code
from Fuzzer import ProgramRunner
import subprocess
class ZeroDivisionRunner(ProgramRunner):
"""Make outcome 'FAIL' if ZeroDivisionError occurs"""
def run(self, inp: str = "") -> Tuple[subprocess.CompletedProcess, Outcome]:
process, outcome = super().run(inp)
if process.stderr.find('ZeroDivisionError') >= 0:
outcome = 'FAIL'
return process, outcome
###Output
_____no_output_____
###Markdown
If we feed this expression into a `ZeroDivisionRunner`, it will produce an outcome of `FAIL` as designed.
###Code
python_input = "x = 1 + 2 * 3 / 0"
python_runner = ZeroDivisionRunner("python")
process, outcome = python_runner.run(python_input)
outcome
###Output
_____no_output_____
###Markdown
Delta Debugging is a simple and robust reduction algorithm. We can tie a `DeltaDebuggingReducer` to this runner, and have it determine the substring that causes the `python` program to fail:
###Code
dd = DeltaDebuggingReducer(python_runner)
dd.reduce(python_input)
###Output
_____no_output_____
###Markdown
The input is reduced to the minimum: We get the essence of the division by zero.
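As a sanity check, we can feed the reduced string back into the runner and confirm that it still produces the `FAIL` outcome; a small sketch (the exact reduced string may vary):
###Code
# Sketch: re-run the reduction and verify the result still triggers the failure.
reduced_python_input = dd.reduce(python_input)
process, outcome = python_runner.run(reduced_python_input)
outcome  # expected: 'FAIL'
###Output
_____no_output_____
###Markdown
By construction, delta debugging only ever returns inputs whose test outcome is `FAIL`, so this check succeeds.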
###Code
# ignore
from ClassDiagram import display_class_hierarchy
# ignore
display_class_hierarchy([DeltaDebuggingReducer, GrammarReducer],
public_methods=[
Reducer.__init__,
Reducer.reset,
Reducer.reduce,
DeltaDebuggingReducer.reduce,
GrammarReducer.__init__,
GrammarReducer.reduce,
],
types={
'DerivationTree': DerivationTree,
'Grammar': Grammar,
'Outcome': Outcome,
},
project='fuzzingbook')
###Output
_____no_output_____
###Markdown
Reducing Failure-Inducing InputsBy construction, fuzzers create inputs that may be hard to read. This causes issues during _debugging_, when a human has to analyze the exact cause of the failure. In this chapter, we present techniques that _automatically reduce and simplify failure-inducing inputs to a minimum_ in order to ease debugging. **Prerequisites*** As reduction is typically used together with fuzzing, reading the [chapter on basic fuzzing](Fuzzer.ipynb) is a good idea.* The simple "delta debugging" technique for reduction has no specific prerequisites.* The later grammar-based techniques require knowledge on [derivation trees](GrammarFuzzer.ipynb) and [parsing](Parser.ipynb). Why Reducing?At this point, we have seen a number of test generation techniques that all in some form produce inputs in order to trigger failures. If they are successful – that is, the program actually fails – we must find out why the failure occurred and how to fix it. Here's an example of such a situation. We have a `MysteryRunner` class with a `run()` method that – given its code – can occasionally fail. But under which circumstances does this actually happen? We have deliberately obscured the exact condition in order to make this non-obvious.
###Code
import fuzzingbook_utils
from Fuzzer import RandomFuzzer, Runner
class MysteryRunner(Runner):
def run(self, inp):
x = inp.find(chr(0o17 + 0o31))
y = inp.find(chr(0o27 + 0o22))
if x >= 0 and y >= 0 and x < y:
return (inp, Runner.FAIL)
else:
return (inp, Runner.PASS)
###Output
_____no_output_____
###Markdown
Let us fuzz the function until we find a failing input.
###Code
mystery = MysteryRunner()
random_fuzzer = RandomFuzzer()
while True:
inp = random_fuzzer.fuzz()
result, outcome = mystery.run(inp)
if outcome == mystery.FAIL:
break
failing_input = result
failing_input
###Output
_____no_output_____
###Markdown
Something in this input causes `MysteryRunner` to fail. But what is it? Manual Input ReductionOne important step in the debugging process is _reduction_ – that is, to identify those circumstances of a failure that are relevant for the failure to occur, and to _omit_ (if possible) those parts that are not. As Kernighan and Pike \cite{Kernighan1999} put it:> For every circumstance of the problem, check whether it is relevant for the problem to occur. If it is not, remove it from the problem report or the test case in question. Specifically for inputs, they suggest a _divide and conquer_ process:> Proceed by binary search. Throw away half the input and see if the output is still wrong; if not, go back to the previous state and discard the other half of the input.This is something we can easily try out. For instance, we can see whether the error still occurs if we only feed in the first half:
###Code
half_length = len(failing_input) // 2 # // is integer division
first_half = failing_input[:half_length]
mystery.run(first_half)
###Output
_____no_output_____
###Markdown
Nope – the first half alone does not suffice. Maybe the second half?
###Code
second_half = failing_input[half_length:]
mystery.run(second_half)
###Output
_____no_output_____
###Markdown
This did not go so well either. We may still proceed by cutting away _smaller chunks_ – say, one character after another. If our test is deterministic and easily repeated, it is clear that this process eventually will yield a reduced input. But still, it is a rather inefficient process, especially for long inputs. What we need is a _strategy_ that effectively minimizes a failure-inducing input – a strategy that can be automated. Delta Debugging One strategy to effectively reduce failure-inducing inputs is _delta debugging_ \cite{Zeller2002}. Delta Debugging implements the "binary search" strategy, as listed above, but with a twist: If neither half fails (also as above), it keeps on cutting away smaller and smaller chunks from the input, until it eliminates individual characters. Thus, after cutting away the first half, we cut away the first quarter, the second quarter, and so on. Let us illustrate this on our example, and see what happens if we cut away the first quarter.
###Code
quarter_length = len(failing_input) // 4
input_without_first_quarter = failing_input[quarter_length:]
mystery.run(input_without_first_quarter)
###Output
_____no_output_____
###Markdown
Ah! This has failed, and reduced our failing input by 25%. Let's remove another quarter.
###Code
input_without_first_and_second_quarter = failing_input[quarter_length * 2:]
mystery.run(input_without_first_and_second_quarter)
###Output
_____no_output_____
###Markdown
This is not too surprising, as we had that one before:
###Code
second_half
input_without_first_and_second_quarter
###Output
_____no_output_____
###Markdown
How about removing the third quarter, then?
###Code
input_without_first_and_third_quarter = failing_input[quarter_length:
quarter_length * 2] + failing_input[quarter_length * 3:]
mystery.run(input_without_first_and_third_quarter)
###Output
_____no_output_____
###Markdown
Ok. Let us remove the fourth quarter.
###Code
input_without_first_and_fourth_quarter = failing_input[quarter_length:quarter_length * 3]
mystery.run(input_without_first_and_fourth_quarter)
###Output
_____no_output_____
###Markdown
Yes! This has succeeded. Our input is now 50% smaller. We have now tried to remove pieces that make up $\frac{1}{2}$ and $\frac{1}{4}$ of the original failing string. In the next iteration, we would go and remove even smaller pieces – $\frac{1}{8}$, $\frac{1}{16}$ and so on. We continue until we are down to $\frac{1}{97}$ – that is, individual characters. However, this is something we happily let a computer do for us. We first introduce a `Reducer` class as an abstract superclass for all kinds of reducers. The `test()` method runs a single test (with logging, if so wanted); the `reduce()` method will eventually reduce an input to the minimum.
###Code
class Reducer(object):
def __init__(self, runner, log=False):
"""Attach reducer to the given `runner`"""
self.runner = runner
self.log = log
self.reset()
def reset(self):
self.tests = 0
def test(self, inp):
result, outcome = self.runner.run(inp)
self.tests += 1
if self.log:
print("Test #%d" % self.tests, repr(inp), repr(len(inp)), outcome)
return outcome
def reduce(self, inp):
self.reset()
# Default: Don't reduce
return inp
###Output
_____no_output_____
###Markdown
The `CachingReducer` variant saves test results, such that we don't have to run the same tests again and again:
###Code
class CachingReducer(Reducer):
def reset(self):
super().reset()
self.cache = {}
def test(self, inp):
if inp in self.cache:
return self.cache[inp]
outcome = super().test(inp)
self.cache[inp] = outcome
return outcome
###Output
_____no_output_____
###Markdown
Here comes the _Delta Debugging_ reducer. Delta Debugging implements the strategy sketched above: It first removes larger chunks of size $\frac{1}{2}$; if this does not fail, then we proceed to chunks of size $\frac{1}{4}$, then $\frac{1}{8}$ and so on. Our implementation uses almost the same Python code as Zeller in \cite{Zeller2011}; the only difference is that it has been adapted to work on Python 3 and our `Runner` framework. The variable `n` (initially 2) indicates the granularity – in each step, chunks of size $\frac{1}{n}$ are cut away. If none of the tests fails (`some_complement_is_failing` is False), then `n` is doubled – until it reaches the length of the input.
###Code
class DeltaDebuggingReducer(CachingReducer):
def reduce(self, inp):
self.reset()
assert self.test(inp) == Runner.FAIL
n = 2 # Initial granularity
while len(inp) >= 2:
start = 0
subset_length = len(inp) / n
some_complement_is_failing = False
while start < len(inp):
complement = inp[:int(start)] + \
inp[int(start + subset_length):]
if self.test(complement) == Runner.FAIL:
inp = complement
n = max(n - 1, 2)
some_complement_is_failing = True
break
start += subset_length
if not some_complement_is_failing:
if n == len(inp):
break
n = min(n * 2, len(inp))
return inp
###Output
_____no_output_____
###Markdown
To see how the `DeltaDebuggingReducer` works, let us run it on our failing input. With each step, we see how the remaining input gets smaller and smaller, until only two characters remain:
###Code
dd_reducer = DeltaDebuggingReducer(mystery, log=True)
dd_reducer.reduce(failing_input)
###Output
_____no_output_____
###Markdown
Now we know why `MysteryRunner` fails – it suffices that the input contains two matching parentheses. Delta Debugging determines this in 29 steps. Its result is _1-minimal_, meaning that every character contained is required to produce the error; removing any (as seen in tests `27` and `28`, above) no longer makes the test fail. This property is guaranteed by the delta debugging algorithm, which in its last stage always tries to delete characters one by one. A reduced test case such as the one above has many advantages:* A reduced test case __reduces the _cognitive load_ of the programmer__. The test case is shorter and focused, and thus does not burden the programmer with irrelevant details. A reduced input typically leads to shorter executions and smaller program states, both of which reduce the search space when it comes to understanding the bug. In our case, we have eliminated lots of irrelevant input – only the two characters the reduced input contains are relevant.* A reduced test case __is easier to communicate__. All one needs here is the summary: `MysteryRunner fails on "()"`, which is much better than `MysteryRunner fails on a 4100-character input (attached)`.* A reduced test case helps in __identifying duplicates__. If similar bugs have been reported already, and all of them have been reduced to the same cause (namely that the input contains matching parentheses), then it becomes obvious that all these bugs are different symptoms of the same underlying cause – and would all be resolved at once with one code fix. How effective is delta debugging? In the best case (when either the left half or the right half fails), the number of tests is logarithmically proportional to the length $n$ of an input (i.e., $O(\log_2 n)$); this is the same complexity as binary search. In the worst case, though, delta debugging can require a number of tests proportional to $n^2$ (i.e., $O(n^2)$) – this happens when we are down to character granularity and repeatedly try to delete all characters, only to find that deleting the last character results in a failure \cite{Zeller2002}. (This is a pretty pathological situation, though.) In general, delta debugging is a very robust algorithm that is easy to implement, easy to deploy, and easy to use – provided that the underlying test case is deterministic and runs quickly enough to warrant a number of experiments. As these are the same prerequisites that make fuzzing effective, delta debugging makes an excellent companion to fuzzing. Grammar-Based Input Reduction\todo{Add}
###Code
from Parser import PEGParser
from GrammarFuzzer import all_terminals, expansion_to_children
# For logging the queue
def queue_to_string(q):
return "[" + ", ".join([all_terminals(tree) for tree in q]) + "]"
derivation_tree = ("<start>",
[("<expr>",
[("<expr>", None),
(" + ", []),
("<term>", None)]
)])
queue_to_string([derivation_tree, derivation_tree])
from Grammars import START_SYMBOL
class GrammarReducer(Reducer):
def __init__(self, runner, grammar, start_symbol=START_SYMBOL, log=False):
super().__init__(runner, log=log)
self.grammar = grammar
self.start_symbol = start_symbol
self.parser = PEGParser(grammar, start_symbol=start_symbol)
class GrammarReducer(GrammarReducer):
def derivation_reductions(self, tree):
(symbol, children) = tree
if len(children) == 0:
return [] # Terminal symbol
print("Trying alternative expansions for " + symbol)
# Possible expansions for this symbol
expansions = self.grammar[symbol]
print("Expansions: " + repr(expansions))
alternatives = \
[expansion_to_children(expansion) for expansion in expansions]
reductions = []
for alternative in alternatives:
if len(alternative) > len(children):
continue # New alternative has more children
match = True
new_children_reductions = []
# print("Trying alternative expansion " + queue_to_string(alternative))
for alt_child in alternative:
(alt_symbol, _) = alt_child
child_reductions = subtrees_with_symbol(alt_symbol, tree)
if len(child_reductions) == 0:
# Child not found; cannot apply rule
match = False
break
# print("Found alternatives " + queue_to_string(child_reductions))
new_children_reductions.append(child_reductions)
if not match:
continue # Try next alternative
# Go through the possible combinations
for new_children in possible_combinations(new_children_reductions):
new_tree = (symbol, new_children)
if number_of_nodes(new_tree) >= number_of_nodes(tree):
continue # No reduction
reductions.append(new_tree)
# Apply this recursively
if children is not None:
for i in range(0, len(children)):
child = children[i]
child_reductions = self.derivation_reductions(child)
for reduced_child in child_reductions:
new_children = (children[:i]
+ [reduced_child]
+ children[i + 1:])
reductions.append((symbol, new_children))
# Filter duplicates
unique_reductions = []
for r in reductions:
if r not in unique_reductions:
unique_reductions.append(r)
reductions = unique_reductions
if len(reductions) > 0:
# We have a new expansion
print("Can reduce " + symbol + " " + all_terminals(tree)
+ " to reduced subtrees " + queue_to_string(reductions))
return reductions
class GrammarReducer(GrammarReducer):
def reductions(self, tree):
return self.derivation_reductions(tree)
class GrammarReducer(GrammarReducer):
# Reduce with respect to a given test
def reduce_tree(self, tree):
# Find possible reductions
smallest_tree = tree
tree_reductions = self.reductions(tree)
print("Alternatives: " + queue_to_string(tree_reductions))
while len(tree_reductions) > 0:
t = tree_reductions[0]
tree_reductions = tree_reductions[1:]
s = all_terminals(t)
if self.test(s) == Runner.FAIL:
# Found new smallest tree; try to reduce that one further
smallest_tree = t
tree_reductions = self.reductions(t)
tree_reductions.sort(key=lambda tree: -number_of_nodes(tree))
print("New smallest tree: " + all_terminals(smallest_tree))
return smallest_tree
class GrammarReducer(GrammarReducer):
def parse(self, inp):
tree = self.parser.parse(inp)[0]
print(all_terminals(tree))
return tree
def reduce(self, inp):
tree = self.parse(inp)
smallest_tree = self.reduce_tree(tree)
return all_terminals(smallest_tree)
# Find all subtrees in TREE whose root is SEARCH_SYMBOL.
# If IGNORE_ROOT is true, ignore the root node of TREE.
def subtrees_with_symbol(search_symbol, tree, ignore_root=True):
ret = []
(symbol, children) = tree
if not ignore_root and symbol == search_symbol:
ret.append(tree)
# Search across all children
if children is not None:
for c in children:
ret += subtrees_with_symbol(search_symbol, c, False)
return ret
# convert a list [[X1, X2], [Y1, Y2], ...]
# into [X1, Y1], [X1, Y2], [X2, Y1], [X2, Y2], ...
def possible_combinations(list_of_lists):
if len(list_of_lists) == 0:
return []
# print(list_of_lists)
ret = []
for e in list_of_lists[0]:
if len(list_of_lists) == 1:
ret.append([e])
else:
for c in possible_combinations(list_of_lists[1:]):
new_combo = [e] + c
# print("New combo: ", repr(new_combo))
ret.append(new_combo)
return ret
# Return the number of nodes
def number_of_nodes(tree):
(symbol, children) = tree
n = 1
for c in children:
n += number_of_nodes(c)
return n
from Grammars import EXPR_GRAMMAR
inp = "1 + (2 * 3)"
grammar_reducer = GrammarReducer(mystery, EXPR_GRAMMAR)
grammar_reducer.reduce(inp)
###Output
_____no_output_____
###Markdown
_Section 4_\todo{Add} Lessons Learned* _Lesson one_* _Lesson two_* _Lesson three_ Next Steps_Link to subsequent chapters (notebooks) here, as in:_* [use _mutations_ on existing inputs to get more valid inputs](MutationFuzzer.ipynb)* [use _grammars_ (i.e., a specification of the input format) to get even more valid inputs](Grammars.ipynb)* [reduce _failing inputs_ for efficient debugging](Reducer.ipynb) BackgroundThe delta debugging algorithm discussed here stems from \cite{Zeller2002}; actually, this is the exact Python implementation as used by Zeller in 2002. The idea of systematically reducing inputs has been discovered a number of times, although not as automatic and generic as delta debugging. \cite{Slutz1998}, for instance, discusses systematic reduction of SQL statements for SQL databases; the general process as manual work is well described by \cite{Kernighan1999}.\cite{Herfert2017}\todo{Add more: hierarchical DD, Csmith / C-reduce} ExercisesClose the chapter with a few exercises such that people have things to do. To make the solutions hidden (to be revealed by the user), have them start with```markdown**Solution.**```Your solution can then extend up to the next title (i.e., any markdown cell starting with ``).Running `make metadata` will automatically add metadata to the cells such that the cells will be hidden by default, and can be uncovered by the user. The button will be introduced above the solution. Exercise 1: _Title__Text of the exercise_
###Code
# Some code that is part of the exercise
pass
###Output
_____no_output_____
###Markdown
_Some more text for the exercise_ **Solution.** _Some text for the solution_
###Code
# Some code for the solution
2 + 2
###Output
_____no_output_____
###Markdown
Reducing Failure-Inducing InputsBy construction, fuzzers create inputs that may be hard to read. This causes issues during _debugging_, when a human has to analyze the exact cause of the failure. In this chapter, we present techniques that _automatically reduce and simplify failure-inducing inputs to a minimum_ in order to ease debugging. **Prerequisites*** The simple "delta debugging" technique for reduction has no specific prerequisites.* As reduction is typically used together with fuzzing, reading the [chapter on basic fuzzing](Fuzzer.ipynb) is a good idea.* The later grammar-based techniques require knowledge on [derivation trees](GrammarFuzzer.ipynb) and [parsing](Parser.ipynb). SynopsisTo [use the code provided in this chapter](Importing.ipynb), write```python>>> from fuzzingbook.Reducer import ```and then make use of the following features.A _reducer_ takes a failure-inducing input and reduces it to the minimum that still reproduces the failure. This chapter provides `Reducer` classes that implement such reducers.Here is a simple example: An arithmetic expression causes an error in the Python interpreter:```python>>> !python -c 'x = 1 + 2 * 3 / 0'```Can we reduce this input to a minimum? To use a `Reducer`, one first has to build a `Runner` whose outcome is `FAIL` if the precise error occurs. We therefore build a `ZeroDivisionRunner` whose `run()` method will specifically return a `FAIL` outcome if a `ZeroDivisionError` occurs.```python>>> from Fuzzer import ProgramRunner>>> class ZeroDivisionRunner(ProgramRunner):>>> """Make outcome 'FAIL' if ZeroDivisionError occurs""">>> def run(self, inp=""):>>> result, outcome = super().run(inp)>>> if result.stderr.find('ZeroDivisionError') >= 0:>>> outcome = 'FAIL'>>> return result, outcome```If we feed this expression into a `ZeroDivisionRunner`, it will produce an outcome of `FAIL` as designed.```python>>> python_input = "x = 1 + 2 * 3 / 0">>> python_runner = ZeroDivisionRunner("python")>>> result, outcome = python_runner.run(python_input)>>> outcome```Delta Debugging is a simple and robust reduction algorithm. We can tie a `DeltaDebuggingReducer` to this runner, and have it determine the substring that causes the `python` program to fail:```python>>> dd = DeltaDebuggingReducer(python_runner)>>> dd.reduce(python_input)```The input is reduced to the minimum: We get the essence of the division by zero. Why Reducing?At this point, we have seen a number of test generation techniques that all in some form produce inputs in order to trigger failures. If they are successful – that is, the program actually fails – we must find out why the failure occurred and how to fix it. Here's an example of such a situation. We have a class `MysteryRunner` with a `run()` method that – given its code – can occasionally fail. But under which circumstances does this actually happen? We have deliberately obscured the exact condition in order to make this non-obvious.
###Code
import bookutils
from Fuzzer import RandomFuzzer, Runner
import re
class MysteryRunner(Runner):
def run(self, inp):
x = inp.find(chr(0o17 + 0o31))
y = inp.find(chr(0o27 + 0o22))
if x >= 0 and y >= 0 and x < y:
return (inp, Runner.FAIL)
else:
return (inp, Runner.PASS)
###Output
_____no_output_____
###Markdown
Let us fuzz the function until we find a failing input.
###Code
mystery = MysteryRunner()
random_fuzzer = RandomFuzzer()
while True:
inp = random_fuzzer.fuzz()
result, outcome = mystery.run(inp)
if outcome == mystery.FAIL:
break
failing_input = result
failing_input
###Output
_____no_output_____
###Markdown
Something in this input causes `MysteryRunner` to fail. But what is it? Manual Input ReductionOne important step in the debugging process is _reduction_ – that is, to identify those circumstances of a failure that are relevant for the failure to occur, and to _omit_ (if possible) those parts that are not. As Kernighan and Pike \cite{Kernighan1999} put it:> For every circumstance of the problem, check whether it is relevant for the problem to occur. If it is not, remove it from the problem report or the test case in question. Specifically for inputs, they suggest a _divide and conquer_ process:> Proceed by binary search. Throw away half the input and see if the output is still wrong; if not, go back to the previous state and discard the other half of the input.This is something we can easily try out, using our last generated input:
###Code
failing_input
###Output
_____no_output_____
###Markdown
For instance, we can see whether the error still occurs if we only feed in the first half:
###Code
half_length = len(failing_input) // 2 # // is integer division
first_half = failing_input[:half_length]
mystery.run(first_half)
###Output
_____no_output_____
###Markdown
Nope – the first half alone does not suffice. Maybe the second half?
###Code
second_half = failing_input[half_length:]
mystery.run(second_half)
###Output
_____no_output_____
###Markdown
This did not go so well either. We may still proceed by cutting away _smaller chunks_ – say, one character after another. If our test is deterministic and easily repeated, it is clear that this process eventually will yield a reduced input. But still, it is a rather inefficient process, especially for long inputs. What we need is a _strategy_ that effectively minimizes a failure-inducing input – a strategy that can be automated. Delta Debugging One strategy to effectively reduce failure-inducing inputs is _delta debugging_ \cite{Zeller2002}. Delta Debugging implements the "binary search" strategy, as listed above, but with a twist: If neither half fails (also as above), it keeps on cutting away smaller and smaller chunks from the input, until it eliminates individual characters. Thus, after cutting away the first half, we cut away the first quarter, the second quarter, and so on. Let us illustrate this on our example, and see what happens if we cut away the first quarter.
###Code
quarter_length = len(failing_input) // 4
input_without_first_quarter = failing_input[quarter_length:]
mystery.run(input_without_first_quarter)
###Output
_____no_output_____
###Markdown
Ah! This has failed, and reduced our failing input by 25%. Let's remove another quarter.
###Code
input_without_first_and_second_quarter = failing_input[quarter_length * 2:]
mystery.run(input_without_first_and_second_quarter)
###Output
_____no_output_____
###Markdown
This is not too surprising, as we had that one before:
###Code
second_half
input_without_first_and_second_quarter
###Output
_____no_output_____
###Markdown
How about removing the third quarter, then?
###Code
input_without_first_and_third_quarter = failing_input[quarter_length:
quarter_length * 2] + failing_input[quarter_length * 3:]
mystery.run(input_without_first_and_third_quarter)
###Output
_____no_output_____
###Markdown
Ok. Let us remove the fourth quarter.
###Code
input_without_first_and_fourth_quarter = failing_input[quarter_length:quarter_length * 3]
mystery.run(input_without_first_and_fourth_quarter)
###Output
_____no_output_____
###Markdown
Yes! This has succeeded. Our input is now 50% smaller. We have now tried to remove pieces that make up $\frac{1}{2}$ and $\frac{1}{4}$ of the original failing string. In the next iteration, we would go and remove even smaller pieces – $\frac{1}{8}$, $\frac{1}{16}$ and so on. We continue until we are down to $\frac{1}{97}$ – that is, individual characters. However, this is something we happily let a computer do for us. We first introduce a `Reducer` class as an abstract superclass for all kinds of reducers. The `test()` method runs a single test (with logging, if wanted); the `reduce()` method will eventually reduce an input to the minimum.
###Code
class Reducer(object):
def __init__(self, runner, log_test=False):
"""Attach reducer to the given `runner`"""
self.runner = runner
self.log_test = log_test
self.reset()
def reset(self):
self.tests = 0
def test(self, inp):
result, outcome = self.runner.run(inp)
self.tests += 1
if self.log_test:
print("Test #%d" % self.tests, repr(inp), repr(len(inp)), outcome)
return outcome
def reduce(self, inp):
self.reset()
# Default: Don't reduce
return inp
###Output
_____no_output_____
###Markdown
The `CachingReducer` variant saves test results, such that we don't have to run the same tests again and again:
###Code
class CachingReducer(Reducer):
def reset(self):
super().reset()
self.cache = {}
def test(self, inp):
if inp in self.cache:
return self.cache[inp]
outcome = super().test(inp)
self.cache[inp] = outcome
return outcome
###Output
_____no_output_____
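###Markdown
A quick sketch of the caching in action, using the `mystery` runner from above (an illustrative aside): the second call to `test()` with the same input is answered from the cache, so only one real test is executed.
###Code
# Sketch (aside): repeated inputs are served from the cache.
caching = CachingReducer(mystery)
caching.test("()")   # runs the actual test
caching.test("()")   # cache hit -- no new test is run
caching.tests        # => 1
###Output
_____no_output_____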
###Markdown
Here comes the _Delta Debugging_ reducer. Delta Debugging implements the strategy sketched above: It first removes larger chunks of size $\frac{1}{2}$; if this does not fail, then we proceed to chunks of size $\frac{1}{4}$, then $\frac{1}{8}$ and so on. Our implementation uses almost the same Python code as Zeller in \cite{Zeller2002}; the only difference is that it has been adapted to work on Python 3 and our `Runner` framework. The variable `n` (initially 2) indicates the granularity – in each step, chunks of size $\frac{1}{n}$ are cut away. If none of the tests fails (`some_complement_is_failing` is False), then `n` is doubled – until it reaches the length of the input.
###Code
class DeltaDebuggingReducer(CachingReducer):
def reduce(self, inp):
self.reset()
assert self.test(inp) != Runner.PASS
n = 2 # Initial granularity
while len(inp) >= 2:
start = 0
subset_length = len(inp) / n
some_complement_is_failing = False
while start < len(inp):
complement = inp[:int(start)] + \
inp[int(start + subset_length):]
if self.test(complement) == Runner.FAIL:
inp = complement
n = max(n - 1, 2)
some_complement_is_failing = True
break
start += subset_length
if not some_complement_is_failing:
if n == len(inp):
break
n = min(n * 2, len(inp))
return inp
###Output
_____no_output_____
###Markdown
To see how the `DeltaDebuggingReducer` works, let us run it on our failing input. With each step, we see how the remaining input gets smaller and smaller, until only two characters remain:
###Code
dd_reducer = DeltaDebuggingReducer(mystery, log_test=True)
dd_reducer.reduce(failing_input)
###Output
_____no_output_____
###Markdown
Now we know why `MysteryRunner` fails – it suffices that the input contains two matching parentheses. Delta Debugging determines this in 29 steps. Its result is _1-minimal_, meaning that every character contained is required to produce the error; removing any (as seen in tests `27` and `29`, above) no longer makes the test fail. This property is guaranteed by the delta debugging algorithm, which in its last stage always tries to delete characters one by one. A reduced test case such as the one above has many advantages:* A reduced test case __reduces the _cognitive load_ of the programmer__. The test case is shorter and focused, and thus does not burden the programmer with irrelevant details. A reduced input typically leads to shorter executions and smaller program states, both of which reduce the search space when it comes to understanding the bug. In our case, we have eliminated lots of irrelevant input – only the two characters the reduced input contains are relevant.* A reduced test case __is easier to communicate__. All one needs here is the summary: `MysteryRunner fails on "()"`, which is much better than `MysteryRunner fails on a 4100-character input (attached)`.* A reduced test case helps in __identifying duplicates__. If similar bugs have been reported already, and all of them have been reduced to the same cause (namely that the input contains matching parentheses), then it becomes obvious that all these bugs are different symptoms of the same underlying cause – and would all be resolved at once with one code fix. How effective is delta debugging? In the best case (when either the left half or the right half fails), the number of tests is logarithmically proportional to the length $n$ of an input (i.e., $O(\log_2 n)$); this is the same complexity as binary search. In the worst case, though, delta debugging can require a number of tests proportional to $n^2$ (i.e., $O(n^2)$) – this happens when we are down to character granularity and repeatedly try to delete all characters, only to find that deleting the last character results in a failure \cite{Zeller2002}. (This is a pretty pathological situation, though.) In general, delta debugging is a robust algorithm that is easy to implement, easy to deploy, and easy to use – provided that the underlying test case is deterministic and runs quickly enough to warrant a number of experiments. As these are the same prerequisites that make fuzzing effective, delta debugging makes an excellent companion to fuzzing. Grammar-Based Input ReductionIf the input language is syntactically complex, delta debugging may take several attempts at reduction, and may not be able to reduce inputs at all. In the second half of this chapter, we thus introduce an algorithm named _Grammar-Based Reduction_ (or GRABR for short) that makes use of _grammars_ to reduce syntactically complex inputs. Lexical Reduction vs. Syntactic RulesDespite its general robustness, there are situations in which delta debugging might be inefficient or outright fail. As an example, consider some _expression input_ such as `1 + (2 * 3)`. Delta debugging requires a number of tests to simplify the failure-inducing input, but it eventually returns a minimal input
###Code
expr_input = "1 + (2 * 3)"
dd_reducer = DeltaDebuggingReducer(mystery, log_test=True)
dd_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
Looking at the tests, above, though, only a few of them actually represent syntactically valid arithmetic expressions. In a practical setting, we may want to test a program which actually _parses_ such expressions, and which would _reject_ all invalid inputs. We define a class `EvalMysteryRunner` which first _parses_ the given input (according to the rules of our expression grammar), and _only_ if it fits would it be passed to our original `MysteryRunner`. This simulates a setting in which we test an expression interpreter, and in which only valid inputs can trigger the bug.
###Code
from Grammars import EXPR_GRAMMAR
from Parser import EarleyParser # minor dependency
class EvalMysteryRunner(MysteryRunner):
def __init__(self):
self.parser = EarleyParser(EXPR_GRAMMAR)
def run(self, inp):
try:
tree, *_ = self.parser.parse(inp)
except SyntaxError as exc:
return (inp, Runner.UNRESOLVED)
return super().run(inp)
eval_mystery = EvalMysteryRunner()
###Output
_____no_output_____
###Markdown
Under these circumstances, it turns out that delta debugging utterly fails. None of the reductions it applies yield a syntactically valid input, so the input as a whole remains as complex as it was before.
###Code
dd_reducer = DeltaDebuggingReducer(eval_mystery, log_test=True)
dd_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
This behavior is possible if the program under test has several constraints regarding input validity. Delta debugging is not aware of these constraints (nor of the input structure in general), so it might violate these constraints again and again. A Grammar-Based Reduction ApproachTo reduce inputs with high syntactical complexity, we use another approach: Rather than reducing the input string, we reduce the _tree_ representing its structure. The general idea is to start with a _derivation tree_ coming from parsing the input, and then _substitute subtrees by smaller subtrees of the same type_. These alternate subtrees can either come1. From the tree itself, or2. By applying an alternate grammar expansion using elements from the tree. Let us show these two strategies using an example. We start with a derivation tree from an arithmetic expression:
###Code
from GrammarFuzzer import all_terminals, expansion_to_children, display_tree
derivation_tree, *_ = EarleyParser(EXPR_GRAMMAR).parse(expr_input)
display_tree(derivation_tree)
###Output
_____no_output_____
###Markdown
Simplifying by Replacing Subtrees To simplify this tree, we could replace any `<expr>` symbol up in the tree with some `<expr>` subtree down in the tree. For instance, we could replace the uppermost `<expr>` with its right `<expr>` subtree, yielding the string `(2 * 3)`:
###Code
import copy
new_derivation_tree = copy.deepcopy(derivation_tree)
# We really should have some query language
sub_expr_tree = new_derivation_tree[1][0][1][2]
display_tree(sub_expr_tree)
new_derivation_tree[1][0] = sub_expr_tree
display_tree(new_derivation_tree)
all_terminals(new_derivation_tree)
###Output
_____no_output_____
###Markdown
Replacing one subtree by another only works as long as individual elements such as `` occur multiple times in our tree. In the reduced `new_derivation_tree`, above, we could replace further `` trees only once more. Simplifying by Alternative Expansions A second means to simplify this tree is to apply _alternative expansions_. That is, for a symbol, we check whether there is an alternative expansion with a smaller number of children. Then, we replace the symbol with the alternative expansion, filling in needed symbols from the tree. As an example, consider the `new_derivation_tree` above. The applied expansion for `` has been ::= * Let us replace this with the alternative expansion: ::=
###Code
term_tree = new_derivation_tree[1][0][1][0][1][0][1][1][1][0]
display_tree(term_tree)
shorter_term_tree = term_tree[1][2]
display_tree(shorter_term_tree)
new_derivation_tree[1][0][1][0][1][0][1][1][1][0] = shorter_term_tree
display_tree(new_derivation_tree)
all_terminals(new_derivation_tree)
###Output
_____no_output_____
###Markdown
If we replace derivation subtrees by (smaller) subtrees, and if we search for alternate expansions that again yield smaller subtrees, we can systematically simplify the input. This could be much faster than delta debugging, as our inputs would always be syntactically valid. However, we need a strategy for when to apply which simplification rule. This is what we develop in the remainder of this section. A Class for Reducing with Grammars We introduce the `GrammarReducer` class, which is again a `Reducer`. Note that we derive from `CachingReducer`, as the strategy will produce several duplicates.
###Code
class GrammarReducer(CachingReducer):
def __init__(self, runner, parser, log_test=False, log_reduce=False):
super().__init__(runner, log_test=log_test)
self.parser = parser
self.grammar = parser.grammar()
self.start_symbol = parser.start_symbol()
self.log_reduce = log_reduce
self.try_all_combinations = False
###Output
_____no_output_____
###Markdown
A Few Helpers We define a number of helper functions, which we will need for our strategy. `tree_list_to_string()` does what the name suggests, creating a string from a list of derivation trees:
###Code
def tree_list_to_string(q):
return "[" + ", ".join([all_terminals(tree) for tree in q]) + "]"
tree_list_to_string([derivation_tree, derivation_tree])
###Output
_____no_output_____
###Markdown
The function `possible_combinations()` takes a list of lists $[[x_1, x_2], [y_1, y_2], \dots]$ and creates a list of combinations $[[x_1, y_1], [x_1, y_2], [x_2, y_1], [x_2, y_2], \dots]$.
###Code
def possible_combinations(list_of_lists):
if len(list_of_lists) == 0:
return []
ret = []
for e in list_of_lists[0]:
if len(list_of_lists) == 1:
ret.append([e])
else:
for c in possible_combinations(list_of_lists[1:]):
new_combo = [e] + c
ret.append(new_combo)
return ret
possible_combinations([[1, 2], ['a', 'b']])
###Output
_____no_output_____
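For reference, the same combinations can also be obtained with `itertools.product` from the Python standard library; here is a small equivalent sketch (the recursive version above is the one the reducer actually uses):
```python
import itertools

def possible_combinations_product(list_of_lists):
    # Equivalent to possible_combinations(): an empty input yields [],
    # otherwise we enumerate the cartesian product of the given lists.
    if len(list_of_lists) == 0:
        return []
    return [list(combo) for combo in itertools.product(*list_of_lists)]

assert possible_combinations_product([[1, 2], ['a', 'b']]) == \
    [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']]
```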
###Markdown
The functions `number_of_nodes()` and `max_height()` return the number of nodes and the maximum height of the given tree, respectively.
###Code
def number_of_nodes(tree):
(symbol, children) = tree
return 1 + sum([number_of_nodes(c) for c in children])
number_of_nodes(derivation_tree)
def max_height(tree):
(symbol, children) = tree
if len(children) == 0:
return 1
return 1 + max([max_height(c) for c in children])
max_height(derivation_tree)
###Output
_____no_output_____
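As a quick sanity check, here is what the two metrics yield on a tiny hand-built tree (a hypothetical example, not one produced by the parser):
```python
# Four nodes overall, arranged three levels deep.
tiny_metric_tree = ("<expr>", [("<term>", [("1", [])]), (" + 2", [])])

assert number_of_nodes(tiny_metric_tree) == 4  # <expr>, <term>, "1", " + 2"
assert max_height(tiny_metric_tree) == 3       # <expr> -> <term> -> "1"
```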
###Markdown
Simplification Strategies Let us now implement our two simplification strategies – replacing subtrees and alternate expansions. Finding Subtrees The method `subtrees_with_symbol()` returns all subtrees in the given tree whose root is equal to the given symbol. If `ignore_root` is set (default), then the root node of `tree` is not compared against. (The `depth` parameter will be discussed below.)
###Code
class GrammarReducer(GrammarReducer):
def subtrees_with_symbol(self, tree, symbol, depth=-1, ignore_root=True):
# Find all subtrees in TREE whose root is SYMBOL.
        # If IGNORE_ROOT is true, ignore the root node of TREE.
ret = []
(child_symbol, children) = tree
if depth <= 0 and not ignore_root and child_symbol == symbol:
ret.append(tree)
# Search across all children
if depth != 0 and children is not None:
for c in children:
ret += self.subtrees_with_symbol(c,
symbol,
depth=depth - 1,
ignore_root=False)
return ret
###Output
_____no_output_____
###Markdown
Here's an example: These are all subtrees with `<term>` in our derivation tree `derivation_tree`.
###Code
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>")]
###Output
_____no_output_____
###Markdown
If we want to replace `<term>` subtrees to simplify the tree, these are the subtrees we could replace them with. Alternate Expansions Our second strategy, simplifying by alternate expansions, is a bit more complex. We first fetch the possible expansions for the given symbol (starting with the ones with the fewest children). For each expansion, we fill in values for the symbols from the subtree (using `subtrees_with_symbol()`, above). We then pick the first possible combination (or _all_ combinations, if the attribute `try_all_combinations` is set).
###Code
class GrammarReducer(GrammarReducer):
def alternate_reductions(self, tree, symbol, depth=-1):
reductions = []
expansions = self.grammar.get(symbol, [])
expansions.sort(
key=lambda expansion: len(
expansion_to_children(expansion)))
for expansion in expansions:
expansion_children = expansion_to_children(expansion)
match = True
new_children_reductions = []
for (alt_symbol, _) in expansion_children:
child_reductions = self.subtrees_with_symbol(
tree, alt_symbol, depth=depth)
if len(child_reductions) == 0:
match = False # Child not found; cannot apply rule
break
new_children_reductions.append(child_reductions)
if not match:
continue # Try next alternative
# Use the first suitable combination
for new_children in possible_combinations(new_children_reductions):
new_tree = (symbol, new_children)
if number_of_nodes(new_tree) < number_of_nodes(tree):
reductions.append(new_tree)
if not self.try_all_combinations:
break
# Sort by number of nodes
reductions.sort(key=number_of_nodes)
return reductions
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
###Output
_____no_output_____
###Markdown
Here are _all_ combinations for `<term>`:
###Code
grammar_reducer.try_all_combinations = True
print([all_terminals(t)
for t in grammar_reducer.alternate_reductions(derivation_tree, "<term>")])
###Output
_____no_output_____
###Markdown
The default, though, is simply to return the first of these:
###Code
grammar_reducer.try_all_combinations = False
[all_terminals(t) for t in grammar_reducer.alternate_reductions(
derivation_tree, "<term>")]
###Output
_____no_output_____
###Markdown
Both Strategies Together Let us now merge both strategies. To replace a subtree with a given symbol, we first search for already existing subtrees (using `subtrees_with_symbol()`); then we go for alternate expansions (using `alternate_reductions()`).
###Code
class GrammarReducer(GrammarReducer):
def symbol_reductions(self, tree, symbol, depth=-1):
"""Find all expansion alternatives for the given symbol"""
reductions = (self.subtrees_with_symbol(tree, symbol, depth=depth)
+ self.alternate_reductions(tree, symbol, depth=depth))
# Filter duplicates
unique_reductions = []
for r in reductions:
if r not in unique_reductions:
unique_reductions.append(r)
return unique_reductions
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
###Output
_____no_output_____
###Markdown
These are the possible reductions for `<expr>` nodes. Note how we first return subtrees (`1 + (2 * 3)`, `(2 * 3)`, `2 * 3`) before going for alternate expansions of `<expr>` (`1`).
###Code
reductions = grammar_reducer.symbol_reductions(derivation_tree, "<expr>")
tree_list_to_string([r for r in reductions])
###Output
_____no_output_____
###Markdown
These are the possible reductions for `<term>` nodes. Again, we first have subtrees of the derivation tree, followed by the alternate expansion `1 * 1`.
###Code
reductions = grammar_reducer.symbol_reductions(derivation_tree, "<term>")
tree_list_to_string([r for r in reductions])
###Output
_____no_output_____
###Markdown
The Reduction Strategy We are now able to return a number of alternatives for each symbol in the tree. This is what we apply in the core function of our reduction strategy, `reduce_subtree()`. Starting with `subtree`, for every child, we find possible reductions. For every reduction, we replace the child with the reduction and test the resulting (full) tree. If the test fails, our reduction was successful; otherwise, we put the child back into place and try out the next reduction. Eventually, we apply `reduce_subtree()` on all children, reducing these as well.
###Code
class GrammarReducer(GrammarReducer):
def reduce_subtree(self, tree, subtree, depth=-1):
symbol, children = subtree
if len(children) == 0:
return False
if self.log_reduce:
print("Reducing", all_terminals(subtree), "with depth", depth)
reduced = False
while True:
reduced_child = False
for i, child in enumerate(children):
(child_symbol, _) = child
for reduction in self.symbol_reductions(
child, child_symbol, depth):
if number_of_nodes(reduction) >= number_of_nodes(child):
continue
# Try this reduction
if self.log_reduce:
print(
"Replacing",
all_terminals(
children[i]),
"by",
all_terminals(reduction))
children[i] = reduction
if self.test(all_terminals(tree)) == Runner.FAIL:
# Success
if self.log_reduce:
print("New tree:", all_terminals(tree))
reduced = reduced_child = True
break
else:
# Didn't work out - restore
children[i] = child
if not reduced_child:
if self.log_reduce:
print("Tried all alternatives for", all_terminals(subtree))
break
# Run recursively
for c in children:
if self.reduce_subtree(tree, c, depth):
reduced = True
return reduced
###Output
_____no_output_____
###Markdown
All we now need is a few drivers. The method `reduce_tree()` is the main entry point into `reduce_subtree()`:
###Code
class GrammarReducer(GrammarReducer):
def reduce_tree(self, tree):
return self.reduce_subtree(tree, tree)
###Output
_____no_output_____
###Markdown
The custom method `parse()` turns a given input into a derivation tree:
###Code
class GrammarReducer(GrammarReducer):
def parse(self, inp):
tree, *_ = self.parser.parse(inp)
if self.log_reduce:
print(all_terminals(tree))
return tree
###Output
_____no_output_____
###Markdown
The method `reduce()` is the one single entry point, parsing the input and then reducing it.
###Code
class GrammarReducer(GrammarReducer):
def reduce(self, inp):
tree = self.parse(inp)
self.reduce_tree(tree)
return all_terminals(tree)
###Output
_____no_output_____
###Markdown
Let us try this out in practice on our input `expr_input` and the `eval_mystery` runner defined above. How quickly can we reduce it?
###Code
expr_input
grammar_reducer = GrammarReducer(
eval_mystery,
EarleyParser(EXPR_GRAMMAR),
log_test=True)
grammar_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
Success! In only five steps, our `GrammarReducer` reduces the input to the minimum that causes the failure. Note how all tests are syntactically valid by construction, avoiding the `UNRESOLVED` outcomes that cause delta debugging to stall. A Depth-Oriented Strategy Even if five steps are already good, we can still do better. If we look at the log above, we see that after test `2`, where the input (tree) is reduced to `2 * 3`, our `GrammarReducer` first tries to replace the tree with `2` and `3`, which are the alternate `` subtrees. This may work, of course; but if there are many possible subtrees, our strategy will spend quite some time trying one after the other. Delta debugging, as introduced above, follows the idea of trying to cut inputs approximately in half, and thus quickly proceeds towards a minimal input. By replacing a tree with much smaller subtrees, we _could_ possibly reduce a tree significantly, but may need several attempts to do so. A better strategy is to only consider _large_ subtrees first – both for the subtree replacement as well as for alternate expansions. To find such _large_ subtrees, we limit the _depth_ by which we search for possible replacements in the subtree – first, by looking at the direct descendants, later at lower descendants. This is the role of the `depth` parameter used in `subtrees_with_symbol()` and passed through the invoking functions. If set, _only_ symbols at the given depth are returned. Here's an example, starting again with our derivation tree `derivation_tree`:
###Code
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
display_tree(derivation_tree)
###Output
_____no_output_____
###Markdown
At a depth of 1, there is no `<term>` symbol:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=1)]
###Output
_____no_output_____
###Markdown
At a depth of 2, we have the `<term>` subtree on the left-hand side:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=2)]
###Output
_____no_output_____
###Markdown
At a depth of 3, we have the `<term>` subtree on the right-hand side:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=3)]
###Output
_____no_output_____
###Markdown
The idea is now to start with a depth of 0, subsequently increasing it as we proceed:
###Code
class GrammarReducer(GrammarReducer):
def reduce_tree(self, tree):
depth = 0
while depth < max_height(tree):
reduced = self.reduce_subtree(tree, tree, depth)
if reduced:
depth = 0 # Start with new tree
else:
depth += 1 # Extend search for subtrees
return tree
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_test=True)
grammar_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
We see that a depth-oriented strategy needs even fewer steps in our setting. Comparing Strategies We close by demonstrating the difference between text-based delta debugging and our grammar-based reduction. We build a very long expression:
###Code
from GrammarFuzzer import GrammarFuzzer
long_expr_input = GrammarFuzzer(EXPR_GRAMMAR, min_nonterminals=100).fuzz()
long_expr_input
###Output
_____no_output_____
###Markdown
With grammars, we need only a handful of tests to find the failure-inducing input:
###Code
from Timer import Timer
grammar_reducer = GrammarReducer(eval_mystery, EarleyParser(EXPR_GRAMMAR))
with Timer() as grammar_time:
print(grammar_reducer.reduce(long_expr_input))
grammar_reducer.tests
grammar_time.elapsed_time()
###Output
_____no_output_____
###Markdown
Delta debugging, in contrast, requires orders of magnitude more tests (and consequently, time). Also, the reduction is not nearly as good as it is with the grammar-based reducer.
###Code
dd_reducer = DeltaDebuggingReducer(eval_mystery)
with Timer() as dd_time:
print(dd_reducer.reduce(long_expr_input))
dd_reducer.tests
dd_time.elapsed_time()
###Output
_____no_output_____
###Markdown
We see that if an input is syntactically complex, using a grammar to reduce inputs is the best way to go. Synopsis A _reducer_ takes a failure-inducing input and reduces it to the minimum that still reproduces the failure. This chapter provides `Reducer` classes that implement such reducers. Here is a simple example: An arithmetic expression causes an error in the Python interpreter:
###Code
!python -c 'x = 1 + 2 * 3 / 0'
###Output
_____no_output_____
###Markdown
Can we reduce this input to a minimum? To use a `Reducer`, one first has to build a `Runner` whose outcome is `FAIL` if the precise error occurs. We therefore build a `ZeroDivisionRunner` whose `run()` method will specifically return a `FAIL` outcome if a `ZeroDivisionError` occurs.
###Code
from Fuzzer import ProgramRunner
class ZeroDivisionRunner(ProgramRunner):
"""Make outcome 'FAIL' if ZeroDivisionError occurs"""
def run(self, inp=""):
result, outcome = super().run(inp)
if result.stderr.find('ZeroDivisionError') >= 0:
outcome = 'FAIL'
return result, outcome
###Output
_____no_output_____
###Markdown
If we feed this expression into a `ZeroDivisionRunner`, it will produce an outcome of `FAIL` as designed.
###Code
python_input = "x = 1 + 2 * 3 / 0"
python_runner = ZeroDivisionRunner("python")
result, outcome = python_runner.run(python_input)
outcome
###Output
_____no_output_____
###Markdown
Delta Debugging is a simple and robust reduction algorithm. We can tie a `DeltaDebuggingReducer` to this runner, and have it determine the substring that causes the `python` program to fail:
###Code
dd = DeltaDebuggingReducer(python_runner)
dd.reduce(python_input)
###Output
_____no_output_____
###Markdown
Reducing Failure-Inducing Inputs By construction, fuzzers create inputs that may be hard to read. This causes issues during _debugging_, when a human has to analyze the exact cause of the failure. In this chapter, we present techniques that _automatically reduce and simplify failure-inducing inputs to a minimum_ in order to ease debugging.
###Code
from bookutils import YouTubeVideo
YouTubeVideo('noJUPjSJVh0')
###Output
_____no_output_____
###Markdown
**Prerequisites*** The simple "delta debugging" technique for reduction has no specific prerequisites.* As reduction is typically used together with fuzzing, reading the [chapter on basic fuzzing](Fuzzer.ipynb) is a good idea.* The later grammar-based techniques require knowledge on [derivation trees](GrammarFuzzer.ipynb) and [parsing](Parser.ipynb). SynopsisTo [use the code provided in this chapter](Importing.ipynb), write```python>>> from fuzzingbook.Reducer import ```and then make use of the following features.A _reducer_ takes a failure-inducing input and reduces it to the minimum that still reproduces the failure. This chapter provides `Reducer` classes that implement such reducers.Here is a simple example: An arithmetic expression causes an error in the Python interpreter:```python>>> !python -c 'x = 1 + 2 * 3 / 0'```Can we reduce this input to a minimum? To use a `Reducer`, one first has to build a `Runner` whose outcome is `FAIL` if the precise error occurs. We therefore build a `ZeroDivisionRunner` whose `run()` method will specifically return a `FAIL` outcome if a `ZeroDivisionError` occurs.```python>>> from Fuzzer import ProgramRunner>>> class ZeroDivisionRunner(ProgramRunner):>>> """Make outcome 'FAIL' if ZeroDivisionError occurs""">>> def run(self, inp=""):>>> result, outcome = super().run(inp)>>> if result.stderr.find('ZeroDivisionError') >= 0:>>> outcome = 'FAIL'>>> return result, outcome```If we feed this expression into a `ZeroDivisionRunner`, it will produce an outcome of `FAIL` as designed.```python>>> python_input = "x = 1 + 2 * 3 / 0">>> python_runner = ZeroDivisionRunner("python")>>> result, outcome = python_runner.run(python_input)>>> outcome```Delta Debugging is a simple and robust reduction algorithm. We can tie a `DeltaDebuggingReducer` to this runner, and have it determine the substring that causes the `python` program to fail:```python>>> dd = DeltaDebuggingReducer(python_runner)>>> dd.reduce(python_input)```The input is reduced to the minimum: We get the essence of the division by zero. Why Reducing?At this point, we have seen a number of test generation techniques that all in some form produce inputs in order to trigger failures. If they are successful – that is, the program actually fails – we must find out why the failure occurred and how to fix it. Here's an example of such a situation. We have a class `MysteryRunner` with a `run()` method that – given its code – can occasionally fail. But under which circumstances does this actually happen? We have deliberately obscured the exact condition in order to make this non-obvious.
###Code
import bookutils
from bookutils import quiz
from typing import Tuple, List, Sequence, Any, Optional
from ExpectError import ExpectError
from Fuzzer import RandomFuzzer, Runner, Outcome
import re
class MysteryRunner(Runner):
def run(self, inp: str) -> Tuple[str, Outcome]:
x = inp.find(chr(0o17 + 0o31))
y = inp.find(chr(0o27 + 0o22))
if x >= 0 and y >= 0 and x < y:
return (inp, Runner.FAIL)
else:
return (inp, Runner.PASS)
###Output
_____no_output_____
###Markdown
Let us fuzz the function until we find a failing input.
###Code
mystery = MysteryRunner()
random_fuzzer = RandomFuzzer()
while True:
inp = random_fuzzer.fuzz()
result, outcome = mystery.run(inp)
if outcome == mystery.FAIL:
break
failing_input = result
failing_input
###Output
_____no_output_____
###Markdown
Something in this input causes `MysteryRunner` to fail. But what is it? Manual Input Reduction One important step in the debugging process is _reduction_ – that is, to identify those circumstances of a failure that are relevant for the failure to occur, and to _omit_ (if possible) those parts that are not. As Kernighan and Pike \cite{Kernighan1999} put it:> For every circumstance of the problem, check whether it is relevant for the problem to occur. If it is not, remove it from the problem report or the test case in question. Specifically for inputs, they suggest a _divide and conquer_ process:> Proceed by binary search. Throw away half the input and see if the output is still wrong; if not, go back to the previous state and discard the other half of the input. This is something we can easily try out, using our last generated input:
###Code
failing_input
###Output
_____no_output_____
###Markdown
For instance, we can see whether the error still occurs if we only feed in the first half:
###Code
half_length = len(failing_input) // 2 # // is integer division
first_half = failing_input[:half_length]
mystery.run(first_half)
###Output
_____no_output_____
###Markdown
Nope – the first half alone does not suffice. Maybe the second half?
###Code
second_half = failing_input[half_length:]
mystery.run(second_half)
###Output
_____no_output_____
###Markdown
This did not go so well either. We may still proceed by cutting away _smaller chunks_ – say, one character after another. If our test is deterministic and easily repeated, it is clear that this process eventually will yield a reduced input. But still, it is a rather inefficient process, especially for long inputs. What we need is a _strategy_ that effectively minimizes a failure-inducing input – a strategy that can be automated. Delta Debugging One strategy to effectively reduce failure-inducing inputs is _delta debugging_ \cite{Zeller2002}. Delta Debugging implements the "binary search" strategy, as listed above, but with a twist: If neither half fails (also as above), it keeps on cutting away smaller and smaller chunks from the input, until it eliminates individual characters. Thus, after cutting away the first half, we cut away the first quarter, the second quarter, and so on. Let us illustrate this on our example, and see what happens if we cut away the first quarter.
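Before running that experiment, here is a hypothetical sketch of the character-by-character trimming mentioned above, just to make its cost concrete. The helper name `trim_one_by_one()` is ours and not part of the chapter's code; it greedily drops every character whose removal still makes the test fail, spending roughly one test per input character for a single pass.
```python
def trim_one_by_one(runner, inp):
    """Naive reduction sketch: try to drop the input one character at a time."""
    i = 0
    while i < len(inp):
        candidate = inp[:i] + inp[i + 1:]        # drop the character at position i
        if runner.run(candidate)[1] == Runner.FAIL:
            inp = candidate                      # still failing: keep the shorter input
        else:
            i += 1                               # character is needed: keep it, move on
    return inp

# trim_one_by_one(mystery, failing_input) would eventually shrink the input,
# but far less economically than the delta debugging strategy introduced below.
```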
###Code
quarter_length = len(failing_input) // 4
input_without_first_quarter = failing_input[quarter_length:]
mystery.run(input_without_first_quarter)
###Output
_____no_output_____
###Markdown
Ah! This has failed, and reduced our failing input by 25%. Let's remove another quarter.
###Code
input_without_first_and_second_quarter = failing_input[quarter_length * 2:]
mystery.run(input_without_first_and_second_quarter)
###Output
_____no_output_____
###Markdown
This is not too surprising, as we had that one before:
###Code
second_half
input_without_first_and_second_quarter
###Output
_____no_output_____
###Markdown
How about removing the third quarter, then?
###Code
input_without_first_and_third_quarter = failing_input[quarter_length:
quarter_length * 2] + failing_input[quarter_length * 3:]
mystery.run(input_without_first_and_third_quarter)
###Output
_____no_output_____
###Markdown
Ok. Let us remove the fourth quarter.
###Code
input_without_first_and_fourth_quarter = failing_input[quarter_length:quarter_length * 3]
mystery.run(input_without_first_and_fourth_quarter)
###Output
_____no_output_____
###Markdown
Yes! This has succeeded. Our input is now 50% smaller. We have now tried to remove pieces that make up $\frac{1}{2}$ and $\frac{1}{4}$ of the original failing string. In the next iteration, we would go and remove even smaller pieces – $\frac{1}{8}$, $\frac{1}{16}$ and so on. We continue until we are down to $\frac{1}{97}$ – that is, individual characters. However, this is something we happily let a computer do for us. We first introduce a `Reducer` class as an abstract superclass for all kinds of reducers. The `test()` method runs a single test (with logging, if wanted); the `reduce()` method will eventually reduce an input to the minimum.
###Code
class Reducer:
"""Base class for reducers."""
def __init__(self, runner: Runner, log_test: bool = False) -> None:
"""Attach reducer to the given `runner`"""
self.runner = runner
self.log_test = log_test
self.reset()
def reset(self) -> None:
"""Reset the test counter to zero. To be extended in subclasses."""
self.tests = 0
def test(self, inp: str) -> Outcome:
"""Test with input `inp`. Return outcome.
To be extended in subclasses."""
result, outcome = self.runner.run(inp)
self.tests += 1
if self.log_test:
print("Test #%d" % self.tests, repr(inp), repr(len(inp)), outcome)
return outcome
def reduce(self, inp: str) -> str:
"""Reduce input `inp`. Return reduced input.
To be defined in subclasses."""
self.reset()
# Default: Don't reduce
return inp
###Output
_____no_output_____
###Markdown
The `CachingReducer` variant saves test results, such that we don't have to run the same tests again and again:
###Code
class CachingReducer(Reducer):
"""A reducer that also caches test outcomes"""
def reset(self):
super().reset()
self.cache = {}
def test(self, inp):
if inp in self.cache:
return self.cache[inp]
outcome = super().test(inp)
self.cache[inp] = outcome
return outcome
###Output
_____no_output_____
###Markdown
Here comes the _Delta Debugging_ reducer. Delta Debugging implements the strategy sketched above: It first removes larger chunks of size $\frac{1}{2}$; if this does not fail, then we proceed to chunks of size $\frac{1}{4}$, then $\frac{1}{8}$ and so on. Our implementation uses almost the same Python code as Zeller in \cite{Zeller2002}; the only difference is that it has been adapted to work on Python 3 and our `Runner` framework. The variable `n` (initially 2) indicates the granularity – in each step, chunks of size $\frac{1}{n}$ are cut away. If none of the tests fails (`some_complement_is_failing` is False), then `n` is doubled – until it reaches the length of the input.
###Code
class DeltaDebuggingReducer(CachingReducer):
"""Reduce inputs using delta debugging."""
def reduce(self, inp: str) -> str:
"""Reduce input `inp` using delta debugging. Return reduced input."""
self.reset()
assert self.test(inp) != Runner.PASS
n = 2 # Initial granularity
while len(inp) >= 2:
start = 0.0
subset_length = len(inp) / n
some_complement_is_failing = False
while start < len(inp):
complement = inp[:int(start)] + \
inp[int(start + subset_length):]
if self.test(complement) == Runner.FAIL:
inp = complement
n = max(n - 1, 2)
some_complement_is_failing = True
break
start += subset_length
if not some_complement_is_failing:
if n == len(inp):
break
n = min(n * 2, len(inp))
return inp
###Output
_____no_output_____
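###Markdown
As a small, hedged side calculation (not from the original chapter code): assuming no reduction ever succeeds along the way, these are the granularities `n` (chunks of size $\frac{1}{n}$) that the loop above would step through for a 97-character input, doubling `n` until it reaches the input length.
###Code
# Hedged illustration: granularities n tried for a 97-character input,
# assuming no complement ever fails; n doubles and is capped at the length.
length = 97
n = 2
granularities = []
while n < length:
    granularities.append(n)
    n = min(n * 2, length)
granularities.append(length)
granularities
###Output
_____no_output_____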
###Markdown
To see how the `DeltaDebuggingReducer` works, let us run it on our failing input. With each step, we see how the remaining input gets smaller and smaller, until only two characters remain:
###Code
dd_reducer = DeltaDebuggingReducer(mystery, log_test=True)
dd_reducer.reduce(failing_input)
###Output
_____no_output_____
###Markdown
Now we know why `MysteryRunner` fails – it suffices that the input contains two matching parentheses. Delta Debugging determines this in 29 steps. Its result is _1-minimal_, meaning that every character contained is required to produce the error; removing any of them (as seen in tests `27` and `29`, above) no longer makes the test fail. This property is guaranteed by the delta debugging algorithm, which in its last stage always tries to delete characters one by one. A reduced test case such as the one above has many advantages:* A reduced test case __reduces the _cognitive load_ of the programmer__. The test case is shorter and focused, and thus does not burden the programmer with irrelevant details. A reduced input typically leads to shorter executions and smaller program states, both of which reduce the search space when it comes to understanding the bug. In our case, we have eliminated lots of irrelevant input – only the two characters the reduced input contains are relevant.* A reduced test case __is easier to communicate__. All one needs here is the summary: `MysteryRunner fails on "()"`, which is much better than `MysteryRunner fails on a 4100-character input (attached)`.* A reduced test case helps in __identifying duplicates__. If similar bugs have been reported already, and all of them have been reduced to the same cause (namely that the input contains matching parentheses), then it becomes obvious that all these bugs are different symptoms of the same underlying cause – and would all be resolved at once with one code fix. How effective is delta debugging? In the best case (when the left half or the right half fails), the number of tests is proportional to the logarithm of the length $n$ of the input (i.e., $O(\log_2 n)$); this is the same complexity as binary search. In the worst case, though, delta debugging can require a number of tests proportional to $n^2$ (i.e., $O(n^2)$) – this happens when we are down to character granularity and repeatedly try to delete individual characters, only to find that deleting the last one results in a failure \cite{Zeller2002}. (This is a pretty pathological situation, though.) In general, delta debugging is a robust algorithm that is easy to implement, easy to deploy, and easy to use – provided that the underlying test case is deterministic and runs quickly enough to warrant a number of experiments. As these are the same prerequisites that make fuzzing effective, delta debugging makes an excellent companion to fuzzing.
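As a quick, hedged aside (reusing only classes defined earlier in this chapter), we can confirm which two characters the obscured condition in `MysteryRunner` looks for, and then watch delta debugging on a simpler, hypothetical failure condition: a made-up `BangRunner` that fails whenever the input contains an exclamation mark.
###Code
# The two obscured character codes from MysteryRunner resolve to '(' and ')':
print(chr(0o17 + 0o31), chr(0o27 + 0o22))
# Hypothetical runner (illustration only): fail whenever the input contains '!'
class BangRunner(Runner):
    def run(self, inp: str) -> Tuple[str, Outcome]:
        return (inp, Runner.FAIL if '!' in inp else Runner.PASS)
bang_reducer = DeltaDebuggingReducer(BangRunner())
print(bang_reducer.reduce("abcdefgh!ijklmnop"), "- reduced in", bang_reducer.tests, "tests")
###Output
_____no_output_____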
###Code
quiz("What happens if the function under test does not fail?",
[
"Delta debugging searches for the minimal input"
" that produces the same result",
"Delta debugging starts a fuzzer to find a failure",
"Delta debugging raises an AssertionError",
"Delta debugging runs forever in a loop",
], '0 ** 0 + 1 ** 0 + 0 ** 1 + 1 ** 1')
###Output
_____no_output_____
###Markdown
Indeed, `DeltaDebuggingReducer` checks if its assumptions hold. If not, an assertion fails.
###Code
with ExpectError():
dd_reducer.reduce("I am a passing input")
###Output
_____no_output_____
###Markdown
Grammar-Based Input Reduction If the input language is syntactically complex, delta debugging may take several attempts at reduction, and may not be able to reduce inputs at all. In the second half of this chapter, we thus introduce an algorithm named _Grammar-Based Reduction_ (or GRABR for short) that makes use of _grammars_ to reduce syntactically complex inputs. Lexical Reduction vs. Syntactic Rules Despite its general robustness, there are situations in which delta debugging might be inefficient or outright fail. As an example, consider some _expression input_ such as `1 + (2 * 3)`. Delta debugging requires a number of tests to simplify the failure-inducing input, but it eventually returns a minimal input:
###Code
expr_input = "1 + (2 * 3)"
dd_reducer = DeltaDebuggingReducer(mystery, log_test=True)
dd_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
Looking at the tests above, though, only a few of them actually represent syntactically valid arithmetic expressions. In a practical setting, we may want to test a program which actually _parses_ such expressions, and which would _reject_ all invalid inputs. We define a class `EvalMysteryRunner` which first _parses_ the given input (according to the rules of our expression grammar) and passes it on to our original `MysteryRunner` _only_ if parsing succeeds. This simulates a setting in which we test an expression interpreter, and in which only valid inputs can trigger the bug.
###Code
from Grammars import EXPR_GRAMMAR
from Parser import EarleyParser, Parser # minor dependency
class EvalMysteryRunner(MysteryRunner):
def __init__(self) -> None:
self.parser = EarleyParser(EXPR_GRAMMAR)
def run(self, inp: str) -> Tuple[str, Outcome]:
try:
tree, *_ = self.parser.parse(inp)
except SyntaxError:
return (inp, Runner.UNRESOLVED)
return super().run(inp)
eval_mystery = EvalMysteryRunner()
###Output
_____no_output_____
###Markdown
Under these circumstances, it turns out that delta debugging utterly fails. None of the reductions it applies yield a syntactically valid input, so the input as a whole remains as complex as it was before.
###Code
dd_reducer = DeltaDebuggingReducer(eval_mystery, log_test=True)
dd_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
This behavior is possible if the program under test has several constraints regarding input validity. Delta debugging is not aware of these constraints (nor of the input structure in general), so it might violate these constraints again and again. A Grammar-Based Reduction Approach To reduce inputs with high syntactical complexity, we use another approach: Rather than reducing the input string, we reduce the _tree_ representing its structure. The general idea is to start with a _derivation tree_ coming from parsing the input, and then _substitute subtrees by smaller subtrees of the same type_. These alternate subtrees can either come (1) from the tree itself, or (2) from applying an alternate grammar expansion using elements from the tree. Let us show these two strategies using an example. We start with a derivation tree from an arithmetic expression:
###Code
from Grammars import Grammar
from GrammarFuzzer import all_terminals, expansion_to_children, display_tree
derivation_tree, *_ = EarleyParser(EXPR_GRAMMAR).parse(expr_input)
display_tree(derivation_tree)
###Output
_____no_output_____
###Markdown
Simplifying by Replacing Subtrees To simplify this tree, we could replace any `<expr>` symbol up in the tree with some `<expr>` subtree down in the tree. For instance, we could replace the uppermost `<expr>` with its right `<expr>` subtree, yielding the string `(2 * 3)`:
###Code
import copy
new_derivation_tree = copy.deepcopy(derivation_tree)
# We really should have some query language
sub_expr_tree = new_derivation_tree[1][0][1][2]
display_tree(sub_expr_tree)
new_derivation_tree[1][0] = sub_expr_tree
display_tree(new_derivation_tree)
all_terminals(new_derivation_tree)
###Output
_____no_output_____
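###Markdown
As a hedged aside, the comment in the cell above wishes for "some query language". Here is a minimal sketch of such a helper; the function `first_subtree()` is hypothetical and not part of the chapter's code. It performs a depth-first search for the first subtree rooted in a given symbol.
###Code
# Hypothetical helper (illustration only): depth-first search for the first
# subtree of `tree` whose root symbol equals `symbol`; returns None if absent.
def first_subtree(tree, symbol):
    node_symbol, children = tree
    if node_symbol == symbol:
        return tree
    for c in children or []:
        found = first_subtree(c, symbol)
        if found is not None:
            return found
    return None
all_terminals(first_subtree(derivation_tree, "<factor>"))
###Output
_____no_output_____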
###Markdown
Replacing one subtree by another only works as long as individual elements such as `<expr>` occur multiple times in our tree. In the reduced `new_derivation_tree`, above, we could replace further `<expr>` trees only once more. Simplifying by Alternative Expansions A second means to simplify this tree is to apply _alternative expansions_. That is, for a symbol, we check whether there is an alternative expansion with a smaller number of children. Then, we replace the symbol with the alternative expansion, filling in needed symbols from the tree. As an example, consider the `new_derivation_tree` above. The applied expansion for `<term>` has been `<term> ::= <term> * <factor>`. Let us replace this with the alternative expansion `<term> ::= <factor>`:
###Code
term_tree = new_derivation_tree[1][0][1][0][1][0][1][1][1][0]
display_tree(term_tree)
shorter_term_tree = term_tree[1][2]
display_tree(shorter_term_tree)
new_derivation_tree[1][0][1][0][1][0][1][1][1][0] = shorter_term_tree
display_tree(new_derivation_tree)
all_terminals(new_derivation_tree)
###Output
_____no_output_____
###Markdown
If we replace derivation subtrees by (smaller) subtrees, and if we search for alternate expansions that again yield smaller subtrees, we can systematically simplify the input. This could be much faster than delta debugging, as our inputs would always be syntactically valid. However, we need a strategy for when to apply which simplification rule. This is what we develop in the remainder of this section. Excursion: A Class for Reducing with Grammars We introduce the `GrammarReducer` class, which is again a `Reducer`. Note that we derive from `CachingReducer`, as the strategy will produce several duplicates.
###Code
class GrammarReducer(CachingReducer):
"""Reduce inputs using grammars"""
def __init__(self, runner: Runner, parser: Parser, *,
log_test: bool = False, log_reduce: bool = False):
"""Constructor.
`runner` is the runner to be used.
`parser` is the parser to be used.
`log_test` - if set, show tests and results.
`log_reduce` - if set, show reduction steps.
"""
super().__init__(runner, log_test=log_test)
self.parser = parser
self.grammar = parser.grammar()
self.start_symbol = parser.start_symbol()
self.log_reduce = log_reduce
self.try_all_combinations = False
###Output
_____no_output_____
###Markdown
A Few Helpers We define a number of helper functions, which we will need for our strategy. `tree_list_to_string()` does what the name suggests, creating a string from a list of derivation trees:
###Code
from GrammarFuzzer import DerivationTree
def tree_list_to_string(q: List[DerivationTree]) -> str:
return "[" + ", ".join([all_terminals(tree) for tree in q]) + "]"
tree_list_to_string([derivation_tree, derivation_tree])
###Output
_____no_output_____
###Markdown
The function `possible_combinations()` takes a list of lists $[[x_1, x_2], [y_1, y_2], \dots]$ and creates a list of combinations $[[x_1, y_1], [x_1, y_2], [x_2, y_1], [x_2, y_2], \dots]$.
###Code
def possible_combinations(list_of_lists: List[List[Any]]) -> List[List[Any]]:
if len(list_of_lists) == 0:
return []
ret = []
for e in list_of_lists[0]:
if len(list_of_lists) == 1:
ret.append([e])
else:
for c in possible_combinations(list_of_lists[1:]):
new_combo = [e] + c
ret.append(new_combo)
return ret
possible_combinations([[1, 2], ['a', 'b']])
###Output
_____no_output_____
###Markdown
The functions `number_of_nodes()` and `max_height()` return the number of nodes and the maximum height of the given tree, respectively.
###Code
def number_of_nodes(tree: DerivationTree) -> int:
(symbol, children) = tree
if children is None:
return 1
return 1 + sum([number_of_nodes(c) for c in children])
number_of_nodes(derivation_tree)
def max_height(tree: DerivationTree) -> int:
(symbol, children) = tree
if children is None or len(children) == 0:
return 1
return 1 + max([max_height(c) for c in children])
max_height(derivation_tree)
###Output
_____no_output_____
###Markdown
Simplification Strategies Let us now implement our two simplification strategies – replacing subtrees and alternate expansions. Finding Subtrees The method `subtrees_with_symbol()` returns all subtrees in the given tree whose root equals the given symbol. If `ignore_root` is set (default), then the root node of `tree` is not compared against. (The `depth` parameter will be discussed below.)
###Code
class GrammarReducer(GrammarReducer):
def subtrees_with_symbol(self, tree: DerivationTree,
symbol: str, depth: int = -1,
ignore_root: bool = True) -> List[DerivationTree]:
"""Find all subtrees in `tree` whose root is `symbol`.
        If `ignore_root` is true, ignore the root node of `tree`."""
ret = []
(child_symbol, children) = tree
if depth <= 0 and not ignore_root and child_symbol == symbol:
ret.append(tree)
# Search across all children
if depth != 0 and children is not None:
for c in children:
ret += self.subtrees_with_symbol(c,
symbol,
depth=depth - 1,
ignore_root=False)
return ret
###Output
_____no_output_____
###Markdown
Here's an example: These are all subtrees with `<term>` in our derivation tree `derivation_tree`.
###Code
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>")]
###Output
_____no_output_____
###Markdown
If we want to replace `<term>` subtrees to simplify the tree, these are the subtrees we could replace them with. Alternate Expansions Our second strategy, simplifying by alternate expansions, is a bit more complex. We first fetch the possible expansions for the given symbol (starting with the ones with the fewest children). For each expansion, we fill in values for the symbols from the subtree (using `subtrees_with_symbol()`, above). We then pick the first possible combination (or _all_ combinations, if the attribute `try_all_combinations` is set).
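To make the "fewest children first" ordering concrete, here is a small hedged check (not part of the original code) that sorts the `<expr>` alternatives of `EXPR_GRAMMAR` with the same key the implementation below uses:
###Code
# Hedged illustration: sort the <expr> alternatives of EXPR_GRAMMAR by the
# number of children they expand to -- the same sort key used in the class below.
sorted(EXPR_GRAMMAR["<expr>"],
       key=lambda expansion: len(expansion_to_children(expansion)))
###Output
_____no_output_____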
###Code
class GrammarReducer(GrammarReducer):
def alternate_reductions(self, tree: DerivationTree, symbol: str,
depth: int = -1):
reductions = []
expansions = self.grammar.get(symbol, [])
expansions.sort(
key=lambda expansion: len(
expansion_to_children(expansion)))
for expansion in expansions:
expansion_children = expansion_to_children(expansion)
match = True
new_children_reductions = []
for (alt_symbol, _) in expansion_children:
child_reductions = self.subtrees_with_symbol(
tree, alt_symbol, depth=depth)
if len(child_reductions) == 0:
match = False # Child not found; cannot apply rule
break
new_children_reductions.append(child_reductions)
if not match:
continue # Try next alternative
# Use the first suitable combination
for new_children in possible_combinations(new_children_reductions):
new_tree = (symbol, new_children)
if number_of_nodes(new_tree) < number_of_nodes(tree):
reductions.append(new_tree)
if not self.try_all_combinations:
break
# Sort by number of nodes
reductions.sort(key=number_of_nodes)
return reductions
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
###Output
_____no_output_____
###Markdown
Here are _all_ combinations for `<term>`:
###Code
grammar_reducer.try_all_combinations = True
print([all_terminals(t)
for t in grammar_reducer.alternate_reductions(derivation_tree, "<term>")])
###Output
_____no_output_____
###Markdown
The default, though, is simply to return the first of these:
###Code
grammar_reducer.try_all_combinations = False
[all_terminals(t) for t in grammar_reducer.alternate_reductions(
derivation_tree, "<term>")]
###Output
_____no_output_____
###Markdown
Both Strategies Together Let us now merge both strategies. To replace a subtree with a given symbol, we first search for already existing subtrees (using `subtrees_with_symbol()`); then we go for alternate expansions (using `alternate_reductions()`).
###Code
class GrammarReducer(GrammarReducer):
def symbol_reductions(self, tree: DerivationTree, symbol: str,
depth: int = -1):
"""Find all expansion alternatives for the given symbol"""
reductions = (self.subtrees_with_symbol(tree, symbol, depth=depth)
+ self.alternate_reductions(tree, symbol, depth=depth))
# Filter duplicates
unique_reductions = []
for r in reductions:
if r not in unique_reductions:
unique_reductions.append(r)
return unique_reductions
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
###Output
_____no_output_____
###Markdown
These are the possible reductions for `<expr>` nodes. Note how we first return subtrees (`1 + (2 * 3)`, `(2 * 3)`, `2 * 3`) before going for alternate expansions of `<expr>` (`1`).
###Code
reductions = grammar_reducer.symbol_reductions(derivation_tree, "<expr>")
tree_list_to_string([r for r in reductions])
###Output
_____no_output_____
###Markdown
These are the possible reductions for `<term>` nodes. Again, we first have subtrees of the derivation tree, followed by the alternate expansion `1 * 1`.
###Code
reductions = grammar_reducer.symbol_reductions(derivation_tree, "<term>")
tree_list_to_string([r for r in reductions])
###Output
_____no_output_____
###Markdown
The Reduction Strategy We are now able to return a number of alternatives for each symbol in the tree. This is what we apply in the core function of our reduction strategy, `reduce_subtree()`. Starting with `subtree`, for every child, we find possible reductions. For every reduction, we replace the child with the reduction and test the resulting (full) tree. If it fails, our reduction was successful; otherwise, we put the child back into place and try out the next reduction. Eventually, we apply `reduce_subtree()` on all children, reducing these as well.
###Code
class GrammarReducer(GrammarReducer):
def reduce_subtree(self, tree: DerivationTree,
subtree: DerivationTree, depth: int = -1):
symbol, children = subtree
if children is None or len(children) == 0:
return False
if self.log_reduce:
print("Reducing", all_terminals(subtree), "with depth", depth)
reduced = False
while True:
reduced_child = False
for i, child in enumerate(children):
if child is None:
continue
(child_symbol, _) = child
for reduction in self.symbol_reductions(
child, child_symbol, depth):
if number_of_nodes(reduction) >= number_of_nodes(child):
continue
# Try this reduction
if self.log_reduce:
print(
"Replacing",
all_terminals(
children[i]),
"by",
all_terminals(reduction))
children[i] = reduction
if self.test(all_terminals(tree)) == Runner.FAIL:
# Success
if self.log_reduce:
print("New tree:", all_terminals(tree))
reduced = reduced_child = True
break
else:
# Didn't work out - restore
children[i] = child
if not reduced_child:
if self.log_reduce:
print("Tried all alternatives for", all_terminals(subtree))
break
# Run recursively
for c in children:
if self.reduce_subtree(tree, c, depth):
reduced = True
return reduced
###Output
_____no_output_____
###Markdown
All we now need is a few drivers. The method `reduce_tree()` is the main entry point into `reduce_subtree()`:
###Code
class GrammarReducer(GrammarReducer):
def reduce_tree(self, tree):
return self.reduce_subtree(tree, tree)
###Output
_____no_output_____
###Markdown
The custom method `parse()` turns a given input into a derivation tree:
###Code
class GrammarReducer(GrammarReducer):
def parse(self, inp):
tree, *_ = self.parser.parse(inp)
if self.log_reduce:
print(all_terminals(tree))
return tree
###Output
_____no_output_____
###Markdown
The method `reduce()` is the one single entry point, parsing the input and then reducing it.
###Code
class GrammarReducer(GrammarReducer):
def reduce(self, inp):
tree = self.parse(inp)
self.reduce_tree(tree)
return all_terminals(tree)
###Output
_____no_output_____
###Markdown
End of Excursion Let us try out our `GrammarReducer` class in practice on our input `expr_input` and the `eval_mystery` runner. How quickly can we reduce it?
###Code
expr_input
grammar_reducer = GrammarReducer(
eval_mystery,
EarleyParser(EXPR_GRAMMAR),
log_test=True)
grammar_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
Success! In only five steps, our `GrammarReducer` reduces the input to the minimum that causes the failure. Note how all tests are syntactically valid by construction, avoiding the `UNRESOLVED` outcomes that cause delta debugging to stall. A Depth-Oriented Strategy Even if five steps are already good, we can still do better. If we look at the log above, we see that after test `2`, where the input (tree) is reduced to `2 * 3`, our `GrammarReducer` first tries to replace the tree with `2` and `3`, which are the alternate `` subtrees. This may work, of course; but if there are many possible subtrees, our strategy will spend quite some time trying one after the other. Delta debugging, as introduced above, follows the idea of trying to cut inputs approximately in half, and thus quickly proceeds towards a minimal input. By replacing a tree with much smaller subtrees, we _could_ possibly reduce a tree significantly, but may need several attempts to do so. A better strategy is to only consider _large_ subtrees first – both for the subtree replacement as well as for alternate expansions. To find such _large_ subtrees, we limit the _depth_ by which we search for possible replacements in the subtree – first, by looking at the direct descendants, later at lower descendants. This is the role of the `depth` parameter used in `subtrees_with_symbol()` and passed through the invoking functions. If set, _only_ symbols at the given depth are returned. Here's an example, starting again with our derivation tree `derivation_tree`:
###Code
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
display_tree(derivation_tree)
###Output
_____no_output_____
###Markdown
At a depth of 1, there is no `<term>` symbol:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=1)]
###Output
_____no_output_____
###Markdown
At a depth of 2, we have the `<term>` subtree on the left-hand side:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=2)]
###Output
_____no_output_____
###Markdown
At a depth of 3, we have the `<term>` subtree on the right-hand side:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=3)]
###Output
_____no_output_____
###Markdown
The idea is now to start with a depth of 0, subsequently increasing it as we proceed:
###Code
class GrammarReducer(GrammarReducer):
def reduce_tree(self, tree):
depth = 0
while depth < max_height(tree):
reduced = self.reduce_subtree(tree, tree, depth)
if reduced:
depth = 0 # Start with new tree
else:
depth += 1 # Extend search for subtrees
return tree
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_test=True)
grammar_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
We see that a depth-oriented strategy needs even fewer steps in our setting. Comparing Strategies We close by demonstrating the difference between text-based delta debugging and our grammar-based reduction. We build a very long expression:
###Code
from GrammarFuzzer import GrammarFuzzer
long_expr_input = GrammarFuzzer(EXPR_GRAMMAR, min_nonterminals=100).fuzz()
long_expr_input
###Output
_____no_output_____
###Markdown
With grammars, we need only a handful of tests to reduce the failure-inducing input:
###Code
from Timer import Timer
grammar_reducer = GrammarReducer(eval_mystery, EarleyParser(EXPR_GRAMMAR))
with Timer() as grammar_time:
print(grammar_reducer.reduce(long_expr_input))
grammar_reducer.tests
grammar_time.elapsed_time()
###Output
_____no_output_____
###Markdown
Delta debugging, in contrast, requires orders of magnitude more tests (and consequently, time). Again, the reduction is not nearly as perfect as it is with the grammar-based reducer.
###Code
dd_reducer = DeltaDebuggingReducer(eval_mystery)
with Timer() as dd_time:
print(dd_reducer.reduce(long_expr_input))
dd_reducer.tests
dd_time.elapsed_time()
###Output
_____no_output_____
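###Markdown
To put the two runs side by side, here is a small hedged summary cell (not part of the original chapter); the exact numbers depend on the randomly generated `long_expr_input`.
###Code
# Hedged comparison of both reducers on the same long input.
print("GrammarReducer:        %d tests, %.2fs" %
      (grammar_reducer.tests, grammar_time.elapsed_time()))
print("DeltaDebuggingReducer: %d tests, %.2fs" %
      (dd_reducer.tests, dd_time.elapsed_time()))
###Output
_____no_output_____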
###Markdown
We see that if an input is syntactically complex, using a grammar to reduce inputs is the best way to go. Synopsis A _reducer_ takes a failure-inducing input and reduces it to the minimum that still reproduces the failure. This chapter provides `Reducer` classes that implement such reducers. Here is a simple example: An arithmetic expression causes an error in the Python interpreter:
###Code
!python -c 'x = 1 + 2 * 3 / 0'
###Output
_____no_output_____
###Markdown
Can we reduce this input to a minimum? To use a `Reducer`, one first has to build a `Runner` whose outcome is `FAIL` if the precise error occurs. We therefore build a `ZeroDivisionRunner` whose `run()` method will specifically return a `FAIL` outcome if a `ZeroDivisionError` occurs.
###Code
from Fuzzer import ProgramRunner
import subprocess
class ZeroDivisionRunner(ProgramRunner):
"""Make outcome 'FAIL' if ZeroDivisionError occurs"""
def run(self, inp: str = "") -> Tuple[subprocess.CompletedProcess, Outcome]:
process, outcome = super().run(inp)
if process.stderr.find('ZeroDivisionError') >= 0:
outcome = 'FAIL'
return process, outcome
###Output
_____no_output_____
###Markdown
If we feed this expression into a `ZeroDivisionRunner`, it will produce an outcome of `FAIL` as designed.
###Code
python_input = "x = 1 + 2 * 3 / 0"
python_runner = ZeroDivisionRunner("python")
process, outcome = python_runner.run(python_input)
outcome
###Output
_____no_output_____
###Markdown
Delta Debugging is a simple and robust reduction algorithm. We can tie a `DeltaDebuggingReducer` to this runner, and have it determine the substring that causes the `python` program to fail:
###Code
dd = DeltaDebuggingReducer(python_runner)
dd.reduce(python_input)
###Output
_____no_output_____
###Markdown
The input is reduced to the minimum: We get the essence of the division by zero.
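As a hedged usage sketch (the `PredicateRunner` helper below is hypothetical, not part of this chapter's code), the same reducer can be tied to any Boolean failure predicate:
###Code
# Hypothetical helper (illustration only): wrap a Boolean failure predicate
# as a Runner so that arbitrary checks can be plugged into a Reducer.
class PredicateRunner(Runner):
    def __init__(self, predicate):
        self.predicate = predicate  # returns True if `inp` triggers the failure
    def run(self, inp: str) -> Tuple[str, Outcome]:
        return (inp, Runner.FAIL if self.predicate(inp) else Runner.PASS)
dd_pred = DeltaDebuggingReducer(PredicateRunner(lambda s: "<" in s and ">" in s))
dd_pred.reduce("some <tag> in the middle")
###Output
_____no_output_____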
###Code
# ignore
from ClassDiagram import display_class_hierarchy
# ignore
display_class_hierarchy([DeltaDebuggingReducer, GrammarReducer],
public_methods=[
Reducer.__init__,
Reducer.reset,
Reducer.reduce,
DeltaDebuggingReducer.reduce,
GrammarReducer.__init__,
GrammarReducer.reduce,
],
types={
'DerivationTree': DerivationTree,
'Grammar': Grammar,
'Outcome': Outcome,
},
project='fuzzingbook')
###Output
_____no_output_____
expr_input
grammar_reducer = GrammarReducer(
eval_mystery,
EarleyParser(EXPR_GRAMMAR),
log_test=True)
grammar_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
Success! In only five steps, our `GrammarReducer` reduces the input to the minimum that causes the failure. Note how all tests are syntactically valid by construction, avoiding the `UNRESOLVED` outcomes that cause delta debugging to stall. A Depth-Oriented Strategy Even if five steps are already good, we can still do better. If we look at the log above, we see that after test `2`, where the input (tree) is reduced to `2 * 3`, our `GrammarReducer` first tries to replace the tree with `2` and `3`, which are the alternate `` subtrees. This may work, of course; but if there are many possible subtrees, our strategy will spend quite some time trying one after the other. Delta debugging, as introduced above, follows the idea of trying to cut inputs approximately in half, and thus quickly proceeds towards a minimal input. By replacing a tree with much smaller subtrees, we _could_ possibly reduce a tree significantly, but may need several attempts to do so. A better strategy is to only consider _large_ subtrees first – both for the subtree replacement as well as for alternate expansions. To find such _large_ subtrees, we limit the _depth_ by which we search for possible replacements in the subtree – first, by looking at the direct descendants, later at lower descendants. This is the role of the `depth` parameter used in `subtrees_with_symbol()` and passed through the invoking functions. If set, _only_ symbols at the given depth are returned. Here's an example, starting again with our derivation tree `derivation_tree`:
###Code
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_reduce=True)
all_terminals(derivation_tree)
display_tree(derivation_tree)
###Output
_____no_output_____
###Markdown
At a depth of 1, there is no `<term>` symbol:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=1)]
###Output
_____no_output_____
###Markdown
At a depth of 2, we have the `<term>` subtree on the left-hand side:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=2)]
###Output
_____no_output_____
###Markdown
At a depth of 3, we have the `<term>` subtree on the right-hand side:
###Code
[all_terminals(t) for t in grammar_reducer.subtrees_with_symbol(
derivation_tree, "<term>", depth=3)]
###Output
_____no_output_____
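###Markdown
To make the notion of depth more tangible, here is a small helper sketch (not part of the `GrammarReducer` API) that walks a derivation tree and groups its nonterminal symbols by depth. It assumes the usual `(symbol, children)` tree structure and simply treats every symbol starting with `<` as a nonterminal.
###Code
def symbols_by_depth(tree, depth=0, table=None):
    """Group the nonterminal symbols of a derivation tree by their depth."""
    if table is None:
        table = {}
    (symbol, children) = tree
    if symbol.startswith("<"):  # crude nonterminal check, sufficient for this sketch
        table.setdefault(depth, []).append(symbol)
    for child in children or []:  # children may be None for unexpanded nodes
        symbols_by_depth(child, depth + 1, table)
    return table

symbols_by_depth(derivation_tree)
###Output
_____no_output_____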
###Markdown
The idea is now to start with a depth of 0, subsequently increasing it as we proceed:
###Code
class GrammarReducer(GrammarReducer):
def reduce_tree(self, tree):
depth = 0
while depth < max_height(tree):
reduced = self.reduce_subtree(tree, tree, depth)
if reduced:
depth = 0 # Start with new tree
else:
depth += 1 # Extend search for subtrees
return tree
grammar_reducer = GrammarReducer(
mystery,
EarleyParser(EXPR_GRAMMAR),
log_test=True)
grammar_reducer.reduce(expr_input)
###Output
_____no_output_____
###Markdown
We see that a depth-oriented strategy needs even fewer steps in our setting. Comparing Strategies We close by demonstrating the difference between text-based delta debugging and our grammar-based reduction. We build a very long expression:
###Code
from GrammarFuzzer import GrammarFuzzer
long_expr_input = GrammarFuzzer(EXPR_GRAMMAR, min_nonterminals=100).fuzz()
long_expr_input
###Output
_____no_output_____
###Markdown
With grammars, we need only a handful of tests to find the failure-inducing input:
###Code
from Timer import Timer
grammar_reducer = GrammarReducer(eval_mystery, EarleyParser(EXPR_GRAMMAR))
with Timer() as grammar_time:
print(grammar_reducer.reduce(long_expr_input))
grammar_reducer.tests
grammar_time.elapsed_time()
###Output
_____no_output_____
###Markdown
Delta debugging, in contrast, requires orders of magnitude more tests (and consequently, time). Again, the reduction is not nearly as perfect as it is with the grammar-based reducer.
###Code
dd_reducer = DeltaDebuggingReducer(eval_mystery)
with Timer() as dd_time:
print(dd_reducer.reduce(long_expr_input))
dd_reducer.tests
dd_time.elapsed_time()
###Output
_____no_output_____
###Markdown
We see that if an input is syntactically complex, using a grammar to reduce inputs is the best way to go. Synopsis A _reducer_ takes a failure-inducing input and reduces it to the minimum that still reproduces the failure. This chapter provides `Reducer` classes that implement such reducers. Here is a simple example: An arithmetic expression causes an error in the Python interpreter:
###Code
!python -c 'x = 1 + 2 * 3 / 0'
###Output
_____no_output_____
###Markdown
Can we reduce this input to a minimum? To use a `Reducer`, one first has to build a `Runner` whose outcome is `FAIL` if the precise error occurs. We therefore build a `ZeroDivisionRunner` whose `run()` method will specifically return a `FAIL` outcome if a `ZeroDivisionError` occurs.
###Code
from Fuzzer import ProgramRunner
import subprocess
class ZeroDivisionRunner(ProgramRunner):
"""Make outcome 'FAIL' if ZeroDivisionError occurs"""
def run(self, inp: str = "") -> Tuple[subprocess.CompletedProcess, Outcome]:
process, outcome = super().run(inp)
if process.stderr.find('ZeroDivisionError') >= 0:
outcome = 'FAIL'
return process, outcome
###Output
_____no_output_____
###Markdown
If we feed this expression into a `ZeroDivisionRunner`, it will produce an outcome of `FAIL` as designed.
###Code
python_input = "x = 1 + 2 * 3 / 0"
python_runner = ZeroDivisionRunner("python")
process, outcome = python_runner.run(python_input)
outcome
###Output
_____no_output_____
###Markdown
Delta Debugging is a simple and robust reduction algorithm. We can tie a `DeltaDebuggingReducer` to this runner, and have it determine the substring that causes the `python` program to fail:
###Code
dd = DeltaDebuggingReducer(python_runner)
dd.reduce(python_input)
###Output
_____no_output_____
###Markdown
The input is reduced to the minimum: We get the essence of the division by zero.
###Code
# ignore
from ClassDiagram import display_class_hierarchy
# ignore
display_class_hierarchy([DeltaDebuggingReducer, GrammarReducer],
public_methods=[
Reducer.__init__,
Reducer.reset,
Reducer.reduce,
DeltaDebuggingReducer.reduce,
GrammarReducer.__init__,
GrammarReducer.reduce,
],
types={
'DerivationTree': DerivationTree,
'Grammar': Grammar,
'Outcome': Outcome,
},
project='fuzzingbook')
###Output
_____no_output_____ |
docs/tutorial_notebooks/tutorial15/Vision_Transformer.ipynb
###Markdown
Tutorial 15: Vision Transformers**Filled notebook:** [](https://github.com/phlippe/uvadlc_notebooks/blob/master/docs/tutorial_notebooks/tutorial15/Vision_Transformer.ipynb)[](https://colab.research.google.com/github/phlippe/uvadlc_notebooks/blob/master/docs/tutorial_notebooks/tutorial15/Vision_Transformer.ipynb) **Pre-trained models:** [](https://github.com/phlippe/saved_models/tree/main/tutorial15)[](https://drive.google.com/drive/folders/1BmisSPs5BXQKpolyHp5X4klSAhpDx479?usp=sharing) **Recordings:** [](https://youtu.be/4UyBxlJChfc) In this tutorial, we will take a closer look at a recent new trend: Transformers for Computer Vision. Since [Alexey Dosovitskiy et al.](https://openreview.net/pdf?id=YicbFdNTTy) successfully applied a Transformer on a variety of image recognition benchmarks, there have been an incredible number of follow-up works showing that CNNs might not be the optimal architecture for Computer Vision anymore. But how do Vision Transformers work exactly, and what benefits and drawbacks do they offer in contrast to CNNs? We will answer these questions by implementing a Vision Transformer ourselves and training it on the popular, small dataset CIFAR10. We will compare these results to the convolutional architectures of [Tutorial 5](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial5/Inception_ResNet_DenseNet.html). If you are not familiar with Transformers yet, take a look at [Tutorial 6](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial6/Transformers_and_MHAttention.html) where we discuss the fundamentals of Multi-Head Attention and Transformers. As in many previous tutorials, we will use [PyTorch Lightning](https://www.pytorchlightning.ai/) again (introduced in [Tutorial 5](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial5/Inception_ResNet_DenseNet.html)). Let's start with importing our standard set of libraries.
###Code
## Standard libraries
import os
import numpy as np
import random
import math
import json
from functools import partial
from PIL import Image
## Imports for plotting
import matplotlib.pyplot as plt
plt.set_cmap('cividis')
%matplotlib inline
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('svg', 'pdf') # For export
from matplotlib.colors import to_rgb
import matplotlib
matplotlib.rcParams['lines.linewidth'] = 2.0
import seaborn as sns
sns.reset_orig()
## tqdm for loading bars
from tqdm.notebook import tqdm
## PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import torch.optim as optim
## Torchvision
import torchvision
from torchvision.datasets import CIFAR10
from torchvision import transforms
# PyTorch Lightning
try:
import pytorch_lightning as pl
except ModuleNotFoundError: # Google Colab does not have PyTorch Lightning installed by default. Hence, we do it here if necessary
!pip install --quiet pytorch-lightning>=1.4
import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
# Import tensorboard
%load_ext tensorboard
# Path to the folder where the datasets are/should be downloaded (e.g. CIFAR10)
DATASET_PATH = "../data"
# Path to the folder where the pretrained models are saved
CHECKPOINT_PATH = "../saved_models/tutorial15"
# Setting the seed
pl.seed_everything(42)
# Ensure that all operations are deterministic on GPU (if used) for reproducibility
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
print("Device:", device)
###Output
Global seed set to 42
###Markdown
We provide a pre-trained Vision Transformer, which we download in the next cell. However, Vision Transformers can be trained relatively quickly on CIFAR10, with an overall training time of less than an hour on an NVIDIA TitanRTX. Feel free to experiment with training your own Transformer once you have gone through the whole notebook.
###Code
import urllib.request
from urllib.error import HTTPError
# Github URL where saved models are stored for this tutorial
base_url = "https://raw.githubusercontent.com/phlippe/saved_models/main/"
# Files to download
pretrained_files = ["tutorial15/ViT.ckpt", "tutorial15/tensorboards/ViT/events.out.tfevents.ViT",
"tutorial5/tensorboards/ResNet/events.out.tfevents.resnet"]
# Create checkpoint path if it doesn't exist yet
os.makedirs(CHECKPOINT_PATH, exist_ok=True)
# For each file, check whether it already exists. If not, try downloading it.
for file_name in pretrained_files:
file_path = os.path.join(CHECKPOINT_PATH, file_name.split("/",1)[1])
if "/" in file_name.split("/",1)[1]:
os.makedirs(file_path.rsplit("/",1)[0], exist_ok=True)
if not os.path.isfile(file_path):
file_url = base_url + file_name
print(f"Downloading {file_url}...")
try:
urllib.request.urlretrieve(file_url, file_path)
except HTTPError as e:
print("Something went wrong. Please try to download the file from the GDrive folder, or contact the author with the full output including the following error:\n", e)
###Output
_____no_output_____
###Markdown
We load the CIFAR10 dataset below. We use the same setup of the datasets and data augmentations as for the CNNs in Tutorial 5 to keep a fair comparison. The constants in the `transforms.Normalize` correspond to the values that scale and shift the data to a zero mean and standard deviation of one.
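For reference, here is a short sketch (not part of the original setup) of how such per-channel constants can be derived, assuming torchvision's `CIFAR10` exposes the raw images via its `data` attribute as a `[50000, 32, 32, 3]` uint8 array; the printed values should closely match the constants used in `transforms.Normalize` below.
###Code
# Sketch: derive the per-channel mean and std of CIFAR10 from the raw images scaled to [0, 1]
raw_train = CIFAR10(root=DATASET_PATH, train=True, download=True)
raw_images = raw_train.data.astype(np.float32) / 255.0
print("mean:", raw_images.mean(axis=(0, 1, 2)))
print("std: ", raw_images.std(axis=(0, 1, 2)))
###Output
_____no_output_____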
###Code
test_transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize([0.49139968, 0.48215841, 0.44653091], [0.24703223, 0.24348513, 0.26158784])
])
# For training, we add some augmentation. Networks are too powerful and would overfit.
train_transform = transforms.Compose([transforms.RandomHorizontalFlip(),
transforms.RandomResizedCrop((32,32),scale=(0.8,1.0),ratio=(0.9,1.1)),
transforms.ToTensor(),
transforms.Normalize([0.49139968, 0.48215841, 0.44653091], [0.24703223, 0.24348513, 0.26158784])
])
# Loading the training dataset. We need to split it into a training and validation part
# We need to do a little trick because the validation set should not use the augmentation.
train_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=train_transform, download=True)
val_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=test_transform, download=True)
pl.seed_everything(42)
train_set, _ = torch.utils.data.random_split(train_dataset, [45000, 5000])
pl.seed_everything(42)
_, val_set = torch.utils.data.random_split(val_dataset, [45000, 5000])
# Loading the test set
test_set = CIFAR10(root=DATASET_PATH, train=False, transform=test_transform, download=True)
# We define a set of data loaders that we can use for various purposes later.
train_loader = data.DataLoader(train_set, batch_size=128, shuffle=True, drop_last=True, pin_memory=True, num_workers=4)
val_loader = data.DataLoader(val_set, batch_size=128, shuffle=False, drop_last=False, num_workers=4)
test_loader = data.DataLoader(test_set, batch_size=128, shuffle=False, drop_last=False, num_workers=4)
# Visualize some examples
NUM_IMAGES = 4
CIFAR_images = torch.stack([val_set[idx][0] for idx in range(NUM_IMAGES)], dim=0)
img_grid = torchvision.utils.make_grid(CIFAR_images, nrow=4, normalize=True, pad_value=0.9)
img_grid = img_grid.permute(1, 2, 0)
plt.figure(figsize=(8,8))
plt.title("Image examples of the CIFAR10 dataset")
plt.imshow(img_grid)
plt.axis('off')
plt.show()
plt.close()
###Output
Files already downloaded and verified
Files already downloaded and verified
###Markdown
Transformers for image classification Transformers were originally proposed to process sets, since the architecture is permutation-equivariant, i.e., it produces the same output, permuted, if the input is permuted. To apply Transformers to sequences, we have simply added a positional encoding to the input feature vectors, and the model learned by itself what to do with it. So, why not do the same thing on images? This is exactly what [Alexey Dosovitskiy et al.](https://openreview.net/pdf?id=YicbFdNTTy) proposed in their paper "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale". Specifically, the Vision Transformer is a model for image classification that views images as sequences of smaller patches. As a preprocessing step, we split an image of, for example, $48\times 48$ pixels into 9 $16\times 16$ patches. Each of those patches is considered to be a "word"/"token" and projected to a feature space. By adding positional encodings and a classification token on top, we can apply a Transformer as usual to this sequence and start training it for our task. A nice GIF visualization of the architecture is shown below (figure credit - [Phil Wang](https://github.com/lucidrains/vit-pytorch/blob/main/images/vit.gif)): We will walk step by step through the Vision Transformer, and implement all parts by ourselves. First, let's implement the image preprocessing: an image of size $N\times N$ has to be split into $(N/M)^2$ patches of size $M\times M$. These represent the input words to the Transformer.
###Code
def img_to_patch(x, patch_size, flatten_channels=True):
"""
Inputs:
x - torch.Tensor representing the image of shape [B, C, H, W]
patch_size - Number of pixels per dimension of the patches (integer)
flatten_channels - If True, the patches will be returned in a flattened format
                           as a feature vector instead of an image grid.
"""
B, C, H, W = x.shape
x = x.reshape(B, C, H//patch_size, patch_size, W//patch_size, patch_size)
x = x.permute(0, 2, 4, 1, 3, 5) # [B, H', W', C, p_H, p_W]
x = x.flatten(1,2) # [B, H'*W', C, p_H, p_W]
if flatten_channels:
x = x.flatten(2,4) # [B, H'*W', C*p_H*p_W]
return x
###Output
_____no_output_____
###Markdown
Let's take a look at how that works for our CIFAR examples above. For our images of size $32\times 32$, we choose a patch size of 4. Hence, we obtain sequences of 64 patches of size $4\times 4$. We visualize them below:
###Code
img_patches = img_to_patch(CIFAR_images, patch_size=4, flatten_channels=False)
fig, ax = plt.subplots(CIFAR_images.shape[0], 1, figsize=(14,3))
fig.suptitle("Images as input sequences of patches")
for i in range(CIFAR_images.shape[0]):
img_grid = torchvision.utils.make_grid(img_patches[i], nrow=64, normalize=True, pad_value=0.9)
img_grid = img_grid.permute(1, 2, 0)
ax[i].imshow(img_grid)
ax[i].axis('off')
plt.show()
plt.close()
###Output
_____no_output_____
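###Markdown
Before moving on, a quick shape check (an illustrative sketch, not part of the original tutorial): a dummy batch of four $32\times 32$ RGB images should become four sequences of $(32/4)^2 = 64$ patches, each flattened to $3\cdot 4\cdot 4 = 48$ features.
###Code
dummy_imgs = torch.randn(4, 3, 32, 32)            # [B, C, H, W]
patches = img_to_patch(dummy_imgs, patch_size=4)  # [B, H'*W', C*p_H*p_W]
print("Patch tensor shape:", patches.shape)       # expected: torch.Size([4, 64, 48])
###Output
_____no_output_____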
###Markdown
Compared to the original images, it is much harder to recognize the objects from those patch lists now. Still, this is the input we provide to the Transformer for classifying the images. The model has to learn itself how it has to combine the patches to recognize the objects. The inductive bias in CNNs that an image is a grid of pixels, is lost in this input format.After we have looked at the preprocessing, we can now start building the Transformer model. Since we have discussed the fundamentals of Multi-Head Attention in [Tutorial 6](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial6/Transformers_and_MHAttention.html), we will use the PyTorch module `nn.MultiheadAttention` ([docs](https://pytorch.org/docs/stable/generated/torch.nn.MultiheadAttention.html?highlight=multiheadtorch.nn.MultiheadAttention)) here. Further, we use the Pre-Layer Normalization version of the Transformer blocks proposed by [Ruibin Xiong et al.](http://proceedings.mlr.press/v119/xiong20b/xiong20b.pdf) in 2020. The idea is to apply Layer Normalization not in between residual blocks, but instead as a first layer in the residual blocks. This reorganization of the layers supports better gradient flow and removes the necessity of a warm-up stage. A visualization of the difference between the standard Post-LN and the Pre-LN version is shown below.The implementation of the Pre-LN attention block looks as follows:
###Code
class AttentionBlock(nn.Module):
def __init__(self, embed_dim, hidden_dim, num_heads, dropout=0.0):
"""
Inputs:
embed_dim - Dimensionality of input and attention feature vectors
hidden_dim - Dimensionality of hidden layer in feed-forward network
(usually 2-4x larger than embed_dim)
num_heads - Number of heads to use in the Multi-Head Attention block
dropout - Amount of dropout to apply in the feed-forward network
"""
super().__init__()
self.layer_norm_1 = nn.LayerNorm(embed_dim)
self.attn = nn.MultiheadAttention(embed_dim, num_heads)
self.layer_norm_2 = nn.LayerNorm(embed_dim)
self.linear = nn.Sequential(
nn.Linear(embed_dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, embed_dim),
nn.Dropout(dropout)
)
def forward(self, x):
inp_x = self.layer_norm_1(x)
x = x + self.attn(inp_x, inp_x, inp_x)[0]
x = x + self.linear(self.layer_norm_2(x))
return x
###Output
_____no_output_____
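###Markdown
As a small smoke test (again an illustrative sketch rather than part of the tutorial), we can verify that the block leaves the sequence shape unchanged. Note that `nn.MultiheadAttention` expects the sequence dimension first by default, i.e. inputs of shape `[SeqLen, Batch, embed_dim]`.
###Code
block = AttentionBlock(embed_dim=256, hidden_dim=512, num_heads=8, dropout=0.2)
dummy_seq = torch.randn(65, 8, 256)  # 64 patches + 1 CLS token, batch size 8
print(block(dummy_seq).shape)        # expected: torch.Size([65, 8, 256])
###Output
_____no_output_____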
###Markdown
Now we have all modules ready to build our own Vision Transformer. Besides the Transformer encoder, we need the following modules:* A **linear projection** layer that maps the input patches to a feature vector of larger size. It is implemented by a simple linear layer that takes each $M\times M$ patch independently as input.* A **classification token** that is added to the input sequence. We will use the output feature vector of the classification token (CLS token in short) for determining the classification prediction.* Learnable **positional encodings** that are added to the tokens before being processed by the Transformer. Those are needed to learn position-dependent information, and convert the set to a sequence. Since we usually work with a fixed resolution, we can learn the positional encodings instead of having the pattern of sine and cosine functions.* An **MLP head** that takes the output feature vector of the CLS token, and maps it to a classification prediction. This is usually implemented by a small feed-forward network or even a single linear layer.With those components in mind, let's implement the full Vision Transformer below:
###Code
class VisionTransformer(nn.Module):
def __init__(self, embed_dim, hidden_dim, num_channels, num_heads, num_layers, num_classes, patch_size, num_patches, dropout=0.0):
"""
Inputs:
embed_dim - Dimensionality of the input feature vectors to the Transformer
hidden_dim - Dimensionality of the hidden layer in the feed-forward networks
within the Transformer
num_channels - Number of channels of the input (3 for RGB)
num_heads - Number of heads to use in the Multi-Head Attention block
num_layers - Number of layers to use in the Transformer
num_classes - Number of classes to predict
patch_size - Number of pixels that the patches have per dimension
num_patches - Maximum number of patches an image can have
dropout - Amount of dropout to apply in the feed-forward network and
on the input encoding
"""
super().__init__()
self.patch_size = patch_size
# Layers/Networks
self.input_layer = nn.Linear(num_channels*(patch_size**2), embed_dim)
self.transformer = nn.Sequential(*[AttentionBlock(embed_dim, hidden_dim, num_heads, dropout=dropout) for _ in range(num_layers)])
self.mlp_head = nn.Sequential(
nn.LayerNorm(embed_dim),
nn.Linear(embed_dim, num_classes)
)
self.dropout = nn.Dropout(dropout)
# Parameters/Embeddings
self.cls_token = nn.Parameter(torch.randn(1,1,embed_dim))
self.pos_embedding = nn.Parameter(torch.randn(1,1+num_patches,embed_dim))
def forward(self, x):
# Preprocess input
x = img_to_patch(x, self.patch_size)
B, T, _ = x.shape
x = self.input_layer(x)
# Add CLS token and positional encoding
cls_token = self.cls_token.repeat(B, 1, 1)
x = torch.cat([cls_token, x], dim=1)
x = x + self.pos_embedding[:,:T+1]
        # Apply Transformer
x = self.dropout(x)
x = x.transpose(0, 1)
x = self.transformer(x)
# Perform classification prediction
cls = x[0]
out = self.mlp_head(cls)
return out
###Output
_____no_output_____
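###Markdown
One more quick sanity check (an illustrative sketch): instantiating the model with the hyperparameters we will use later, a CIFAR10-sized batch should yield one logit vector of length 10 per image.
###Code
vit = VisionTransformer(embed_dim=256, hidden_dim=512, num_channels=3, num_heads=8,
                        num_layers=6, num_classes=10, patch_size=4, num_patches=64,
                        dropout=0.2)
dummy_imgs = torch.randn(2, 3, 32, 32)
print("Output shape:", vit(dummy_imgs).shape)  # expected: torch.Size([2, 10])
print("Number of parameters:", sum(p.numel() for p in vit.parameters()))
###Output
_____no_output_____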
###Markdown
Finally, we can put everything into a PyTorch Lightning Module as usual. We use `torch.optim.AdamW` as the optimizer, which is Adam with a corrected weight decay implementation. Since we use the Pre-LN Transformer version, we do not need to use a learning rate warmup stage anymore. Instead, we use the same learning rate scheduler as the CNNs in our previous tutorial on image classification.
###Code
class ViT(pl.LightningModule):
def __init__(self, model_kwargs, lr):
super().__init__()
self.save_hyperparameters()
self.model = VisionTransformer(**model_kwargs)
self.example_input_array = next(iter(train_loader))[0]
def forward(self, x):
return self.model(x)
def configure_optimizers(self):
optimizer = optim.AdamW(self.parameters(), lr=self.hparams.lr)
lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100,150], gamma=0.1)
return [optimizer], [lr_scheduler]
def _calculate_loss(self, batch, mode="train"):
imgs, labels = batch
preds = self.model(imgs)
loss = F.cross_entropy(preds, labels)
acc = (preds.argmax(dim=-1) == labels).float().mean()
self.log(f'{mode}_loss', loss)
self.log(f'{mode}_acc', acc)
return loss
def training_step(self, batch, batch_idx):
loss = self._calculate_loss(batch, mode="train")
return loss
def validation_step(self, batch, batch_idx):
self._calculate_loss(batch, mode="val")
def test_step(self, batch, batch_idx):
self._calculate_loss(batch, mode="test")
###Output
_____no_output_____
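###Markdown
To make the scheduler concrete (a small illustration, not part of the training code): `MultiStepLR` with `milestones=[100,150]` and `gamma=0.1` keeps the learning rate constant and multiplies it by 0.1 after epochs 100 and 150. The dummy parameter below exists only to satisfy the optimizer.
###Code
dummy_param = nn.Parameter(torch.zeros(1))
optimizer = optim.AdamW([dummy_param], lr=3e-4)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100, 150], gamma=0.1)
lrs = []
for epoch in range(180):
    lrs.append(scheduler.get_last_lr()[0])
    optimizer.step()   # no gradients here - only to keep the optimizer/scheduler call order valid
    scheduler.step()
print("epoch   0:", lrs[0])    # 3e-4
print("epoch 100:", lrs[100])  # ~3e-5 (first decay)
print("epoch 150:", lrs[150])  # ~3e-6 (second decay)
###Output
_____no_output_____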
###Markdown
Experiments Commonly, Vision Transformers are applied to large-scale image classification benchmarks such as ImageNet to leverage their full potential. However, here we take a step back and ask: can a Vision Transformer also succeed on classical, small benchmarks such as CIFAR10? To find this out, we train a Vision Transformer from scratch on the CIFAR10 dataset. Let's first create a training function for our PyTorch Lightning module, which also loads the pre-trained model if you have downloaded it above.
###Code
def train_model(**kwargs):
trainer = pl.Trainer(default_root_dir=os.path.join(CHECKPOINT_PATH, "ViT"),
gpus=1 if str(device)=="cuda:0" else 0,
max_epochs=180,
callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc"),
LearningRateMonitor("epoch")],
progress_bar_refresh_rate=1)
trainer.logger._log_graph = True # If True, we plot the computation graph in tensorboard
trainer.logger._default_hp_metric = None # Optional logging argument that we don't need
# Check whether pretrained model exists. If yes, load it and skip training
pretrained_filename = os.path.join(CHECKPOINT_PATH, "ViT.ckpt")
if os.path.isfile(pretrained_filename):
print(f"Found pretrained model at {pretrained_filename}, loading...")
model = ViT.load_from_checkpoint(pretrained_filename) # Automatically loads the model with the saved hyperparameters
else:
        pl.seed_everything(42) # To be reproducible
model = ViT(**kwargs)
trainer.fit(model, train_loader, val_loader)
model = ViT.load_from_checkpoint(trainer.checkpoint_callback.best_model_path) # Load best checkpoint after training
# Test best model on validation and test set
val_result = trainer.test(model, test_dataloaders=val_loader, verbose=False)
test_result = trainer.test(model, test_dataloaders=test_loader, verbose=False)
result = {"test": test_result[0]["test_acc"], "val": val_result[0]["test_acc"]}
return model, result
###Output
_____no_output_____
###Markdown
Now, we can already start training our model. As seen in our implementation, we have a couple of hyperparameters that we have to set. When creating this notebook, we have performed a small grid search over hyperparameters and listed the best hyperparameters in the cell below. Nevertheless, it is worth discussing the influence that each hyperparameter has, and what intuition we have for choosing its value.First, let's consider the patch size. The smaller we make the patches, the longer the input sequences to the Transformer become. While in general, this allows the Transformer to model more complex functions, it requires a longer computation time due to its quadratic memory usage in the attention layer. Furthermore, small patches can make the task more difficult since the Transformer has to learn which patches are close-by, and which are far away. We experimented with patch sizes of 2, 4, and 8 which gives us the input sequence lengths of 256, 64, and 16 respectively. We found 4 to result in the best performance and hence pick it below. Next, the embedding and hidden dimensionality have a similar impact on a Transformer as to an MLP. The larger the sizes, the more complex the model becomes, and the longer it takes to train. In Transformers, however, we have one more aspect to consider: the query-key sizes in the Multi-Head Attention layers. Each key has the feature dimensionality of `embed_dim/num_heads`. Considering that we have an input sequence length of 64, a minimum reasonable size for the key vectors is 16 or 32. Lower dimensionalities can restrain the possible attention maps too much. We observed that more than 8 heads are not necessary for the Transformer, and therefore pick an embedding dimensionality of `256`. The hidden dimensionality in the feed-forward networks is usually 2-4x larger than the embedding dimensionality, and thus we pick `512`. Finally, the learning rate for Transformers is usually relatively small, and in papers, a common value to use is 3e-5. However, since we work with a smaller dataset and have a potentially easier task, we found that we are able to increase the learning rate to 3e-4 without any problems. To reduce overfitting, we use a dropout value of 0.2. Remember that we also use small image augmentations as regularization during training.Feel free to explore the hyperparameters yourself by changing the values below. In general, the Vision Transformer did not show to be too sensitive to the hyperparameter choices on the CIFAR10 dataset.
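As a quick back-of-the-envelope check of the numbers mentioned above, the small sketch below recomputes the input sequence lengths and the per-head key/query dimensionality:
###Code
image_size = 32
for patch_size in [2, 4, 8]:
    seq_len = (image_size // patch_size) ** 2
    print(f"patch size {patch_size} -> input sequence length {seq_len}")
print("key/query dimensionality per head:", 256 // 8)  # embed_dim / num_heads
###Output
_____no_output_____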
###Code
model, results = train_model(model_kwargs={
'embed_dim': 256,
'hidden_dim': 512,
'num_heads': 8,
'num_layers': 6,
'patch_size': 4,
'num_channels': 3,
'num_patches': 64,
'num_classes': 10,
'dropout': 0.2
},
lr=3e-4)
print("ViT results", results)
###Output
GPU available: True, used: True
TPU available: False, using: 0 TPU cores
###Markdown
The Vision Transformer achieves a validation and test performance of about 75%. In comparison, almost all CNN architectures that we have tested in [Tutorial 5](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial5/Inception_ResNet_DenseNet.html) obtained a classification performance of around 90%. This is a considerable gap and shows that although Vision Transformers perform strongly on ImageNet with potential pretraining, they cannot come close to simple CNNs on CIFAR10 when being trained from scratch. The differences between a CNN and Transformer can be well observed in the training curves. Let's look at them in a tensorboard below:
###Code
# Opens tensorboard in notebook. Adjust the path to your CHECKPOINT_PATH!
%tensorboard --logdir ../saved_models/tutorial15/tensorboards/
###Output
_____no_output_____
###Markdown
Tutorial 15: Vision Transformers**Filled notebook:** [](https://github.com/phlippe/uvadlc_notebooks/blob/master/docs/tutorial_notebooks/tutorial15/Vision_Transformer.ipynb)[](https://colab.research.google.com/github/phlippe/uvadlc_notebooks/blob/master/docs/tutorial_notebooks/tutorial15/Vision_Transformer.ipynb) **Pre-trained models:** [](https://github.com/phlippe/saved_models/tree/main/tutorial15)[](https://drive.google.com/drive/folders/1BmisSPs5BXQKpolyHp5X4klSAhpDx479?usp=sharing) **Recordings:** [](https://youtu.be/4UyBxlJChfc) **Author:** Phillip Lippe In this tutorial, we will take a closer look at a recent new trend: Transformers for Computer Vision. Since [Alexey Dosovitskiy et al.](https://openreview.net/pdf?id=YicbFdNTTy) successfully applied a Transformer on a variety of image recognition benchmarks, there have been an incredible amount of follow-up works showing that CNNs might not be optimal architecture for Computer Vision anymore. But how do Vision Transformers work exactly, and what benefits and drawbacks do they offer in contrast to CNNs? We will answer these questions by implementing a Vision Transformer ourselves and train it on the popular, small dataset CIFAR10. We will compare these results to the convolutional architectures of [Tutorial 5](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial5/Inception_ResNet_DenseNet.html). If you are not familiar with Transformers yet, take a look at [Tutorial 6](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial6/Transformers_and_MHAttention.html) where we discuss the fundamentals of Multi-Head Attention and Transformers. As in many previous tutorials, we will use [PyTorch Lightning](https://www.pytorchlightning.ai/) again (introduced in [Tutorial 5](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial5/Inception_ResNet_DenseNet.html)). Let's start with importing our standard set of libraries.
###Code
## Standard libraries
import os
import numpy as np
import random
import math
import json
from functools import partial
from PIL import Image
## Imports for plotting
import matplotlib.pyplot as plt
plt.set_cmap('cividis')
%matplotlib inline
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('svg', 'pdf') # For export
from matplotlib.colors import to_rgb
import matplotlib
matplotlib.rcParams['lines.linewidth'] = 2.0
import seaborn as sns
sns.reset_orig()
## tqdm for loading bars
from tqdm.notebook import tqdm
## PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import torch.optim as optim
## Torchvision
import torchvision
from torchvision.datasets import CIFAR10
from torchvision import transforms
# PyTorch Lightning
try:
import pytorch_lightning as pl
except ModuleNotFoundError: # Google Colab does not have PyTorch Lightning installed by default. Hence, we do it here if necessary
!pip install --quiet pytorch-lightning>=1.4
import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
# Import tensorboard
%load_ext tensorboard
# Path to the folder where the datasets are/should be downloaded (e.g. CIFAR10)
DATASET_PATH = "../data"
# Path to the folder where the pretrained models are saved
CHECKPOINT_PATH = "../saved_models/tutorial15"
# Setting the seed
pl.seed_everything(42)
# Ensure that all operations are deterministic on GPU (if used) for reproducibility
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
print("Device:", device)
###Output
Global seed set to 42
###Markdown
We provide a pre-trained Vision Transformer which we download in the next cell. However, Vision Transformers can be relatively quickly trained on CIFAR10 with an overall training time of less than an hour on an NVIDIA TitanRTX. Feel free to experiment with training your own Transformer once you have gone through the whole notebook.
###Code
import urllib.request
from urllib.error import HTTPError
# Github URL where saved models are stored for this tutorial
base_url = "https://raw.githubusercontent.com/phlippe/saved_models/main/"
# Files to download
pretrained_files = ["tutorial15/ViT.ckpt", "tutorial15/tensorboards/ViT/events.out.tfevents.ViT",
"tutorial5/tensorboards/ResNet/events.out.tfevents.resnet"]
# Create checkpoint path if it doesn't exist yet
os.makedirs(CHECKPOINT_PATH, exist_ok=True)
# For each file, check whether it already exists. If not, try downloading it.
for file_name in pretrained_files:
file_path = os.path.join(CHECKPOINT_PATH, file_name.split("/",1)[1])
if "/" in file_name.split("/",1)[1]:
os.makedirs(file_path.rsplit("/",1)[0], exist_ok=True)
if not os.path.isfile(file_path):
file_url = base_url + file_name
print(f"Downloading {file_url}...")
try:
urllib.request.urlretrieve(file_url, file_path)
except HTTPError as e:
print("Something went wrong. Please try to download the file from the GDrive folder, or contact the author with the full output including the following error:\n", e)
###Output
_____no_output_____
###Markdown
We load the CIFAR10 dataset below. We use the same setup of the datasets and data augmentations as for the CNNs in Tutorial 5 to keep a fair comparison. The constants in the `transforms.Normalize` correspond to the values that scale and shift the data to a zero mean and standard deviation of one.
###Code
test_transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize([0.49139968, 0.48215841, 0.44653091], [0.24703223, 0.24348513, 0.26158784])
])
# For training, we add some augmentation. Networks are too powerful and would overfit.
train_transform = transforms.Compose([transforms.RandomHorizontalFlip(),
transforms.RandomResizedCrop((32,32),scale=(0.8,1.0),ratio=(0.9,1.1)),
transforms.ToTensor(),
transforms.Normalize([0.49139968, 0.48215841, 0.44653091], [0.24703223, 0.24348513, 0.26158784])
])
# Loading the training dataset. We need to split it into a training and validation part
# We need to do a little trick because the validation set should not use the augmentation.
train_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=train_transform, download=True)
val_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=test_transform, download=True)
pl.seed_everything(42)
train_set, _ = torch.utils.data.random_split(train_dataset, [45000, 5000])
pl.seed_everything(42)
_, val_set = torch.utils.data.random_split(val_dataset, [45000, 5000])
# Loading the test set
test_set = CIFAR10(root=DATASET_PATH, train=False, transform=test_transform, download=True)
# We define a set of data loaders that we can use for various purposes later.
train_loader = data.DataLoader(train_set, batch_size=128, shuffle=True, drop_last=True, pin_memory=True, num_workers=4)
val_loader = data.DataLoader(val_set, batch_size=128, shuffle=False, drop_last=False, num_workers=4)
test_loader = data.DataLoader(test_set, batch_size=128, shuffle=False, drop_last=False, num_workers=4)
# Visualize some examples
NUM_IMAGES = 4
CIFAR_images = torch.stack([val_set[idx][0] for idx in range(NUM_IMAGES)], dim=0)
img_grid = torchvision.utils.make_grid(CIFAR_images, nrow=4, normalize=True, pad_value=0.9)
img_grid = img_grid.permute(1, 2, 0)
plt.figure(figsize=(8,8))
plt.title("Image examples of the CIFAR10 dataset")
plt.imshow(img_grid)
plt.axis('off')
plt.show()
plt.close()
###Output
Files already downloaded and verified
Files already downloaded and verified
###Markdown
Transformers for image classificationTransformers have been originally proposed to process sets since it is a permutation-equivariant architecture, i.e., producing the same output permuted if the input is permuted. To apply Transformers to sequences, we have simply added a positional encoding to the input feature vectors, and the model learned by itself what to do with it. So, why not do the same thing on images? This is exactly what [Alexey Dosovitskiy et al.](https://openreview.net/pdf?id=YicbFdNTTy) proposed in their paper "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale". Specifically, the Vision Transformer is a model for image classification that views images as sequences of smaller patches. As a preprocessing step, we split an image of, for example, $48\times 48$ pixels into 9 $16\times 16$ patches. Each of those patches is considered to be a "word"/"token" and projected to a feature space. With adding positional encodings and a token for classification on top, we can apply a Transformer as usual to this sequence and start training it for our task. A nice GIF visualization of the architecture is shown below (figure credit - [Phil Wang](https://github.com/lucidrains/vit-pytorch/blob/main/images/vit.gif)):We will walk step by step through the Vision Transformer, and implement all parts by ourselves. First, let's implement the image preprocessing: an image of size $N\times N$ has to be split into $(N/M)^2$ patches of size $M\times M$. These represent the input words to the Transformer.
###Code
def img_to_patch(x, patch_size, flatten_channels=True):
"""
Inputs:
x - torch.Tensor representing the image of shape [B, C, H, W]
patch_size - Number of pixels per dimension of the patches (integer)
flatten_channels - If True, the patches will be returned in a flattened format
                           as a feature vector instead of an image grid.
"""
B, C, H, W = x.shape
x = x.reshape(B, C, H//patch_size, patch_size, W//patch_size, patch_size)
x = x.permute(0, 2, 4, 1, 3, 5) # [B, H', W', C, p_H, p_W]
x = x.flatten(1,2) # [B, H'*W', C, p_H, p_W]
if flatten_channels:
x = x.flatten(2,4) # [B, H'*W', C*p_H*p_W]
return x
###Output
_____no_output_____
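###Markdown
Before visualizing the patches, a quick shape check can be helpful (an illustrative cell, using the CIFAR batch defined above): for $32\times 32$ RGB images and a patch size of 4, `img_to_patch` should return a tensor of shape `[B, 64, 48]`, i.e. 64 patches of $3\cdot 4\cdot 4=48$ values each.
###Code
# Sanity check of the patching function on the CIFAR examples (illustration only)
patches = img_to_patch(CIFAR_images, patch_size=4)
print("Input shape: ", CIFAR_images.shape)  # torch.Size([4, 3, 32, 32])
print("Patch shape: ", patches.shape)       # torch.Size([4, 64, 48])
###Output
_____no_output_____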
###Markdown
Let's take a look at how that works for our CIFAR examples above. For our images of size $32\times 32$, we choose a patch size of 4. Hence, we obtain sequences of 64 patches of size $4\times 4$. We visualize them below:
###Code
img_patches = img_to_patch(CIFAR_images, patch_size=4, flatten_channels=False)
fig, ax = plt.subplots(CIFAR_images.shape[0], 1, figsize=(14,3))
fig.suptitle("Images as input sequences of patches")
for i in range(CIFAR_images.shape[0]):
img_grid = torchvision.utils.make_grid(img_patches[i], nrow=64, normalize=True, pad_value=0.9)
img_grid = img_grid.permute(1, 2, 0)
ax[i].imshow(img_grid)
ax[i].axis('off')
plt.show()
plt.close()
###Output
_____no_output_____
###Markdown
Compared to the original images, it is much harder to recognize the objects from those patch lists now. Still, this is the input we provide to the Transformer for classifying the images. The model has to learn itself how it has to combine the patches to recognize the objects. The inductive bias in CNNs that an image is a grid of pixels, is lost in this input format.After we have looked at the preprocessing, we can now start building the Transformer model. Since we have discussed the fundamentals of Multi-Head Attention in [Tutorial 6](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial6/Transformers_and_MHAttention.html), we will use the PyTorch module `nn.MultiheadAttention` ([docs](https://pytorch.org/docs/stable/generated/torch.nn.MultiheadAttention.html?highlight=multiheadtorch.nn.MultiheadAttention)) here. Further, we use the Pre-Layer Normalization version of the Transformer blocks proposed by [Ruibin Xiong et al.](http://proceedings.mlr.press/v119/xiong20b/xiong20b.pdf) in 2020. The idea is to apply Layer Normalization not in between residual blocks, but instead as a first layer in the residual blocks. This reorganization of the layers supports better gradient flow and removes the necessity of a warm-up stage. A visualization of the difference between the standard Post-LN and the Pre-LN version is shown below.The implementation of the Pre-LN attention block looks as follows:
###Code
class AttentionBlock(nn.Module):
def __init__(self, embed_dim, hidden_dim, num_heads, dropout=0.0):
"""
Inputs:
embed_dim - Dimensionality of input and attention feature vectors
hidden_dim - Dimensionality of hidden layer in feed-forward network
(usually 2-4x larger than embed_dim)
num_heads - Number of heads to use in the Multi-Head Attention block
dropout - Amount of dropout to apply in the feed-forward network
"""
super().__init__()
self.layer_norm_1 = nn.LayerNorm(embed_dim)
self.attn = nn.MultiheadAttention(embed_dim, num_heads)
self.layer_norm_2 = nn.LayerNorm(embed_dim)
self.linear = nn.Sequential(
nn.Linear(embed_dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, embed_dim),
nn.Dropout(dropout)
)
def forward(self, x):
inp_x = self.layer_norm_1(x)
x = x + self.attn(inp_x, inp_x, inp_x)[0]
x = x + self.linear(self.layer_norm_2(x))
return x
###Output
_____no_output_____
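###Markdown
Since the attention block is purely residual, it keeps the feature dimensionality unchanged. A small sketch (with assumed sizes matching the model we train later) to verify this:
###Code
# Illustrative check: the block maps [SeqLen, Batch, embed_dim] -> [SeqLen, Batch, embed_dim]
# (nn.MultiheadAttention expects the sequence dimension first by default)
block = AttentionBlock(embed_dim=256, hidden_dim=512, num_heads=8, dropout=0.2)
dummy = torch.randn(65, 8, 256)  # 64 patches + 1 CLS token, batch size 8
print(block(dummy).shape)        # torch.Size([65, 8, 256])
###Output
_____no_output_____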
###Markdown
Now we have all modules ready to build our own Vision Transformer. Besides the Transformer encoder, we need the following modules:* A **linear projection** layer that maps the input patches to a feature vector of larger size. It is implemented by a simple linear layer that takes each $M\times M$ patch independently as input.* A **classification token** that is added to the input sequence. We will use the output feature vector of the classification token (CLS token in short) for determining the classification prediction.* Learnable **positional encodings** that are added to the tokens before being processed by the Transformer. Those are needed to learn position-dependent information, and convert the set to a sequence. Since we usually work with a fixed resolution, we can learn the positional encodings instead of having the pattern of sine and cosine functions.* An **MLP head** that takes the output feature vector of the CLS token, and maps it to a classification prediction. This is usually implemented by a small feed-forward network or even a single linear layer.With those components in mind, let's implement the full Vision Transformer below:
###Code
class VisionTransformer(nn.Module):
def __init__(self, embed_dim, hidden_dim, num_channels, num_heads, num_layers, num_classes, patch_size, num_patches, dropout=0.0):
"""
Inputs:
embed_dim - Dimensionality of the input feature vectors to the Transformer
hidden_dim - Dimensionality of the hidden layer in the feed-forward networks
within the Transformer
num_channels - Number of channels of the input (3 for RGB)
num_heads - Number of heads to use in the Multi-Head Attention block
num_layers - Number of layers to use in the Transformer
num_classes - Number of classes to predict
patch_size - Number of pixels that the patches have per dimension
num_patches - Maximum number of patches an image can have
dropout - Amount of dropout to apply in the feed-forward network and
on the input encoding
"""
super().__init__()
self.patch_size = patch_size
# Layers/Networks
self.input_layer = nn.Linear(num_channels*(patch_size**2), embed_dim)
self.transformer = nn.Sequential(*[AttentionBlock(embed_dim, hidden_dim, num_heads, dropout=dropout) for _ in range(num_layers)])
self.mlp_head = nn.Sequential(
nn.LayerNorm(embed_dim),
nn.Linear(embed_dim, num_classes)
)
self.dropout = nn.Dropout(dropout)
# Parameters/Embeddings
self.cls_token = nn.Parameter(torch.randn(1,1,embed_dim))
self.pos_embedding = nn.Parameter(torch.randn(1,1+num_patches,embed_dim))
def forward(self, x):
# Preprocess input
x = img_to_patch(x, self.patch_size)
B, T, _ = x.shape
x = self.input_layer(x)
# Add CLS token and positional encoding
cls_token = self.cls_token.repeat(B, 1, 1)
x = torch.cat([cls_token, x], dim=1)
x = x + self.pos_embedding[:,:T+1]
        # Apply Transformer
x = self.dropout(x)
x = x.transpose(0, 1)
x = self.transformer(x)
# Perform classification prediction
cls = x[0]
out = self.mlp_head(cls)
return out
###Output
_____no_output_____
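###Markdown
As a quick sanity check (using the hyperparameters we will also use for training, and the CIFAR examples from above), an untrained Vision Transformer should already map a batch of images to one logit per class:
###Code
# Illustrative forward pass through a randomly initialized VisionTransformer
vit = VisionTransformer(embed_dim=256, hidden_dim=512, num_channels=3, num_heads=8,
                        num_layers=6, num_classes=10, patch_size=4, num_patches=64, dropout=0.2)
with torch.no_grad():
    logits = vit(CIFAR_images)
print(logits.shape)  # torch.Size([4, 10])
###Output
_____no_output_____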
###Markdown
Finally, we can put everything into a PyTorch Lightning Module as usual. We use `torch.optim.AdamW` as the optimizer, which is Adam with a corrected weight decay implementation. Since we use the Pre-LN Transformer version, we do not need to use a learning rate warmup stage anymore. Instead, we use the same learning rate scheduler as the CNNs in our previous tutorial on image classification.
###Code
class ViT(pl.LightningModule):
def __init__(self, model_kwargs, lr):
super().__init__()
self.save_hyperparameters()
self.model = VisionTransformer(**model_kwargs)
self.example_input_array = next(iter(train_loader))[0]
def forward(self, x):
return self.model(x)
def configure_optimizers(self):
optimizer = optim.AdamW(self.parameters(), lr=self.hparams.lr)
lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100,150], gamma=0.1)
return [optimizer], [lr_scheduler]
def _calculate_loss(self, batch, mode="train"):
imgs, labels = batch
preds = self.model(imgs)
loss = F.cross_entropy(preds, labels)
acc = (preds.argmax(dim=-1) == labels).float().mean()
self.log(f'{mode}_loss', loss)
self.log(f'{mode}_acc', acc)
return loss
def training_step(self, batch, batch_idx):
loss = self._calculate_loss(batch, mode="train")
return loss
def validation_step(self, batch, batch_idx):
self._calculate_loss(batch, mode="val")
def test_step(self, batch, batch_idx):
self._calculate_loss(batch, mode="test")
###Output
_____no_output_____
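###Markdown
The `MultiStepLR` scheduler above decays the learning rate by a factor of 10 at epochs 100 and 150. A small sketch of the resulting schedule (assuming the base learning rate of 3e-4 used below):
###Code
# Illustration only: learning rate at selected epochs under MultiStepLR(milestones=[100, 150], gamma=0.1)
base_lr = 3e-4
for epoch in [0, 99, 100, 149, 150, 179]:
    factor = 0.1 ** sum(epoch >= m for m in [100, 150])
    print(f"Epoch {epoch:3d}: lr = {base_lr * factor:.1e}")
###Output
_____no_output_____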
###Markdown
ExperimentsCommonly, Vision Transformers are applied to large-scale image classification benchmarks such as ImageNet to leverage their full potential. However, here we take a step back and ask: can Vision Transformer also succeed on classical, small benchmarks such as CIFAR10? To find this out, we train a Vision Transformer from scratch on the CIFAR10 dataset. Let's first create a training function for our PyTorch Lightning module which also loads the pre-trained model if you have downloaded it above.
###Code
def train_model(**kwargs):
trainer = pl.Trainer(default_root_dir=os.path.join(CHECKPOINT_PATH, "ViT"),
gpus=1 if str(device)=="cuda:0" else 0,
max_epochs=180,
callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc"),
LearningRateMonitor("epoch")],
progress_bar_refresh_rate=1)
trainer.logger._log_graph = True # If True, we plot the computation graph in tensorboard
trainer.logger._default_hp_metric = None # Optional logging argument that we don't need
# Check whether pretrained model exists. If yes, load it and skip training
pretrained_filename = os.path.join(CHECKPOINT_PATH, "ViT.ckpt")
if os.path.isfile(pretrained_filename):
print(f"Found pretrained model at {pretrained_filename}, loading...")
model = ViT.load_from_checkpoint(pretrained_filename) # Automatically loads the model with the saved hyperparameters
else:
        pl.seed_everything(42) # To be reproducible
model = ViT(**kwargs)
trainer.fit(model, train_loader, val_loader)
model = ViT.load_from_checkpoint(trainer.checkpoint_callback.best_model_path) # Load best checkpoint after training
# Test best model on validation and test set
val_result = trainer.test(model, val_loader, verbose=False)
test_result = trainer.test(model, test_loader, verbose=False)
result = {"test": test_result[0]["test_acc"], "val": val_result[0]["test_acc"]}
return model, result
###Output
_____no_output_____
###Markdown
Now, we can already start training our model. As seen in our implementation, we have a couple of hyperparameters that we have to set. When creating this notebook, we have performed a small grid search over hyperparameters and listed the best hyperparameters in the cell below. Nevertheless, it is worth discussing the influence that each hyperparameter has, and what intuition we have for choosing its value.First, let's consider the patch size. The smaller we make the patches, the longer the input sequences to the Transformer become. While in general, this allows the Transformer to model more complex functions, it requires a longer computation time due to its quadratic memory usage in the attention layer. Furthermore, small patches can make the task more difficult since the Transformer has to learn which patches are close-by, and which are far away. We experimented with patch sizes of 2, 4, and 8 which gives us the input sequence lengths of 256, 64, and 16 respectively. We found 4 to result in the best performance and hence pick it below. Next, the embedding and hidden dimensionality have a similar impact on a Transformer as to an MLP. The larger the sizes, the more complex the model becomes, and the longer it takes to train. In Transformers, however, we have one more aspect to consider: the query-key sizes in the Multi-Head Attention layers. Each key has the feature dimensionality of `embed_dim/num_heads`. Considering that we have an input sequence length of 64, a minimum reasonable size for the key vectors is 16 or 32. Lower dimensionalities can restrain the possible attention maps too much. We observed that more than 8 heads are not necessary for the Transformer, and therefore pick an embedding dimensionality of `256`. The hidden dimensionality in the feed-forward networks is usually 2-4x larger than the embedding dimensionality, and thus we pick `512`. Finally, the learning rate for Transformers is usually relatively small, and in papers, a common value to use is 3e-5. However, since we work with a smaller dataset and have a potentially easier task, we found that we are able to increase the learning rate to 3e-4 without any problems. To reduce overfitting, we use a dropout value of 0.2. Remember that we also use small image augmentations as regularization during training.Feel free to explore the hyperparameters yourself by changing the values below. In general, the Vision Transformer did not show to be too sensitive to the hyperparameter choices on the CIFAR10 dataset.
###Code
model, results = train_model(model_kwargs={
'embed_dim': 256,
'hidden_dim': 512,
'num_heads': 8,
'num_layers': 6,
'patch_size': 4,
'num_channels': 3,
'num_patches': 64,
'num_classes': 10,
'dropout': 0.2
},
lr=3e-4)
print("ViT results", results)
###Output
GPU available: True, used: True
TPU available: False, using: 0 TPU cores
###Markdown
The Vision Transformer achieves a validation and test performance of about 75%. In comparison, almost all CNN architectures that we have tested in [Tutorial 5](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial5/Inception_ResNet_DenseNet.html) obtained a classification performance of around 90%. This is a considerable gap and shows that although Vision Transformers perform strongly on ImageNet with potential pretraining, they cannot come close to simple CNNs on CIFAR10 when being trained from scratch. The differences between a CNN and Transformer can be well observed in the training curves. Let's look at them in a tensorboard below:
###Code
# Opens tensorboard in notebook. Adjust the path to your CHECKPOINT_PATH!
%tensorboard --logdir ../saved_models/tutorial15/tensorboards/
###Output
_____no_output_____
###Markdown
Tutorial 15: Vision Transformers**Filled notebook:** [](https://github.com/phlippe/uvadlc_notebooks/blob/master/docs/tutorial_notebooks/tutorial15/Vision_Transformer.ipynb)[](https://colab.research.google.com/github/phlippe/uvadlc_notebooks/blob/master/docs/tutorial_notebooks/tutorial15/Vision_Transformer.ipynb) **Pre-trained models:** [](https://github.com/phlippe/saved_models/tree/main/tutorial15)[](https://drive.google.com/drive/folders/1BmisSPs5BXQKpolyHp5X4klSAhpDx479?usp=sharing) In this tutorial, we will take a closer look at a recent new trend: Transformers for Computer Vision. Since [Alexey Dosovitskiy et al.](https://openreview.net/pdf?id=YicbFdNTTy) successfully applied a Transformer on a variety of image recognition benchmarks, there have been an incredible amount of follow-up works showing that CNNs might not be optimal architecture for Computer Vision anymore. But how do Vision Transformers work exactly, and what benefits and drawbacks do they offer in contrast to CNNs? We will answer these questions by implementing a Vision Transformer ourselves and train it on the popular, small dataset CIFAR10. We will compare these results to the convolutional architectures of [Tutorial 5](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial5/Inception_ResNet_DenseNet.html). If you are not familiar with Transformers yet, take a look at [Tutorial 6](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial6/Transformers_and_MHAttention.html) where we discuss the fundamentals of Multi-Head Attention and Transformers. As in many previous tutorials, we will use [PyTorch Lightning](https://www.pytorchlightning.ai/) again (introduced in [Tutorial 5](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial5/Inception_ResNet_DenseNet.html)). Let's start with importing our standard set of libraries.
###Code
## Standard libraries
import os
import numpy as np
import random
import math
import json
from functools import partial
from PIL import Image
## Imports for plotting
import matplotlib.pyplot as plt
plt.set_cmap('cividis')
%matplotlib inline
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('svg', 'pdf') # For export
from matplotlib.colors import to_rgb
import matplotlib
matplotlib.rcParams['lines.linewidth'] = 2.0
import seaborn as sns
sns.reset_orig()
## tqdm for loading bars
from tqdm.notebook import tqdm
## PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import torch.optim as optim
## Torchvision
import torchvision
from torchvision.datasets import CIFAR10
from torchvision import transforms
# PyTorch Lightning
try:
import pytorch_lightning as pl
except ModuleNotFoundError: # Google Colab does not have PyTorch Lightning installed by default. Hence, we do it here if necessary
!pip install pytorch-lightning==1.3.4
import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
# Import tensorboard
%load_ext tensorboard
# Path to the folder where the datasets are/should be downloaded (e.g. CIFAR10)
DATASET_PATH = "../data"
# Path to the folder where the pretrained models are saved
CHECKPOINT_PATH = "../saved_models/tutorial15"
# Setting the seed
pl.seed_everything(42)
# Ensure that all operations are deterministic on GPU (if used) for reproducibility
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
print("Device:", device)
###Output
Global seed set to 42
###Markdown
We provide a pre-trained Vision Transformer which we download in the next cell. However, Vision Transformers can be relatively quickly trained on CIFAR10 with an overall training time of less than an hour on an NVIDIA TitanRTX. Feel free to experiment with training your own Transformer once you have gone through the whole notebook.
###Code
import urllib.request
from urllib.error import HTTPError
# Github URL where saved models are stored for this tutorial
base_url = "https://raw.githubusercontent.com/phlippe/saved_models/main/"
# Files to download
pretrained_files = ["tutorial15/ViT.ckpt", "tutorial15/tensorboards/ViT/events.out.tfevents.ViT",
"tutorial5/tensorboards/ResNet/events.out.tfevents.resnet"]
# Create checkpoint path if it doesn't exist yet
os.makedirs(CHECKPOINT_PATH, exist_ok=True)
# For each file, check whether it already exists. If not, try downloading it.
for file_name in pretrained_files:
file_path = os.path.join(CHECKPOINT_PATH, file_name.split("/",1)[1])
if "/" in file_name.split("/",1)[1]:
os.makedirs(file_path.rsplit("/",1)[0], exist_ok=True)
if not os.path.isfile(file_path):
file_url = base_url + file_name
print(f"Downloading {file_url}...")
try:
urllib.request.urlretrieve(file_url, file_path)
except HTTPError as e:
print("Something went wrong. Please try to download the file from the GDrive folder, or contact the author with the full output including the following error:\n", e)
###Output
_____no_output_____
###Markdown
We load the CIFAR10 dataset below. We use the same setup of the datasets and data augmentations as for the CNNs in Tutorial 5 to keep a fair comparison. The constants in the `transforms.Normalize` correspond to the values that scale and shift the data to a zero mean and standard deviation of one.
###Code
test_transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize([0.49139968, 0.48215841, 0.44653091], [0.24703223, 0.24348513, 0.26158784])
])
# For training, we add some augmentation. Networks are too powerful and would overfit.
train_transform = transforms.Compose([transforms.RandomHorizontalFlip(),
transforms.RandomResizedCrop((32,32),scale=(0.8,1.0),ratio=(0.9,1.1)),
transforms.ToTensor(),
transforms.Normalize([0.49139968, 0.48215841, 0.44653091], [0.24703223, 0.24348513, 0.26158784])
])
# Loading the training dataset. We need to split it into a training and validation part
# We need to do a little trick because the validation set should not use the augmentation.
train_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=train_transform, download=True)
val_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=test_transform, download=True)
pl.seed_everything(42)
train_set, _ = torch.utils.data.random_split(train_dataset, [45000, 5000])
pl.seed_everything(42)
_, val_set = torch.utils.data.random_split(val_dataset, [45000, 5000])
# Loading the test set
test_set = CIFAR10(root=DATASET_PATH, train=False, transform=test_transform, download=True)
# We define a set of data loaders that we can use for various purposes later.
train_loader = data.DataLoader(train_set, batch_size=128, shuffle=True, drop_last=True, pin_memory=True, num_workers=4)
val_loader = data.DataLoader(val_set, batch_size=128, shuffle=False, drop_last=False, num_workers=4)
test_loader = data.DataLoader(test_set, batch_size=128, shuffle=False, drop_last=False, num_workers=4)
# Visualize some examples
NUM_IMAGES = 4
CIFAR_images = torch.stack([val_set[idx][0] for idx in range(NUM_IMAGES)], dim=0)
img_grid = torchvision.utils.make_grid(CIFAR_images, nrow=4, normalize=True, pad_value=0.9)
img_grid = img_grid.permute(1, 2, 0)
plt.figure(figsize=(8,8))
plt.title("Image examples of the CIFAR10 dataset")
plt.imshow(img_grid)
plt.axis('off')
plt.show()
plt.close()
###Output
Files already downloaded and verified
Files already downloaded and verified
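###Markdown
As a quick check of the normalization constants (an illustrative cell, using the validation split defined above), the transformed images should have a per-channel mean close to zero and a standard deviation close to one:
###Code
# Inspect channel statistics of a few hundred normalized validation images (illustration only)
imgs = torch.stack([val_set[i][0] for i in range(512)], dim=0)
print("Per-channel mean:", imgs.mean(dim=[0, 2, 3]))
print("Per-channel std: ", imgs.std(dim=[0, 2, 3]))
###Output
_____no_output_____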
###Markdown
Transformers for image classificationTransformers have been originally proposed to process sets since it is a permutation-equivariant architecture, i.e., producing the same output permuted if the input is permuted. To apply Transformers to sequences, we have simply added a positional encoding to the input feature vectors, and the model learned by itself what to do with it. So, why not do the same thing on images? This is exactly what [Alexey Dosovitskiy et al.](https://openreview.net/pdf?id=YicbFdNTTy) proposed in their paper "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale". Specifically, the Vision Transformer is a model for image classification that views images as sequences of smaller patches. As a preprocessing step, we split an image of, for example, $48\times 48$ pixels into 9 $16\times 16$ patches. Each of those patches is considered to be a "word"/"token" and projected to a feature space. With adding positional encodings and a token for classification on top, we can apply a Transformer as usual to this sequence and start training it for our task. A nice GIF visualization of the architecture is shown below (figure credit - [Phil Wang](https://github.com/lucidrains/vit-pytorch/blob/main/images/vit.gif)):We will walk step by step through the Vision Transformer, and implement all parts by ourselves. First, let's implement the image preprocessing: an image of size $N\times N$ has to be split into $(N/M)^2$ patches of size $M\times M$. These represent the input words to the Transformer.
###Code
def img_to_patch(x, patch_size, flatten_channels=True):
"""
Inputs:
x - torch.Tensor representing the image of shape [B, C, H, W]
patch_size - Number of pixels per dimension of the patches (integer)
flatten_channels - If True, the patches will be returned in a flattened format
                           as a feature vector instead of an image grid.
"""
B, C, H, W = x.shape
x = x.reshape(B, C, H//patch_size, patch_size, W//patch_size, patch_size)
x = x.permute(0, 2, 4, 1, 3, 5) # [B, H', W', C, p_H, p_W]
x = x.flatten(1,2) # [B, H'*W', C, p_H, p_W]
if flatten_channels:
x = x.flatten(2,4) # [B, H'*W', C*p_H*p_W]
return x
###Output
_____no_output_____
###Markdown
Let's take a look at how that works for our CIFAR examples above. For our images of size $32\times 32$, we choose a patch size of 4. Hence, we obtain sequences of 64 patches of size $4\times 4$. We visualize them below:
###Code
img_patches = img_to_patch(CIFAR_images, patch_size=4, flatten_channels=False)
fig, ax = plt.subplots(CIFAR_images.shape[0], 1, figsize=(14,3))
fig.suptitle("Images as input sequences of patches")
for i in range(CIFAR_images.shape[0]):
img_grid = torchvision.utils.make_grid(img_patches[i], nrow=64, normalize=True, pad_value=0.9)
img_grid = img_grid.permute(1, 2, 0)
ax[i].imshow(img_grid)
ax[i].axis('off')
plt.show()
plt.close()
###Output
_____no_output_____
###Markdown
Compared to the original images, it is much harder to recognize the objects from those patch lists now. Still, this is the input we provide to the Transformer for classifying the images. The model has to learn itself how it has to combine the patches to recognize the objects. The inductive bias in CNNs that an image is a grid of pixels, is lost in this input format.After we have looked at the preprocessing, we can now start building the Transformer model. Since we have discussed the fundamentals of Multi-Head Attention in [Tutorial 6](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial6/Transformers_and_MHAttention.html), we will use the PyTorch module `nn.MultiheadAttention` ([docs](https://pytorch.org/docs/stable/generated/torch.nn.MultiheadAttention.html?highlight=multiheadtorch.nn.MultiheadAttention)) here. Further, we use the Pre-Layer Normalization version of the Transformer blocks proposed by [Ruibin Xiong et al.](http://proceedings.mlr.press/v119/xiong20b/xiong20b.pdf) in 2020. The idea is to apply Layer Normalization not in between residual blocks, but instead as a first layer in the residual blocks. This reorganization of the layers supports better gradient flow and removes the necessity of a warm-up stage. A visualization of the difference between the standard Post-LN and the Pre-LN version is shown below.The implementation of the Pre-LN attention block looks as follows:
###Code
class AttentionBlock(nn.Module):
def __init__(self, embed_dim, hidden_dim, num_heads, dropout=0.0):
"""
Inputs:
embed_dim - Dimensionality of input and attention feature vectors
hidden_dim - Dimensionality of hidden layer in feed-forward network
(usually 2-4x larger than embed_dim)
num_heads - Number of heads to use in the Multi-Head Attention block
dropout - Amount of dropout to apply in the feed-forward network
"""
super().__init__()
self.layer_norm_1 = nn.LayerNorm(embed_dim)
self.attn = nn.MultiheadAttention(embed_dim, num_heads)
self.layer_norm_2 = nn.LayerNorm(embed_dim)
self.linear = nn.Sequential(
nn.Linear(embed_dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, embed_dim),
nn.Dropout(dropout)
)
def forward(self, x):
inp_x = self.layer_norm_1(x)
x = x + self.attn(inp_x, inp_x, inp_x)[0]
x = x + self.linear(self.layer_norm_2(x))
return x
###Output
_____no_output_____
###Markdown
Now we have all modules ready to build our own Vision Transformer. Besides the Transformer encoder, we need the following modules:* A **linear projection** layer that maps the input patches to a feature vector of larger size. It is implemented by a simple linear layer that takes each $M\times M$ patch independently as input.* A **classification token** that is added to the input sequence. We will use the output feature vector of the classification token (CLS token in short) for determining the classification prediction.* Learnable **positional encodings** that are added to the tokens before being processed by the Transformer. Those are needed to learn position-dependent information, and convert the set to a sequence. Since we usually work with a fixed resolution, we can learn the positional encodings instead of having the pattern of sine and cosine functions.* An **MLP head** that takes the output feature vector of the CLS token, and maps it to a classification prediction. This is usually implemented by a small feed-forward network or even a single linear layer.With those components in mind, let's implement the full Vision Transformer below:
###Code
class VisionTransformer(nn.Module):
def __init__(self, embed_dim, hidden_dim, num_channels, num_heads, num_layers, num_classes, patch_size, num_patches, dropout=0.0):
"""
Inputs:
embed_dim - Dimensionality of the input feature vectors to the Transformer
hidden_dim - Dimensionality of the hidden layer in the feed-forward networks
within the Transformer
num_channels - Number of channels of the input (3 for RGB)
num_heads - Number of heads to use in the Multi-Head Attention block
num_layers - Number of layers to use in the Transformer
num_classes - Number of classes to predict
patch_size - Number of pixels that the patches have per dimension
num_patches - Maximum number of patches an image can have
dropout - Amount of dropout to apply in the feed-forward network and
on the input encoding
"""
super().__init__()
self.patch_size = patch_size
# Layers/Networks
self.input_layer = nn.Linear(num_channels*(patch_size**2), embed_dim)
self.transformer = nn.Sequential(*[AttentionBlock(embed_dim, hidden_dim, num_heads, dropout=dropout) for _ in range(num_layers)])
self.mlp_head = nn.Sequential(
nn.LayerNorm(embed_dim),
nn.Linear(embed_dim, num_classes)
)
self.dropout = nn.Dropout(dropout)
# Parameters/Embeddings
self.cls_token = nn.Parameter(torch.randn(1,1,embed_dim))
self.pos_embedding = nn.Parameter(torch.randn(1,1+num_patches,embed_dim))
def forward(self, x):
# Preprocess input
x = img_to_patch(x, self.patch_size)
B, T, _ = x.shape
x = self.input_layer(x)
# Add CLS token and positional encoding
cls_token = self.cls_token.repeat(B, 1, 1)
x = torch.cat([cls_token, x], dim=1)
x = x + self.pos_embedding[:,:T+1]
        # Apply Transformer
x = self.dropout(x)
x = x.transpose(0, 1)
x = self.transformer(x)
# Perform classification prediction
cls = x[0]
out = self.mlp_head(cls)
return out
###Output
_____no_output_____
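###Markdown
To get a feeling for the model size, we can count its trainable parameters (an illustrative cell; the hyperparameters are the ones we will use for training below):
###Code
# Parameter count for the configuration used later in this notebook (illustration only)
tmp_model = VisionTransformer(embed_dim=256, hidden_dim=512, num_channels=3, num_heads=8,
                              num_layers=6, num_classes=10, patch_size=4, num_patches=64, dropout=0.2)
num_params = sum(p.numel() for p in tmp_model.parameters() if p.requires_grad)
print(f"Number of trainable parameters: {num_params / 1e6:.2f}M")
###Output
_____no_output_____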
###Markdown
Finally, we can put everything into a PyTorch Lightning Module as usual. We use `torch.optim.AdamW` as the optimizer, which is Adam with a corrected weight decay implementation. Since we use the Pre-LN Transformer version, we do not need to use a learning rate warmup stage anymore. Instead, we use the same learning rate scheduler as the CNNs in our previous tutorial on image classification.
###Code
class ViT(pl.LightningModule):
def __init__(self, model_kwargs, lr):
super().__init__()
self.save_hyperparameters()
self.model = VisionTransformer(**model_kwargs)
self.example_input_array = next(iter(train_loader))[0]
def forward(self, x):
return self.model(x)
def configure_optimizers(self):
optimizer = optim.AdamW(self.parameters(), lr=self.hparams.lr)
lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100,150], gamma=0.1)
return [optimizer], [lr_scheduler]
def _calculate_loss(self, batch, mode="train"):
imgs, labels = batch
preds = self.model(imgs)
loss = F.cross_entropy(preds, labels)
acc = (preds.argmax(dim=-1) == labels).float().mean()
self.log(f'{mode}_loss', loss)
self.log(f'{mode}_acc', acc)
return loss
def training_step(self, batch, batch_idx):
loss = self._calculate_loss(batch, mode="train")
return loss
def validation_step(self, batch, batch_idx):
self._calculate_loss(batch, mode="val")
def test_step(self, batch, batch_idx):
self._calculate_loss(batch, mode="test")
###Output
_____no_output_____
###Markdown
ExperimentsCommonly, Vision Transformers are applied to large-scale image classification benchmarks such as ImageNet to leverage their full potential. However, here we take a step back and ask: can Vision Transformer also succeed on classical, small benchmarks such as CIFAR10? To find this out, we train a Vision Transformer from scratch on the CIFAR10 dataset. Let's first create a training function for our PyTorch Lightning module which also loads the pre-trained model if you have downloaded it above.
###Code
def train_model(**kwargs):
trainer = pl.Trainer(default_root_dir=os.path.join(CHECKPOINT_PATH, "ViT"),
gpus=1 if str(device)=="cuda:0" else 0,
max_epochs=180,
callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc"),
LearningRateMonitor("epoch")],
progress_bar_refresh_rate=1)
trainer.logger._log_graph = True # If True, we plot the computation graph in tensorboard
trainer.logger._default_hp_metric = None # Optional logging argument that we don't need
# Check whether pretrained model exists. If yes, load it and skip training
pretrained_filename = os.path.join(CHECKPOINT_PATH, "ViT.ckpt")
if os.path.isfile(pretrained_filename):
print(f"Found pretrained model at {pretrained_filename}, loading...")
model = ViT.load_from_checkpoint(pretrained_filename) # Automatically loads the model with the saved hyperparameters
else:
        pl.seed_everything(42) # To be reproducible
model = ViT(**kwargs)
trainer.fit(model, train_loader, val_loader)
model = ViT.load_from_checkpoint(trainer.checkpoint_callback.best_model_path) # Load best checkpoint after training
# Test best model on validation and test set
val_result = trainer.test(model, test_dataloaders=val_loader, verbose=False)
test_result = trainer.test(model, test_dataloaders=test_loader, verbose=False)
result = {"test": test_result[0]["test_acc"], "val": val_result[0]["test_acc"]}
return model, result
###Output
_____no_output_____
###Markdown
Now, we can already start training our model. As seen in our implementation, we have a couple of hyperparameters that we have to set. When creating this notebook, we have performed a small grid search over hyperparameters and listed the best hyperparameters in the cell below. Nevertheless, it is worth discussing the influence that each hyperparameter has, and what intuition we have for choosing its value.First, let's consider the patch size. The smaller we make the patches, the longer the input sequences to the Transformer become. While in general, this allows the Transformer to model more complex functions, it requires a longer computation time due to its quadratic memory usage in the attention layer. Furthermore, small patches can make the task more difficult since the Transformer has to learn which patches are close-by, and which are far away. We experimented with patch sizes of 2, 4, and 8 which gives us the input sequence lengths of 256, 64, and 16 respectively. We found 4 to result in the best performance and hence pick it below. Next, the embedding and hidden dimensionality have a similar impact on a Transformer as to an MLP. The larger the sizes, the more complex the model becomes, and the longer it takes to train. In Transformers, however, we have one more aspect to consider: the query-key sizes in the Multi-Head Attention layers. Each key has the feature dimensionality of `embed_dim/num_heads`. Considering that we have an input sequence length of 64, a minimum reasonable size for the key vectors is 16 or 32. Lower dimensionalities can restrain the possible attention maps too much. We observed that more than 8 heads are not necessary for the Transformer, and therefore pick an embedding dimensionality of `256`. The hidden dimensionality in the feed-forward networks is usually 2-4x larger than the embedding dimensionality, and thus we pick `512`. Finally, the learning rate for Transformers is usually relatively small, and in papers, a common value to use is 3e-5. However, since we work with a smaller dataset and have a potentially easier task, we found that we are able to increase the learning rate to 3e-4 without any problems. To reduce overfitting, we use a dropout value of 0.2. Remember that we also use small image augmentations as regularization during training.Feel free to explore the hyperparameters yourself by changing the values below. In general, the Vision Transformer did not show to be too sensitive to the hyperparameter choices on the CIFAR10 dataset.
###Code
model, results = train_model(model_kwargs={
'embed_dim': 256,
'hidden_dim': 512,
'num_heads': 8,
'num_layers': 6,
'patch_size': 4,
'num_channels': 3,
'num_patches': 64,
'num_classes': 10,
'dropout': 0.2
},
lr=3e-4)
print("ViT results", results)
###Output
GPU available: True, used: True
TPU available: False, using: 0 TPU cores
###Markdown
The Vision Transformer achieves a validation and test performance of about 75%. In comparison, almost all CNN architectures that we have tested in [Tutorial 5](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial5/Inception_ResNet_DenseNet.html) obtained a classification performance of around 90%. This is a considerable gap and shows that although Vision Transformers perform strongly on ImageNet with potential pretraining, they cannot come close to simple CNNs on CIFAR10 when being trained from scratch. The differences between a CNN and Transformer can be well observed in the training curves. Let's look at them in a tensorboard below:
###Code
# Opens tensorboard in notebook. Adjust the path to your CHECKPOINT_PATH!
%tensorboard --logdir ../saved_models/tutorial15/tensorboards/
###Output
_____no_output_____
###Markdown
Tutorial 15: Vision Transformers**Filled notebook:** [](https://github.com/phlippe/uvadlc_notebooks/blob/master/docs/tutorial_notebooks/tutorial15/Vision_Transformer.ipynb)[](https://colab.research.google.com/github/phlippe/uvadlc_notebooks/blob/master/docs/tutorial_notebooks/tutorial15/Vision_Transformer.ipynb) **Pre-trained models:** [](https://github.com/phlippe/saved_models/tree/main/tutorial15)[](https://drive.google.com/drive/folders/1BmisSPs5BXQKpolyHp5X4klSAhpDx479?usp=sharing) In this tutorial, we will take a closer look at a recent new trend: Transformers for Computer Vision. Since [Alexey Dosovitskiy et al.](https://openreview.net/pdf?id=YicbFdNTTy) successfully applied a Transformer on a variety of image recognition benchmarks, there have been an incredible amount of follow-up works showing that CNNs might not be optimal architecture for Computer Vision anymore. But how do Vision Transformers work exactly, and what benefits and drawbacks do they offer in contrast to CNNs? We will answer these questions by implementing a Vision Transformer ourselves and train it on the popular, small dataset CIFAR10. We will compare these results to the convolutional architectures of [Tutorial 5](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial5/Inception_ResNet_DenseNet.html). If you are not familiar with Transformers yet, take a look at [Tutorial 6](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial6/Transformers_and_MHAttention.html) where we discuss the fundamentals of Multi-Head Attention and Transformers. As in many previous tutorials, we will use [PyTorch Lightning](https://www.pytorchlightning.ai/) again (introduced in [Tutorial 5](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial5/Inception_ResNet_DenseNet.html)). Let's start with importing our standard set of libraries.
###Code
## Standard libraries
import os
import numpy as np
import random
import math
import json
from functools import partial
from PIL import Image
## Imports for plotting
import matplotlib.pyplot as plt
plt.set_cmap('cividis')
%matplotlib inline
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('svg', 'pdf') # For export
from matplotlib.colors import to_rgb
import matplotlib
matplotlib.rcParams['lines.linewidth'] = 2.0
import seaborn as sns
sns.reset_orig()
## tqdm for loading bars
from tqdm.notebook import tqdm
## PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import torch.optim as optim
## Torchvision
import torchvision
from torchvision.datasets import CIFAR10
from torchvision import transforms
# PyTorch Lightning
try:
import pytorch_lightning as pl
except ModuleNotFoundError: # Google Colab does not have PyTorch Lightning installed by default. Hence, we do it here if necessary
!pip install pytorch-lightning==1.3.4
import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
# Import tensorboard
%load_ext tensorboard
# Path to the folder where the datasets are/should be downloaded (e.g. CIFAR10)
DATASET_PATH = "../data"
# Path to the folder where the pretrained models are saved
CHECKPOINT_PATH = "../saved_models/tutorial15"
# Setting the seed
pl.seed_everything(42)
# Ensure that all operations are deterministic on GPU (if used) for reproducibility
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
print("Device:", device)
###Output
Global seed set to 42
###Markdown
We provide a pre-trained Vision Transformer which we download in the next cell. However, Vision Transformers can be relatively quickly trained on CIFAR10 with an overall training time of less than an hour on an NVIDIA TitanRTX. Feel free to experiment with training your own Transformer once you have gone through the whole notebook.
###Code
import urllib.request
from urllib.error import HTTPError
# Github URL where saved models are stored for this tutorial
base_url = "https://raw.githubusercontent.com/phlippe/saved_models/main/"
# Files to download
pretrained_files = ["tutorial15/ViT.ckpt", "tutorial15/tensorboards/ViT/events.out.tfevents.ViT",
"tutorial5/tensorboards/ResNet/events.out.tfevents.resnet"]
# Create checkpoint path if it doesn't exist yet
os.makedirs(CHECKPOINT_PATH, exist_ok=True)
# For each file, check whether it already exists. If not, try downloading it.
for file_name in pretrained_files:
file_path = os.path.join(CHECKPOINT_PATH, file_name.split("/",1)[1])
if "/" in file_name.split("/",1)[1]:
os.makedirs(file_path.rsplit("/",1)[0], exist_ok=True)
if not os.path.isfile(file_path):
file_url = base_url + file_name
print("Downloading %s..." % file_url)
try:
urllib.request.urlretrieve(file_url, file_path)
except HTTPError as e:
print("Something went wrong. Please try to download the file from the GDrive folder, or contact the author with the full output including the following error:\n", e)
###Output
_____no_output_____
###Markdown
We load the CIFAR10 dataset below. We use the same setup of the datasets and data augmentations as for the CNNs in Tutorial 5 to keep a fair comparison. The constants in the `transforms.Normalize` correspond to the values that scale and shift the data to a zero mean and standard deviation of one.
###Code
test_transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize([0.49139968, 0.48215841, 0.44653091], [0.24703223, 0.24348513, 0.26158784])
])
# For training, we add some augmentation. Networks are too powerful and would overfit.
train_transform = transforms.Compose([transforms.RandomHorizontalFlip(),
transforms.RandomResizedCrop((32,32),scale=(0.8,1.0),ratio=(0.9,1.1)),
transforms.ToTensor(),
transforms.Normalize([0.49139968, 0.48215841, 0.44653091], [0.24703223, 0.24348513, 0.26158784])
])
# Loading the training dataset. We need to split it into a training and validation part
# We need to do a little trick because the validation set should not use the augmentation.
train_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=train_transform, download=True)
val_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=test_transform, download=True)
pl.seed_everything(42)
train_set, _ = torch.utils.data.random_split(train_dataset, [45000, 5000])
pl.seed_everything(42)
_, val_set = torch.utils.data.random_split(val_dataset, [45000, 5000])
# Loading the test set
test_set = CIFAR10(root=DATASET_PATH, train=False, transform=test_transform, download=True)
# We define a set of data loaders that we can use for various purposes later.
train_loader = data.DataLoader(train_set, batch_size=128, shuffle=True, drop_last=True, pin_memory=True, num_workers=4)
val_loader = data.DataLoader(val_set, batch_size=128, shuffle=False, drop_last=False, num_workers=4)
test_loader = data.DataLoader(test_set, batch_size=128, shuffle=False, drop_last=False, num_workers=4)
# Visualize some examples
NUM_IMAGES = 4
CIFAR_images = torch.stack([val_set[idx][0] for idx in range(NUM_IMAGES)], dim=0)
img_grid = torchvision.utils.make_grid(CIFAR_images, nrow=4, normalize=True, pad_value=0.9)
img_grid = img_grid.permute(1, 2, 0)
plt.figure(figsize=(8,8))
plt.title("Image examples of the CIFAR10 dataset")
plt.imshow(img_grid)
plt.axis('off')
plt.show()
plt.close()
###Output
Files already downloaded and verified
Files already downloaded and verified
###Markdown
Transformers for image classificationTransformers have been originally proposed to process sets since it is a permutation-equivariant architecture, i.e., producing the same output permuted if the input is permuted. To apply Transformers to sequences, we have simply added a positional encoding to the input feature vectors, and the model learned by itself what to do with it. So, why not do the same thing on images? This is exactly what [Alexey Dosovitskiy et al.](https://openreview.net/pdf?id=YicbFdNTTy) proposed in their paper "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale". Specifically, the Vision Transformer is a model for image classification that views images as sequences of smaller patches. As a preprocessing step, we split an image of, for example, $48\times 48$ pixels into 9 $16\times 16$ patches. Each of those patches is considered to be a "word"/"token", and projected to a feature space. With adding positional encodings and a token for classification on top, we can apply a Transformer as usual to this sequence and start training it for our task. A nice GIF visualization of the architecture is shown below (figure credit - [Phil Wang](https://github.com/lucidrains/vit-pytorch/blob/main/images/vit.gif)):We will walk step by step through the Vision Transformer, and implement all parts by ourselves. First, let's implement the image preprocessing: an image of size $N\times N$ has to be split into $(N/M)^2$ patches of size $M\times M$. These represent the input words to the Transformer.
###Code
def img_to_patch(x, patch_size, flatten_channels=True):
"""
Inputs:
x - torch.Tensor representing the image of shape [B, C, H, W]
patch_size - Number of pixels per dimension of the patches (integer)
flatten_channels - If True, the patches will be returned in a flattened format
                           as a feature vector instead of an image grid.
"""
B, C, H, W = x.shape
x = x.reshape(B, C, H//patch_size, patch_size, W//patch_size, patch_size)
x = x.permute(0, 2, 4, 1, 3, 5) # [B, H', W', C, p_H, p_W]
x = x.flatten(1,2) # [B, H'*W', C, p_H, p_W]
if flatten_channels:
x = x.flatten(2,4) # [B, H'*W', C*p_H*p_W]
return x
###Output
_____no_output_____
###Markdown
Let's take a look at how that works for our CIFAR examples above. For our images of size $32\times 32$, we choose a patch size of 4. Hence, we obtain sequences of 64 patches of size $4\times 4$. We visualize them below:
###Code
img_patches = img_to_patch(CIFAR_images, patch_size=4, flatten_channels=False)
fig, ax = plt.subplots(CIFAR_images.shape[0], 1, figsize=(14,3))
fig.suptitle("Images as input sequences of patches")
for i in range(CIFAR_images.shape[0]):
img_grid = torchvision.utils.make_grid(img_patches[i], nrow=64, normalize=True, pad_value=0.9)
img_grid = img_grid.permute(1, 2, 0)
ax[i].imshow(img_grid)
ax[i].axis('off')
plt.show()
plt.close()
###Output
_____no_output_____
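###Markdown
For reference, a small illustrative calculation of the sequence lengths that different patch sizes produce on $32\times 32$ CIFAR images (these are the values discussed in the hyperparameter section later):
###Code
# Number of patches (= Transformer sequence length without the CLS token) per patch size
image_size = 32
for patch_size in [2, 4, 8]:
    num_patches = (image_size // patch_size) ** 2
    print("Patch size %d -> %3d patches of %d values each" % (patch_size, num_patches, 3 * patch_size ** 2))
###Output
_____no_output_____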
###Markdown
Compared to the original images, it is much harder to recognize the objects from those patch lists now. Still, this is the input we provide to the Transformer for classifying the images. The model has to learn itself how it has to combine the patches to recognize the objects. The inductive bias in CNNs that an image is a grid of pixels, is lost in this input format. After we have looked at the preprocessing, we can now start building the Transformer model. Since we have discussed the fundamentals of Multi-Head Attention in [Tutorial 6](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial6/Transformers_and_MHAttention.html), we will use the PyTorch module `nn.MultiheadAttention` ([docs](https://pytorch.org/docs/stable/generated/torch.nn.MultiheadAttention.html?highlight=multiheadtorch.nn.MultiheadAttention)) here. Further, we use the Pre-Layer Normalization version of the Transformer blocks proposed by [Ruibin Xiong et al.](http://proceedings.mlr.press/v119/xiong20b/xiong20b.pdf) in 2020. The idea is to apply Layer Normalization not in between residual blocks, but instead as a first layer in the residual blocks. This reorganization of the layers supports better gradient flow and removes the necessity of a warm-up stage. A visualization of the difference between the standard Post-LN and the Pre-LN version is shown below. The implementation of the Pre-LN attention block looks as follows:
###Code
class AttentionBlock(nn.Module):
def __init__(self, embed_dim, hidden_dim, num_heads, dropout=0.0):
"""
Inputs:
embed_dim - Dimensionality of input and attention feature vectors
hidden_dim - Dimensionality of hidden layer in feed-forward network
(usually 2-4x larger than embed_dim)
num_heads - Number of heads to use in the Multi-Head Attention block
dropout - Amount of dropout to apply in the feed-forward network
"""
super().__init__()
self.layer_norm_1 = nn.LayerNorm(embed_dim)
self.attn = nn.MultiheadAttention(embed_dim, num_heads)
self.layer_norm_2 = nn.LayerNorm(embed_dim)
self.linear = nn.Sequential(
nn.Linear(embed_dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, embed_dim),
nn.Dropout(dropout)
)
def forward(self, x):
inp_x = self.layer_norm_1(x)
x = x + self.attn(inp_x, inp_x, inp_x)[0]
x = x + self.linear(self.layer_norm_2(x))
return x
###Output
_____no_output_____
###Markdown
Now we have all modules ready to build our own Vision Transformer. Besides the Transformer encoder, we need the following modules:* A **linear projection** layer that maps the input patches to a feature vector of larger size. It is implemented by a simple linear layer that takes each $M\times M$ patch independently as input.* A **classification token** that is added to the input sequence. We will use the output feature vector of the classification token (CLS token in short) for determining the classification prediction.* Learnable **positional encodings** that are added to the tokens before being processed by the Transformer. Those are needed to learn position-dependent information, and convert the set to a sequence. Since we usually work with a fixed resolution, we can learn the positional encodings instead of having the pattern of sine and cosine functions.* An **MLP head** that takes the output feature vector of the CLS token, and maps it to a classification prediction. This is usually implemented by a small feed-forward network or even a single linear layer. With those components in mind, let's implement the full Vision Transformer below:
###Code
class VisionTransformer(nn.Module):
def __init__(self, embed_dim, hidden_dim, num_channels, num_heads, num_layers, num_classes, patch_size, num_patches, dropout=0.0):
"""
Inputs:
embed_dim - Dimensionality of the input feature vectors to the Transformer
hidden_dim - Dimensionality of the hidden layer in the feed-forward networks
within the Transformer
num_channels - Number of channels of the input (3 for RGB)
num_heads - Number of heads to use in the Multi-Head Attention block
num_layers - Number of layers to use in the Transformer
num_classes - Number of classes to predict
patch_size - Number of pixels that the patches have per dimension
num_patches - Maximum number of patches an image can have
dropout - Amount of dropout to apply in the feed-forward network and
on the input encoding
"""
super().__init__()
self.patch_size = patch_size
# Layers/Networks
self.input_layer = nn.Linear(num_channels*(patch_size**2), embed_dim)
self.transformer = nn.Sequential(*[AttentionBlock(embed_dim, hidden_dim, num_heads, dropout=dropout) for _ in range(num_layers)])
self.mlp_head = nn.Sequential(
nn.LayerNorm(embed_dim),
nn.Linear(embed_dim, num_classes)
)
self.dropout = nn.Dropout(dropout)
# Parameters/Embeddings
self.cls_token = nn.Parameter(torch.randn(1,1,embed_dim))
self.pos_embedding = nn.Parameter(torch.randn(1,1+num_patches,embed_dim))
def forward(self, x):
# Preprocess input
x = img_to_patch(x, self.patch_size)
B, T, _ = x.shape
x = self.input_layer(x)
# Add CLS token and positional encoding
cls_token = self.cls_token.repeat(B, 1, 1)
x = torch.cat([cls_token, x], dim=1)
x = x + self.pos_embedding[:,:T+1]
        # Apply Transformer
x = self.dropout(x)
x = x.transpose(0, 1)
x = self.transformer(x)
# Perform classification prediction
cls = x[0]
out = self.mlp_head(cls)
return out
###Output
_____no_output_____
###Markdown
Finally, we can put everything into a PyTorch Lightning Module as usual. We use `torch.optim.AdamW` as the optimizer, which is Adam with a corrected weight decay implementation. Since we use the Pre-LN Transformer version, we do not need to use a learning rate warmup stage anymore. Instead, we use the same learning rate scheduler as the CNNs in our previous tutorial on image classification.
###Code
class ViT(pl.LightningModule):
def __init__(self, model_kwargs, lr):
super().__init__()
self.save_hyperparameters()
self.model = VisionTransformer(**model_kwargs)
self.example_input_array = next(iter(train_loader))[0]
def forward(self, x):
return self.model(x)
def configure_optimizers(self):
optimizer = optim.AdamW(self.parameters(), lr=self.hparams.lr)
lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100,150], gamma=0.1)
return [optimizer], [lr_scheduler]
def _calculate_loss(self, batch, mode="train"):
imgs, labels = batch
preds = self.model(imgs)
loss = F.cross_entropy(preds, labels)
acc = (preds.argmax(dim=-1) == labels).float().mean()
self.log('%s_loss' % mode, loss)
self.log('%s_acc' % mode, acc)
return loss
def training_step(self, batch, batch_idx):
loss = self._calculate_loss(batch, mode="train")
return loss
def validation_step(self, batch, batch_idx):
self._calculate_loss(batch, mode="val")
def test_step(self, batch, batch_idx):
self._calculate_loss(batch, mode="test")
###Output
_____no_output_____
###Markdown
Experiments
Commonly, Vision Transformers are applied to large-scale image classification benchmarks such as ImageNet to leverage their full potential. However, here we take a step back and ask: can Vision Transformers also succeed on classical, small benchmarks such as CIFAR10? To find this out, we train a Vision Transformer from scratch on the CIFAR10 dataset. Let's first create a training function for our PyTorch Lightning module which also loads the pre-trained model if you have downloaded it above.
###Code
def train_model(**kwargs):
trainer = pl.Trainer(default_root_dir=os.path.join(CHECKPOINT_PATH, "ViT"),
gpus=1 if str(device)=="cuda:0" else 0,
max_epochs=180,
callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc"),
LearningRateMonitor("epoch")],
progress_bar_refresh_rate=1)
trainer.logger._log_graph = True # If True, we plot the computation graph in tensorboard
trainer.logger._default_hp_metric = None # Optional logging argument that we don't need
# Check whether pretrained model exists. If yes, load it and skip training
pretrained_filename = os.path.join(CHECKPOINT_PATH, "ViT.ckpt")
if os.path.isfile(pretrained_filename):
print("Found pretrained model at %s, loading..." % pretrained_filename)
model = ViT.load_from_checkpoint(pretrained_filename) # Automatically loads the model with the saved hyperparameters
else:
        pl.seed_everything(42) # To be reproducible
model = ViT(**kwargs)
trainer.fit(model, train_loader, val_loader)
model = ViT.load_from_checkpoint(trainer.checkpoint_callback.best_model_path) # Load best checkpoint after training
# Test best model on validation and test set
val_result = trainer.test(model, test_dataloaders=val_loader, verbose=False)
test_result = trainer.test(model, test_dataloaders=test_loader, verbose=False)
result = {"test": test_result[0]["test_acc"], "val": val_result[0]["test_acc"]}
return model, result
###Output
_____no_output_____
###Markdown
Now, we can already start training our model. As seen in our implementation, we have a couple of hyperparameters that we have to set. When creating this notebook, we performed a small grid search over hyperparameters and listed the best ones in the cell below. Nevertheless, it is worth discussing the influence that each hyperparameter has, and what intuition we have for choosing its value.
First, let's consider the patch size. The smaller we make the patches, the longer the input sequences to the Transformer become. While this generally allows the Transformer to model more complex functions, it requires a longer computation time due to the quadratic memory usage in the attention layer. Furthermore, small patches can make the task more difficult since the Transformer has to learn which patches are close-by and which are far away. We experimented with patch sizes of 2, 4, and 8, which give us input sequence lengths of 256, 64, and 16, respectively. We found 4 to result in the best performance and hence pick it below. Next, the embedding and hidden dimensionality have a similar impact on a Transformer as on an MLP. The larger the sizes, the more complex the model becomes, and the longer it takes to train. In Transformers, however, we have one more aspect to consider: the query-key sizes in the Multi-Head Attention layers. Each key has a feature dimensionality of `embed_dim/num_heads`. Considering that we have an input sequence length of 64, a minimum reasonable size for the key vectors is 16 or 32. Lower dimensionalities can restrain the possible attention maps too much. We observed that more than 8 heads are not necessary for the Transformer, and therefore pick an embedding dimensionality of `256`. The hidden dimensionality in the feed-forward networks is usually 2-4x larger than the embedding dimensionality, and thus we pick `512`. Finally, the learning rate for Transformers is usually relatively small, and in papers, a common value to use is 3e-5. However, since we work with a smaller dataset and have a potentially easier task, we found that we are able to increase the learning rate to 3e-4 without any problems. To reduce overfitting, we use a dropout value of 0.2. Remember that we also use small image augmentations as regularization during training.
Feel free to explore the hyperparameters yourself by changing the values below. In general, the Vision Transformer did not prove to be too sensitive to the hyperparameter choices on the CIFAR10 dataset.
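As a quick sanity check of the arithmetic above, the following small sketch (illustrative only; it assumes the $32\times 32$ CIFAR10 images and the hyperparameter values chosen in the next cell) recomputes the input sequence lengths and the per-head key dimensionality:
###Code
# Illustrative sketch: recompute the numbers quoted in the text above.
# Assumes 32x32 input images and the embed_dim/num_heads chosen below.
image_size = 32
for patch_size in [2, 4, 8]:
    seq_len = (image_size // patch_size) ** 2  # (N/M)^2 patches per image
    print(f"patch size {patch_size} -> input sequence length {seq_len}")
embed_dim, num_heads = 256, 8
print(f"per-head key dimensionality: {embed_dim // num_heads}")
###Output
_____no_output_____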
###Code
model, results = train_model(model_kwargs={
'embed_dim': 256,
'hidden_dim': 512,
'num_heads': 8,
'num_layers': 6,
'patch_size': 4,
'num_channels': 3,
'num_patches': 64,
'num_classes': 10,
'dropout': 0.2
},
lr=3e-4)
print("ViT results", results)
###Output
GPU available: True, used: True
TPU available: False, using: 0 TPU cores
###Markdown
The Vision Transformer achieves a validation and test performance of about 75%. In comparison, almost all CNN architectures that we have tested in [Tutorial 5](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial5/Inception_ResNet_DenseNet.html) obtained a classification performance of around 90%. This is a considerable gap and shows that although Vision Transformers perform strongly on ImageNet with potential pretraining, they cannot come close to simple CNNs on CIFAR10 when being trained from scratch. The differences between a CNN and Transformer can be well observed in the training curves. Let's look at them in a tensorboard below:
###Code
# Opens tensorboard in notebook. Adjust the path to your CHECKPOINT_PATH!
%tensorboard --logdir ../saved_models/tutorial15/tensorboards/
###Output
_____no_output_____
###Markdown
Tutorial 15: Vision Transformers
**Filled notebook:** [](https://github.com/phlippe/uvadlc_notebooks/blob/master/docs/tutorial_notebooks/tutorial15/Vision_Transformer.ipynb)[](https://colab.research.google.com/github/phlippe/uvadlc_notebooks/blob/master/docs/tutorial_notebooks/tutorial15/Vision_Transformer.ipynb) **Pre-trained models:** [](https://github.com/phlippe/saved_models/tree/main/tutorial15)[](https://drive.google.com/drive/folders/1BmisSPs5BXQKpolyHp5X4klSAhpDx479?usp=sharing) **Recordings:** [](https://youtu.be/4UyBxlJChfc)
In this tutorial, we will take a closer look at a recent new trend: Transformers for Computer Vision. Since [Alexey Dosovitskiy et al.](https://openreview.net/pdf?id=YicbFdNTTy) successfully applied a Transformer on a variety of image recognition benchmarks, there have been an incredible number of follow-up works showing that CNNs might not be the optimal architecture for Computer Vision anymore. But how do Vision Transformers work exactly, and what benefits and drawbacks do they offer in contrast to CNNs? We will answer these questions by implementing a Vision Transformer ourselves and training it on the popular, small dataset CIFAR10. We will compare these results to the convolutional architectures of [Tutorial 5](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial5/Inception_ResNet_DenseNet.html). If you are not familiar with Transformers yet, take a look at [Tutorial 6](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial6/Transformers_and_MHAttention.html) where we discuss the fundamentals of Multi-Head Attention and Transformers. As in many previous tutorials, we will use [PyTorch Lightning](https://www.pytorchlightning.ai/) again (introduced in [Tutorial 5](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial5/Inception_ResNet_DenseNet.html)). Let's start with importing our standard set of libraries.
###Code
## Standard libraries
import os
import numpy as np
import random
import math
import json
from functools import partial
from PIL import Image
## Imports for plotting
import matplotlib.pyplot as plt
plt.set_cmap('cividis')
%matplotlib inline
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('svg', 'pdf') # For export
from matplotlib.colors import to_rgb
import matplotlib
matplotlib.rcParams['lines.linewidth'] = 2.0
import seaborn as sns
sns.reset_orig()
## tqdm for loading bars
from tqdm.notebook import tqdm
## PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import torch.optim as optim
## Torchvision
import torchvision
from torchvision.datasets import CIFAR10
from torchvision import transforms
# PyTorch Lightning
try:
import pytorch_lightning as pl
except ModuleNotFoundError: # Google Colab does not have PyTorch Lightning installed by default. Hence, we do it here if necessary
!pip install pytorch-lightning==1.3.4
import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
# Import tensorboard
%load_ext tensorboard
# Path to the folder where the datasets are/should be downloaded (e.g. CIFAR10)
DATASET_PATH = "../data"
# Path to the folder where the pretrained models are saved
CHECKPOINT_PATH = "../saved_models/tutorial15"
# Setting the seed
pl.seed_everything(42)
# Ensure that all operations are deterministic on GPU (if used) for reproducibility
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
print("Device:", device)
###Output
Global seed set to 42
###Markdown
We provide a pre-trained Vision Transformer which we download in the next cell. However, Vision Transformers can be trained relatively quickly on CIFAR10, with an overall training time of less than an hour on an NVIDIA TitanRTX. Feel free to experiment with training your own Transformer once you have gone through the whole notebook.
###Code
import urllib.request
from urllib.error import HTTPError
# Github URL where saved models are stored for this tutorial
base_url = "https://raw.githubusercontent.com/phlippe/saved_models/main/"
# Files to download
pretrained_files = ["tutorial15/ViT.ckpt", "tutorial15/tensorboards/ViT/events.out.tfevents.ViT",
"tutorial5/tensorboards/ResNet/events.out.tfevents.resnet"]
# Create checkpoint path if it doesn't exist yet
os.makedirs(CHECKPOINT_PATH, exist_ok=True)
# For each file, check whether it already exists. If not, try downloading it.
for file_name in pretrained_files:
file_path = os.path.join(CHECKPOINT_PATH, file_name.split("/",1)[1])
if "/" in file_name.split("/",1)[1]:
os.makedirs(file_path.rsplit("/",1)[0], exist_ok=True)
if not os.path.isfile(file_path):
file_url = base_url + file_name
print(f"Downloading {file_url}...")
try:
urllib.request.urlretrieve(file_url, file_path)
except HTTPError as e:
print("Something went wrong. Please try to download the file from the GDrive folder, or contact the author with the full output including the following error:\n", e)
###Output
_____no_output_____
###Markdown
We load the CIFAR10 dataset below. We use the same setup of the datasets and data augmentations as for the CNNs in Tutorial 5 to keep a fair comparison. The constants in the `transforms.Normalize` correspond to the values that scale and shift the data to a zero mean and standard deviation of one.
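If you ever want to re-derive constants like these, a small one-off sketch along the following lines (illustrative only; it assumes the imports and `DATASET_PATH` defined above and loads the raw, un-normalized training images once) reproduces them approximately:
###Code
# Illustrative sketch: estimate the per-channel mean/std of the raw CIFAR10 training set.
# Stacks all 50,000 images into one tensor (~600MB), so only run it if memory allows.
raw_train = CIFAR10(root=DATASET_PATH, train=True, transform=transforms.ToTensor(), download=True)
raw_imgs = torch.stack([img for img, _ in raw_train], dim=0)  # [50000, 3, 32, 32]
print("mean:", raw_imgs.mean(dim=[0, 2, 3]))  # roughly [0.4914, 0.4822, 0.4465]
print("std: ", raw_imgs.std(dim=[0, 2, 3]))   # roughly [0.2470, 0.2435, 0.2616]
###Output
_____no_output_____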
###Code
test_transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize([0.49139968, 0.48215841, 0.44653091], [0.24703223, 0.24348513, 0.26158784])
])
# For training, we add some augmentation. Networks are too powerful and would overfit.
train_transform = transforms.Compose([transforms.RandomHorizontalFlip(),
transforms.RandomResizedCrop((32,32),scale=(0.8,1.0),ratio=(0.9,1.1)),
transforms.ToTensor(),
transforms.Normalize([0.49139968, 0.48215841, 0.44653091], [0.24703223, 0.24348513, 0.26158784])
])
# Loading the training dataset. We need to split it into a training and validation part
# We need to do a little trick because the validation set should not use the augmentation.
train_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=train_transform, download=True)
val_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=test_transform, download=True)
pl.seed_everything(42)
train_set, _ = torch.utils.data.random_split(train_dataset, [45000, 5000])
pl.seed_everything(42)
_, val_set = torch.utils.data.random_split(val_dataset, [45000, 5000])
# Loading the test set
test_set = CIFAR10(root=DATASET_PATH, train=False, transform=test_transform, download=True)
# We define a set of data loaders that we can use for various purposes later.
train_loader = data.DataLoader(train_set, batch_size=128, shuffle=True, drop_last=True, pin_memory=True, num_workers=4)
val_loader = data.DataLoader(val_set, batch_size=128, shuffle=False, drop_last=False, num_workers=4)
test_loader = data.DataLoader(test_set, batch_size=128, shuffle=False, drop_last=False, num_workers=4)
# Visualize some examples
NUM_IMAGES = 4
CIFAR_images = torch.stack([val_set[idx][0] for idx in range(NUM_IMAGES)], dim=0)
img_grid = torchvision.utils.make_grid(CIFAR_images, nrow=4, normalize=True, pad_value=0.9)
img_grid = img_grid.permute(1, 2, 0)
plt.figure(figsize=(8,8))
plt.title("Image examples of the CIFAR10 dataset")
plt.imshow(img_grid)
plt.axis('off')
plt.show()
plt.close()
###Output
Files already downloaded and verified
Files already downloaded and verified
###Markdown
Transformers for image classification
Transformers were originally proposed to process sets, since the architecture is permutation-equivariant, i.e., it produces a correspondingly permuted output if the input is permuted. To apply Transformers to sequences, we simply added a positional encoding to the input feature vectors, and the model learned by itself what to do with it. So, why not do the same thing on images? This is exactly what [Alexey Dosovitskiy et al.](https://openreview.net/pdf?id=YicbFdNTTy) proposed in their paper "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale". Specifically, the Vision Transformer is a model for image classification that views images as sequences of smaller patches. As a preprocessing step, we split an image of, for example, $48\times 48$ pixels into 9 $16\times 16$ patches. Each of those patches is considered to be a "word"/"token" and projected to a feature space. By adding positional encodings and a classification token on top, we can apply a Transformer as usual to this sequence and start training it for our task. A nice GIF visualization of the architecture is shown below (figure credit - [Phil Wang](https://github.com/lucidrains/vit-pytorch/blob/main/images/vit.gif)):
We will walk step by step through the Vision Transformer, and implement all parts ourselves. First, let's implement the image preprocessing: an image of size $N\times N$ has to be split into $(N/M)^2$ patches of size $M\times M$. These represent the input words to the Transformer.
###Code
def img_to_patch(x, patch_size, flatten_channels=True):
"""
Inputs:
x - torch.Tensor representing the image of shape [B, C, H, W]
patch_size - Number of pixels per dimension of the patches (integer)
flatten_channels - If True, the patches will be returned in a flattened format
                           as a feature vector instead of an image grid.
"""
B, C, H, W = x.shape
x = x.reshape(B, C, H//patch_size, patch_size, W//patch_size, patch_size)
x = x.permute(0, 2, 4, 1, 3, 5) # [B, H', W', C, p_H, p_W]
x = x.flatten(1,2) # [B, H'*W', C, p_H, p_W]
if flatten_channels:
x = x.flatten(2,4) # [B, H'*W', C*p_H*p_W]
return x
###Output
_____no_output_____
###Markdown
Let's take a look at how that works for our CIFAR examples above. For our images of size $32\times 32$, we choose a patch size of 4. Hence, we obtain sequences of 64 patches of size $4\times 4$. We visualize them below:
###Code
img_patches = img_to_patch(CIFAR_images, patch_size=4, flatten_channels=False)
fig, ax = plt.subplots(CIFAR_images.shape[0], 1, figsize=(14,3))
fig.suptitle("Images as input sequences of patches")
for i in range(CIFAR_images.shape[0]):
img_grid = torchvision.utils.make_grid(img_patches[i], nrow=64, normalize=True, pad_value=0.9)
img_grid = img_grid.permute(1, 2, 0)
ax[i].imshow(img_grid)
ax[i].axis('off')
plt.show()
plt.close()
###Output
_____no_output_____
###Markdown
Compared to the original images, it is much harder to recognize the objects from those patch lists now. Still, this is the input we provide to the Transformer for classifying the images. The model has to learn by itself how to combine the patches to recognize the objects. The inductive bias in CNNs that an image is a grid of pixels is lost in this input format. After we have looked at the preprocessing, we can now start building the Transformer model. Since we have discussed the fundamentals of Multi-Head Attention in [Tutorial 6](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial6/Transformers_and_MHAttention.html), we will use the PyTorch module `nn.MultiheadAttention` ([docs](https://pytorch.org/docs/stable/generated/torch.nn.MultiheadAttention.html?highlight=multiheadtorch.nn.MultiheadAttention)) here. Further, we use the Pre-Layer Normalization version of the Transformer blocks proposed by [Ruibin Xiong et al.](http://proceedings.mlr.press/v119/xiong20b/xiong20b.pdf) in 2020. The idea is to apply Layer Normalization not in between residual blocks, but instead as the first layer within the residual blocks. This reorganization of the layers supports better gradient flow and removes the necessity of a warm-up stage. A visualization of the difference between the standard Post-LN and the Pre-LN version is shown below. The implementation of the Pre-LN attention block looks as follows:
###Code
class AttentionBlock(nn.Module):
def __init__(self, embed_dim, hidden_dim, num_heads, dropout=0.0):
"""
Inputs:
embed_dim - Dimensionality of input and attention feature vectors
hidden_dim - Dimensionality of hidden layer in feed-forward network
(usually 2-4x larger than embed_dim)
num_heads - Number of heads to use in the Multi-Head Attention block
dropout - Amount of dropout to apply in the feed-forward network
"""
super().__init__()
self.layer_norm_1 = nn.LayerNorm(embed_dim)
self.attn = nn.MultiheadAttention(embed_dim, num_heads)
self.layer_norm_2 = nn.LayerNorm(embed_dim)
self.linear = nn.Sequential(
nn.Linear(embed_dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, embed_dim),
nn.Dropout(dropout)
)
def forward(self, x):
inp_x = self.layer_norm_1(x)
x = x + self.attn(inp_x, inp_x, inp_x)[0]
x = x + self.linear(self.layer_norm_2(x))
return x
###Output
_____no_output_____
###Markdown
Now we have all modules ready to build our own Vision Transformer. Besides the Transformer encoder, we need the following modules:
* A **linear projection** layer that maps the input patches to a feature vector of larger size. It is implemented by a simple linear layer that takes each $M\times M$ patch independently as input.
* A **classification token** that is added to the input sequence. We will use the output feature vector of the classification token (CLS token in short) for determining the classification prediction.
* Learnable **positional encodings** that are added to the tokens before being processed by the Transformer. Those are needed to learn position-dependent information and convert the set to a sequence. Since we usually work with a fixed resolution, we can learn the positional encodings instead of using the fixed pattern of sine and cosine functions.
* An **MLP head** that takes the output feature vector of the CLS token and maps it to a classification prediction. This is usually implemented by a small feed-forward network or even a single linear layer.
With those components in mind, let's implement the full Vision Transformer below:
###Code
class VisionTransformer(nn.Module):
def __init__(self, embed_dim, hidden_dim, num_channels, num_heads, num_layers, num_classes, patch_size, num_patches, dropout=0.0):
"""
Inputs:
embed_dim - Dimensionality of the input feature vectors to the Transformer
hidden_dim - Dimensionality of the hidden layer in the feed-forward networks
within the Transformer
num_channels - Number of channels of the input (3 for RGB)
num_heads - Number of heads to use in the Multi-Head Attention block
num_layers - Number of layers to use in the Transformer
num_classes - Number of classes to predict
patch_size - Number of pixels that the patches have per dimension
num_patches - Maximum number of patches an image can have
dropout - Amount of dropout to apply in the feed-forward network and
on the input encoding
"""
super().__init__()
self.patch_size = patch_size
# Layers/Networks
self.input_layer = nn.Linear(num_channels*(patch_size**2), embed_dim)
self.transformer = nn.Sequential(*[AttentionBlock(embed_dim, hidden_dim, num_heads, dropout=dropout) for _ in range(num_layers)])
self.mlp_head = nn.Sequential(
nn.LayerNorm(embed_dim),
nn.Linear(embed_dim, num_classes)
)
self.dropout = nn.Dropout(dropout)
# Parameters/Embeddings
self.cls_token = nn.Parameter(torch.randn(1,1,embed_dim))
self.pos_embedding = nn.Parameter(torch.randn(1,1+num_patches,embed_dim))
def forward(self, x):
# Preprocess input
x = img_to_patch(x, self.patch_size)
B, T, _ = x.shape
x = self.input_layer(x)
# Add CLS token and positional encoding
cls_token = self.cls_token.repeat(B, 1, 1)
x = torch.cat([cls_token, x], dim=1)
x = x + self.pos_embedding[:,:T+1]
        # Apply Transformer
x = self.dropout(x)
x = x.transpose(0, 1)
x = self.transformer(x)
# Perform classification prediction
cls = x[0]
out = self.mlp_head(cls)
return out
###Output
_____no_output_____
###Markdown
Finally, we can put everything into a PyTorch Lightning Module as usual. We use `torch.optim.AdamW` as the optimizer, which is Adam with a corrected weight decay implementation. Since we use the Pre-LN Transformer version, we do not need to use a learning rate warmup stage anymore. Instead, we use the same learning rate scheduler as the CNNs in our previous tutorial on image classification.
###Code
class ViT(pl.LightningModule):
def __init__(self, model_kwargs, lr):
super().__init__()
self.save_hyperparameters()
self.model = VisionTransformer(**model_kwargs)
self.example_input_array = next(iter(train_loader))[0]
def forward(self, x):
return self.model(x)
def configure_optimizers(self):
optimizer = optim.AdamW(self.parameters(), lr=self.hparams.lr)
lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100,150], gamma=0.1)
return [optimizer], [lr_scheduler]
def _calculate_loss(self, batch, mode="train"):
imgs, labels = batch
preds = self.model(imgs)
loss = F.cross_entropy(preds, labels)
acc = (preds.argmax(dim=-1) == labels).float().mean()
self.log(f'{mode}_loss', loss)
self.log(f'{mode}_acc', acc)
return loss
def training_step(self, batch, batch_idx):
loss = self._calculate_loss(batch, mode="train")
return loss
def validation_step(self, batch, batch_idx):
self._calculate_loss(batch, mode="val")
def test_step(self, batch, batch_idx):
self._calculate_loss(batch, mode="test")
###Output
_____no_output_____
###Markdown
Experiments
Commonly, Vision Transformers are applied to large-scale image classification benchmarks such as ImageNet to leverage their full potential. However, here we take a step back and ask: can Vision Transformers also succeed on classical, small benchmarks such as CIFAR10? To find this out, we train a Vision Transformer from scratch on the CIFAR10 dataset. Let's first create a training function for our PyTorch Lightning module which also loads the pre-trained model if you have downloaded it above.
###Code
def train_model(**kwargs):
trainer = pl.Trainer(default_root_dir=os.path.join(CHECKPOINT_PATH, "ViT"),
gpus=1 if str(device)=="cuda:0" else 0,
max_epochs=180,
callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc"),
LearningRateMonitor("epoch")],
progress_bar_refresh_rate=1)
trainer.logger._log_graph = True # If True, we plot the computation graph in tensorboard
trainer.logger._default_hp_metric = None # Optional logging argument that we don't need
# Check whether pretrained model exists. If yes, load it and skip training
pretrained_filename = os.path.join(CHECKPOINT_PATH, "ViT.ckpt")
if os.path.isfile(pretrained_filename):
print(f"Found pretrained model at {pretrained_filename}, loading...")
model = ViT.load_from_checkpoint(pretrained_filename) # Automatically loads the model with the saved hyperparameters
else:
        pl.seed_everything(42) # To be reproducible
model = ViT(**kwargs)
trainer.fit(model, train_loader, val_loader)
model = ViT.load_from_checkpoint(trainer.checkpoint_callback.best_model_path) # Load best checkpoint after training
# Test best model on validation and test set
val_result = trainer.test(model, test_dataloaders=val_loader, verbose=False)
test_result = trainer.test(model, test_dataloaders=test_loader, verbose=False)
result = {"test": test_result[0]["test_acc"], "val": val_result[0]["test_acc"]}
return model, result
###Output
_____no_output_____
###Markdown
Now, we can already start training our model. As seen in our implementation, we have a couple of hyperparameters that we have to set. When creating this notebook, we performed a small grid search over hyperparameters and listed the best ones in the cell below. Nevertheless, it is worth discussing the influence that each hyperparameter has, and what intuition we have for choosing its value.
First, let's consider the patch size. The smaller we make the patches, the longer the input sequences to the Transformer become. While this generally allows the Transformer to model more complex functions, it requires a longer computation time due to the quadratic memory usage in the attention layer. Furthermore, small patches can make the task more difficult since the Transformer has to learn which patches are close-by and which are far away. We experimented with patch sizes of 2, 4, and 8, which give us input sequence lengths of 256, 64, and 16, respectively. We found 4 to result in the best performance and hence pick it below. Next, the embedding and hidden dimensionality have a similar impact on a Transformer as on an MLP. The larger the sizes, the more complex the model becomes, and the longer it takes to train. In Transformers, however, we have one more aspect to consider: the query-key sizes in the Multi-Head Attention layers. Each key has a feature dimensionality of `embed_dim/num_heads`. Considering that we have an input sequence length of 64, a minimum reasonable size for the key vectors is 16 or 32. Lower dimensionalities can restrain the possible attention maps too much. We observed that more than 8 heads are not necessary for the Transformer, and therefore pick an embedding dimensionality of `256`. The hidden dimensionality in the feed-forward networks is usually 2-4x larger than the embedding dimensionality, and thus we pick `512`. Finally, the learning rate for Transformers is usually relatively small, and in papers, a common value to use is 3e-5. However, since we work with a smaller dataset and have a potentially easier task, we found that we are able to increase the learning rate to 3e-4 without any problems. To reduce overfitting, we use a dropout value of 0.2. Remember that we also use small image augmentations as regularization during training.
Feel free to explore the hyperparameters yourself by changing the values below. In general, the Vision Transformer did not prove to be too sensitive to the hyperparameter choices on the CIFAR10 dataset.
###Code
model, results = train_model(model_kwargs={
'embed_dim': 256,
'hidden_dim': 512,
'num_heads': 8,
'num_layers': 6,
'patch_size': 4,
'num_channels': 3,
'num_patches': 64,
'num_classes': 10,
'dropout': 0.2
},
lr=3e-4)
print("ViT results", results)
###Output
GPU available: True, used: True
TPU available: False, using: 0 TPU cores
###Markdown
The Vision Transformer achieves a validation and test performance of about 75%. In comparison, almost all CNN architectures that we have tested in [Tutorial 5](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial5/Inception_ResNet_DenseNet.html) obtained a classification performance of around 90%. This is a considerable gap and shows that although Vision Transformers perform strongly on ImageNet with potential pretraining, they cannot come close to simple CNNs on CIFAR10 when being trained from scratch. The differences between a CNN and Transformer can be well observed in the training curves. Let's look at them in a tensorboard below:
###Code
# Opens tensorboard in notebook. Adjust the path to your CHECKPOINT_PATH!
%tensorboard --logdir ../saved_models/tutorial15/tensorboards/
###Output
_____no_output_____
###Markdown
Tutorial 15: Vision Transformers
**Filled notebook:** [](https://github.com/phlippe/uvadlc_notebooks/blob/master/docs/tutorial_notebooks/tutorial15/Vision_Transformer.ipynb)[](https://colab.research.google.com/github/phlippe/uvadlc_notebooks/blob/master/docs/tutorial_notebooks/tutorial15/Vision_Transformer.ipynb) **Pre-trained models:** [](https://github.com/phlippe/saved_models/tree/main/tutorial15)[](https://drive.google.com/drive/folders/1BmisSPs5BXQKpolyHp5X4klSAhpDx479?usp=sharing) **Recordings:** [](https://youtu.be/4UyBxlJChfc)
In this tutorial, we will take a closer look at a recent new trend: Transformers for Computer Vision. Since [Alexey Dosovitskiy et al.](https://openreview.net/pdf?id=YicbFdNTTy) successfully applied a Transformer on a variety of image recognition benchmarks, there have been an incredible number of follow-up works showing that CNNs might not be the optimal architecture for Computer Vision anymore. But how do Vision Transformers work exactly, and what benefits and drawbacks do they offer in contrast to CNNs? We will answer these questions by implementing a Vision Transformer ourselves and training it on the popular, small dataset CIFAR10. We will compare these results to the convolutional architectures of [Tutorial 5](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial5/Inception_ResNet_DenseNet.html). If you are not familiar with Transformers yet, take a look at [Tutorial 6](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial6/Transformers_and_MHAttention.html) where we discuss the fundamentals of Multi-Head Attention and Transformers. As in many previous tutorials, we will use [PyTorch Lightning](https://www.pytorchlightning.ai/) again (introduced in [Tutorial 5](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial5/Inception_ResNet_DenseNet.html)). Let's start with importing our standard set of libraries.
###Code
## Standard libraries
import os
import numpy as np
import random
import math
import json
from functools import partial
from PIL import Image
## Imports for plotting
import matplotlib.pyplot as plt
plt.set_cmap('cividis')
%matplotlib inline
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('svg', 'pdf') # For export
from matplotlib.colors import to_rgb
import matplotlib
matplotlib.rcParams['lines.linewidth'] = 2.0
import seaborn as sns
sns.reset_orig()
## tqdm for loading bars
from tqdm.notebook import tqdm
## PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import torch.optim as optim
## Torchvision
import torchvision
from torchvision.datasets import CIFAR10
from torchvision import transforms
# PyTorch Lightning
try:
import pytorch_lightning as pl
except ModuleNotFoundError: # Google Colab does not have PyTorch Lightning installed by default. Hence, we do it here if necessary
    !pip install --quiet "pytorch-lightning>=1.4"
import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
# Import tensorboard
%load_ext tensorboard
# Path to the folder where the datasets are/should be downloaded (e.g. CIFAR10)
DATASET_PATH = "../data"
# Path to the folder where the pretrained models are saved
CHECKPOINT_PATH = "../saved_models/tutorial15"
# Setting the seed
pl.seed_everything(42)
# Ensure that all operations are deterministic on GPU (if used) for reproducibility
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
print("Device:", device)
###Output
Global seed set to 42
###Markdown
We provide a pre-trained Vision Transformer which we download in the next cell. However, Vision Transformers can be trained relatively quickly on CIFAR10, with an overall training time of less than an hour on an NVIDIA TitanRTX. Feel free to experiment with training your own Transformer once you have gone through the whole notebook.
###Code
import urllib.request
from urllib.error import HTTPError
# Github URL where saved models are stored for this tutorial
base_url = "https://raw.githubusercontent.com/phlippe/saved_models/main/"
# Files to download
pretrained_files = ["tutorial15/ViT.ckpt", "tutorial15/tensorboards/ViT/events.out.tfevents.ViT",
"tutorial5/tensorboards/ResNet/events.out.tfevents.resnet"]
# Create checkpoint path if it doesn't exist yet
os.makedirs(CHECKPOINT_PATH, exist_ok=True)
# For each file, check whether it already exists. If not, try downloading it.
for file_name in pretrained_files:
file_path = os.path.join(CHECKPOINT_PATH, file_name.split("/",1)[1])
if "/" in file_name.split("/",1)[1]:
os.makedirs(file_path.rsplit("/",1)[0], exist_ok=True)
if not os.path.isfile(file_path):
file_url = base_url + file_name
print(f"Downloading {file_url}...")
try:
urllib.request.urlretrieve(file_url, file_path)
except HTTPError as e:
print("Something went wrong. Please try to download the file from the GDrive folder, or contact the author with the full output including the following error:\n", e)
###Output
_____no_output_____
###Markdown
We load the CIFAR10 dataset below. We use the same setup of the datasets and data augmentations as for the CNNs in Tutorial 5 to keep a fair comparison. The constants in the `transforms.Normalize` correspond to the values that scale and shift the data to a zero mean and standard deviation of one.
###Code
test_transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize([0.49139968, 0.48215841, 0.44653091], [0.24703223, 0.24348513, 0.26158784])
])
# For training, we add some augmentation. Networks are too powerful and would overfit.
train_transform = transforms.Compose([transforms.RandomHorizontalFlip(),
transforms.RandomResizedCrop((32,32),scale=(0.8,1.0),ratio=(0.9,1.1)),
transforms.ToTensor(),
transforms.Normalize([0.49139968, 0.48215841, 0.44653091], [0.24703223, 0.24348513, 0.26158784])
])
# Loading the training dataset. We need to split it into a training and validation part
# We need to do a little trick because the validation set should not use the augmentation.
train_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=train_transform, download=True)
val_dataset = CIFAR10(root=DATASET_PATH, train=True, transform=test_transform, download=True)
pl.seed_everything(42)
train_set, _ = torch.utils.data.random_split(train_dataset, [45000, 5000])
pl.seed_everything(42)
_, val_set = torch.utils.data.random_split(val_dataset, [45000, 5000])
# Loading the test set
test_set = CIFAR10(root=DATASET_PATH, train=False, transform=test_transform, download=True)
# We define a set of data loaders that we can use for various purposes later.
train_loader = data.DataLoader(train_set, batch_size=128, shuffle=True, drop_last=True, pin_memory=True, num_workers=4)
val_loader = data.DataLoader(val_set, batch_size=128, shuffle=False, drop_last=False, num_workers=4)
test_loader = data.DataLoader(test_set, batch_size=128, shuffle=False, drop_last=False, num_workers=4)
# Visualize some examples
NUM_IMAGES = 4
CIFAR_images = torch.stack([val_set[idx][0] for idx in range(NUM_IMAGES)], dim=0)
img_grid = torchvision.utils.make_grid(CIFAR_images, nrow=4, normalize=True, pad_value=0.9)
img_grid = img_grid.permute(1, 2, 0)
plt.figure(figsize=(8,8))
plt.title("Image examples of the CIFAR10 dataset")
plt.imshow(img_grid)
plt.axis('off')
plt.show()
plt.close()
###Output
Files already downloaded and verified
Files already downloaded and verified
###Markdown
Transformers for image classification
Transformers were originally proposed to process sets, since the architecture is permutation-equivariant, i.e., it produces a correspondingly permuted output if the input is permuted. To apply Transformers to sequences, we simply added a positional encoding to the input feature vectors, and the model learned by itself what to do with it. So, why not do the same thing on images? This is exactly what [Alexey Dosovitskiy et al.](https://openreview.net/pdf?id=YicbFdNTTy) proposed in their paper "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale". Specifically, the Vision Transformer is a model for image classification that views images as sequences of smaller patches. As a preprocessing step, we split an image of, for example, $48\times 48$ pixels into 9 $16\times 16$ patches. Each of those patches is considered to be a "word"/"token" and projected to a feature space. By adding positional encodings and a classification token on top, we can apply a Transformer as usual to this sequence and start training it for our task. A nice GIF visualization of the architecture is shown below (figure credit - [Phil Wang](https://github.com/lucidrains/vit-pytorch/blob/main/images/vit.gif)):
We will walk step by step through the Vision Transformer, and implement all parts ourselves. First, let's implement the image preprocessing: an image of size $N\times N$ has to be split into $(N/M)^2$ patches of size $M\times M$. These represent the input words to the Transformer.
###Code
def img_to_patch(x, patch_size, flatten_channels=True):
"""
Inputs:
x - torch.Tensor representing the image of shape [B, C, H, W]
patch_size - Number of pixels per dimension of the patches (integer)
flatten_channels - If True, the patches will be returned in a flattened format
                           as a feature vector instead of an image grid.
"""
B, C, H, W = x.shape
x = x.reshape(B, C, H//patch_size, patch_size, W//patch_size, patch_size)
x = x.permute(0, 2, 4, 1, 3, 5) # [B, H', W', C, p_H, p_W]
x = x.flatten(1,2) # [B, H'*W', C, p_H, p_W]
if flatten_channels:
x = x.flatten(2,4) # [B, H'*W', C*p_H*p_W]
return x
###Output
_____no_output_____
###Markdown
Let's take a look at how that works for our CIFAR examples above. For our images of size $32\times 32$, we choose a patch size of 4. Hence, we obtain sequences of 64 patches of size $4\times 4$. We visualize them below:
###Code
img_patches = img_to_patch(CIFAR_images, patch_size=4, flatten_channels=False)
fig, ax = plt.subplots(CIFAR_images.shape[0], 1, figsize=(14,3))
fig.suptitle("Images as input sequences of patches")
for i in range(CIFAR_images.shape[0]):
img_grid = torchvision.utils.make_grid(img_patches[i], nrow=64, normalize=True, pad_value=0.9)
img_grid = img_grid.permute(1, 2, 0)
ax[i].imshow(img_grid)
ax[i].axis('off')
plt.show()
plt.close()
###Output
_____no_output_____
###Markdown
Compared to the original images, it is much harder to recognize the objects from those patch lists now. Still, this is the input we provide to the Transformer for classifying the images. The model has to learn by itself how to combine the patches to recognize the objects. The inductive bias in CNNs that an image is a grid of pixels is lost in this input format. After we have looked at the preprocessing, we can now start building the Transformer model. Since we have discussed the fundamentals of Multi-Head Attention in [Tutorial 6](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial6/Transformers_and_MHAttention.html), we will use the PyTorch module `nn.MultiheadAttention` ([docs](https://pytorch.org/docs/stable/generated/torch.nn.MultiheadAttention.html?highlight=multiheadtorch.nn.MultiheadAttention)) here. Further, we use the Pre-Layer Normalization version of the Transformer blocks proposed by [Ruibin Xiong et al.](http://proceedings.mlr.press/v119/xiong20b/xiong20b.pdf) in 2020. The idea is to apply Layer Normalization not in between residual blocks, but instead as the first layer within the residual blocks. This reorganization of the layers supports better gradient flow and removes the necessity of a warm-up stage. A visualization of the difference between the standard Post-LN and the Pre-LN version is shown below. The implementation of the Pre-LN attention block looks as follows:
###Code
class AttentionBlock(nn.Module):
def __init__(self, embed_dim, hidden_dim, num_heads, dropout=0.0):
"""
Inputs:
embed_dim - Dimensionality of input and attention feature vectors
hidden_dim - Dimensionality of hidden layer in feed-forward network
(usually 2-4x larger than embed_dim)
num_heads - Number of heads to use in the Multi-Head Attention block
dropout - Amount of dropout to apply in the feed-forward network
"""
super().__init__()
self.layer_norm_1 = nn.LayerNorm(embed_dim)
self.attn = nn.MultiheadAttention(embed_dim, num_heads)
self.layer_norm_2 = nn.LayerNorm(embed_dim)
self.linear = nn.Sequential(
nn.Linear(embed_dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, embed_dim),
nn.Dropout(dropout)
)
def forward(self, x):
inp_x = self.layer_norm_1(x)
x = x + self.attn(inp_x, inp_x, inp_x)[0]
x = x + self.linear(self.layer_norm_2(x))
return x
###Output
_____no_output_____
###Markdown
Now we have all modules ready to build our own Vision Transformer. Besides the Transformer encoder, we need the following modules:
* A **linear projection** layer that maps the input patches to a feature vector of larger size. It is implemented by a simple linear layer that takes each $M\times M$ patch independently as input.
* A **classification token** that is added to the input sequence. We will use the output feature vector of the classification token (CLS token in short) for determining the classification prediction.
* Learnable **positional encodings** that are added to the tokens before being processed by the Transformer. Those are needed to learn position-dependent information and convert the set to a sequence. Since we usually work with a fixed resolution, we can learn the positional encodings instead of using the fixed pattern of sine and cosine functions.
* An **MLP head** that takes the output feature vector of the CLS token and maps it to a classification prediction. This is usually implemented by a small feed-forward network or even a single linear layer.
With those components in mind, let's implement the full Vision Transformer below:
###Code
class VisionTransformer(nn.Module):
def __init__(self, embed_dim, hidden_dim, num_channels, num_heads, num_layers, num_classes, patch_size, num_patches, dropout=0.0):
"""
Inputs:
embed_dim - Dimensionality of the input feature vectors to the Transformer
hidden_dim - Dimensionality of the hidden layer in the feed-forward networks
within the Transformer
num_channels - Number of channels of the input (3 for RGB)
num_heads - Number of heads to use in the Multi-Head Attention block
num_layers - Number of layers to use in the Transformer
num_classes - Number of classes to predict
patch_size - Number of pixels that the patches have per dimension
num_patches - Maximum number of patches an image can have
dropout - Amount of dropout to apply in the feed-forward network and
on the input encoding
"""
super().__init__()
self.patch_size = patch_size
# Layers/Networks
self.input_layer = nn.Linear(num_channels*(patch_size**2), embed_dim)
self.transformer = nn.Sequential(*[AttentionBlock(embed_dim, hidden_dim, num_heads, dropout=dropout) for _ in range(num_layers)])
self.mlp_head = nn.Sequential(
nn.LayerNorm(embed_dim),
nn.Linear(embed_dim, num_classes)
)
self.dropout = nn.Dropout(dropout)
# Parameters/Embeddings
self.cls_token = nn.Parameter(torch.randn(1,1,embed_dim))
self.pos_embedding = nn.Parameter(torch.randn(1,1+num_patches,embed_dim))
def forward(self, x):
# Preprocess input
x = img_to_patch(x, self.patch_size)
B, T, _ = x.shape
x = self.input_layer(x)
# Add CLS token and positional encoding
cls_token = self.cls_token.repeat(B, 1, 1)
x = torch.cat([cls_token, x], dim=1)
x = x + self.pos_embedding[:,:T+1]
        # Apply Transformer
x = self.dropout(x)
x = x.transpose(0, 1)
x = self.transformer(x)
# Perform classification prediction
cls = x[0]
out = self.mlp_head(cls)
return out
###Output
_____no_output_____
###Markdown
Finally, we can put everything into a PyTorch Lightning Module as usual. We use `torch.optim.AdamW` as the optimizer, which is Adam with a corrected weight decay implementation. Since we use the Pre-LN Transformer version, we do not need to use a learning rate warmup stage anymore. Instead, we use the same learning rate scheduler as the CNNs in our previous tutorial on image classification.
###Code
class ViT(pl.LightningModule):
def __init__(self, model_kwargs, lr):
super().__init__()
self.save_hyperparameters()
self.model = VisionTransformer(**model_kwargs)
self.example_input_array = next(iter(train_loader))[0]
def forward(self, x):
return self.model(x)
def configure_optimizers(self):
optimizer = optim.AdamW(self.parameters(), lr=self.hparams.lr)
lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100,150], gamma=0.1)
return [optimizer], [lr_scheduler]
def _calculate_loss(self, batch, mode="train"):
imgs, labels = batch
preds = self.model(imgs)
loss = F.cross_entropy(preds, labels)
acc = (preds.argmax(dim=-1) == labels).float().mean()
self.log(f'{mode}_loss', loss)
self.log(f'{mode}_acc', acc)
return loss
def training_step(self, batch, batch_idx):
loss = self._calculate_loss(batch, mode="train")
return loss
def validation_step(self, batch, batch_idx):
self._calculate_loss(batch, mode="val")
def test_step(self, batch, batch_idx):
self._calculate_loss(batch, mode="test")
###Output
_____no_output_____
###Markdown
Experiments
Commonly, Vision Transformers are applied to large-scale image classification benchmarks such as ImageNet to leverage their full potential. However, here we take a step back and ask: can Vision Transformers also succeed on classical, small benchmarks such as CIFAR10? To find this out, we train a Vision Transformer from scratch on the CIFAR10 dataset. Let's first create a training function for our PyTorch Lightning module which also loads the pre-trained model if you have downloaded it above.
###Code
def train_model(**kwargs):
trainer = pl.Trainer(default_root_dir=os.path.join(CHECKPOINT_PATH, "ViT"),
gpus=1 if str(device)=="cuda:0" else 0,
max_epochs=180,
callbacks=[ModelCheckpoint(save_weights_only=True, mode="max", monitor="val_acc"),
LearningRateMonitor("epoch")],
progress_bar_refresh_rate=1)
trainer.logger._log_graph = True # If True, we plot the computation graph in tensorboard
trainer.logger._default_hp_metric = None # Optional logging argument that we don't need
# Check whether pretrained model exists. If yes, load it and skip training
pretrained_filename = os.path.join(CHECKPOINT_PATH, "ViT.ckpt")
if os.path.isfile(pretrained_filename):
print(f"Found pretrained model at {pretrained_filename}, loading...")
model = ViT.load_from_checkpoint(pretrained_filename) # Automatically loads the model with the saved hyperparameters
else:
        pl.seed_everything(42) # To be reproducible
model = ViT(**kwargs)
trainer.fit(model, train_loader, val_loader)
model = ViT.load_from_checkpoint(trainer.checkpoint_callback.best_model_path) # Load best checkpoint after training
# Test best model on validation and test set
val_result = trainer.test(model, val_loader, verbose=False)
test_result = trainer.test(model, test_loader, verbose=False)
result = {"test": test_result[0]["test_acc"], "val": val_result[0]["test_acc"]}
return model, result
###Output
_____no_output_____
###Markdown
Now, we can already start training our model. As seen in our implementation, we have a couple of hyperparameters that we have to set. When creating this notebook, we performed a small grid search over hyperparameters and listed the best ones in the cell below. Nevertheless, it is worth discussing the influence that each hyperparameter has, and what intuition we have for choosing its value.
First, let's consider the patch size. The smaller we make the patches, the longer the input sequences to the Transformer become. While this generally allows the Transformer to model more complex functions, it requires a longer computation time due to the quadratic memory usage in the attention layer. Furthermore, small patches can make the task more difficult since the Transformer has to learn which patches are close-by and which are far away. We experimented with patch sizes of 2, 4, and 8, which give us input sequence lengths of 256, 64, and 16, respectively. We found 4 to result in the best performance and hence pick it below. Next, the embedding and hidden dimensionality have a similar impact on a Transformer as on an MLP. The larger the sizes, the more complex the model becomes, and the longer it takes to train. In Transformers, however, we have one more aspect to consider: the query-key sizes in the Multi-Head Attention layers. Each key has a feature dimensionality of `embed_dim/num_heads`. Considering that we have an input sequence length of 64, a minimum reasonable size for the key vectors is 16 or 32. Lower dimensionalities can restrain the possible attention maps too much. We observed that more than 8 heads are not necessary for the Transformer, and therefore pick an embedding dimensionality of `256`. The hidden dimensionality in the feed-forward networks is usually 2-4x larger than the embedding dimensionality, and thus we pick `512`. Finally, the learning rate for Transformers is usually relatively small, and in papers, a common value to use is 3e-5. However, since we work with a smaller dataset and have a potentially easier task, we found that we are able to increase the learning rate to 3e-4 without any problems. To reduce overfitting, we use a dropout value of 0.2. Remember that we also use small image augmentations as regularization during training.
Feel free to explore the hyperparameters yourself by changing the values below. In general, the Vision Transformer did not prove to be too sensitive to the hyperparameter choices on the CIFAR10 dataset.
###Code
model, results = train_model(model_kwargs={
'embed_dim': 256,
'hidden_dim': 512,
'num_heads': 8,
'num_layers': 6,
'patch_size': 4,
'num_channels': 3,
'num_patches': 64,
'num_classes': 10,
'dropout': 0.2
},
lr=3e-4)
print("ViT results", results)
###Output
GPU available: True, used: True
TPU available: False, using: 0 TPU cores
###Markdown
The Vision Transformer achieves a validation and test performance of about 75%. In comparison, almost all CNN architectures that we have tested in [Tutorial 5](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial5/Inception_ResNet_DenseNet.html) obtained a classification performance of around 90%. This is a considerable gap and shows that although Vision Transformers perform strongly on ImageNet with potential pretraining, they cannot come close to simple CNNs on CIFAR10 when being trained from scratch. The differences between a CNN and Transformer can be well observed in the training curves. Let's look at them in a tensorboard below:
###Code
# Opens tensorboard in notebook. Adjust the path to your CHECKPOINT_PATH!
%tensorboard --logdir ../saved_models/tutorial15/tensorboards/
###Output
_____no_output_____ |
dsa/XVDPU-TRD/vck190_platform/petalinux/xilinx-vck190-base-trd/project-spec/meta-base-trd/recipes-apps/base-trd/base-trd/notebooks/base-trd-nb4.ipynb | ###Markdown
 1. Introduction This notebook demonstrates how to create two parallel video pipelines using the GStreamer multimedia framework:* The first pipeline captures video from a V4L2 device and displays the output on a monitor using a DRM/KMS display device.* The second pipeline decodes a VP9 encoded video file and displays the output on the same monitor using the same DRM/KMS display device.The display device contains a video mixer which allows targeting different video planes for the individual pipelines with programmable x/y-offsets as well as width and height.Refer to:* nb1 for more details on the video file decode pipeline* nb2 for more details on the V4L2 capture pipeline* nb3 for more details on the video mixer configuration and display pipelineIn this notebook, you will:1. Create two parallel GStreamer video pipelines using the ``parse_launch()`` API2. Create a GStreamer pipeline graph and view it inside this notebook. 2. Imports and Initialization Import all python modules required for this notebook.
###Code
from IPython.display import Image, display, clear_output
import glob
import subprocess
import pydot
import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, GLib, Gst
###Output
_____no_output_____
###Markdown
This is the Base TRD notebook 4 (nb4).
###Code
nb = "nb4"
###Output
_____no_output_____
###Markdown
Create a directory for saving the pipeline graph as a dot file. Set the GStreamer debug dot directory environment variable to point to that directory.
###Code
dotdir = "/home/root/gst-dot/" + nb
!mkdir -p $dotdir
%env GST_DEBUG_DUMP_DOT_DIR = $dotdir
###Output
_____no_output_____
###Markdown
Initialize the GStreamer library. Enable debug output by setting the debug threshold string; here the level is set to 1 for all categories.
###Code
Gst.init(None)
Gst.debug_set_threshold_from_string('*:1', True)
###Output
_____no_output_____
###Markdown
3. Create String Representation of the First GStreamer Pipeline
The first pipeline consists of the following elements:
* ``mediasrcbin``
* ``caps``
* ``kmssink``
The ``get_media_dev_by_name`` function returns the matching media node for a given video capture source. The following sources are supported in this notebook:
* ``vivid`` : virtual video device (default)
* ``usb`` : requires USB webcam
* ``mipi`` : platform1 only, requires FMC card
* ``hdmi`` : platform3 only, requires HDMI input
###Code
def get_media_dev_by_name(src):
sources = {
'vivid' : 'vivid',
"usb" : 'uvcvideo',
'mipi' : 'vcap_csi',
'hdmi' : 'vcap_hdmi'
}
devices = glob.glob('/dev/media*')
for dev in devices:
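        # 'media-ctl -d <dev> -p' prints the media graph; return the first device whose topology mentions the requested driver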
proc = subprocess.run(['media-ctl', '-d', dev, '-p'], capture_output=True, encoding='utf8')
for line in proc.stdout.splitlines():
if sources[src] in line:
return dev
###Output
_____no_output_____
###Markdown
Describe the ``mediasrcbin`` element and its properties as string representation.
###Code
source = "vivid" # Change source to vivid, usb, mipi, hdmi
media_device = get_media_dev_by_name(source)
if media_device is None:
raise Exception('Unable to find video source ' + source + '. Make sure the device is plugged in, powered, and the correct platform is used.')
io_mode = "mmap"
if source == "mipi" or source == "hdmi":
io_mode = "dmabuf"
src_1 = "mediasrcbin media-device=" + media_device + " v4l2src0::io-mode=" + io_mode
###Output
_____no_output_____
###Markdown
Describe the ``caps`` filter element as string representation.
###Code
width = 1280
height = 720
fmt = "YUY2"
caps = "video/x-raw, width=" + str(width) + ", height=" + str(height) + ", format=" + fmt
if source == "mipi" or source == "hdmi":
fps = "60/1"
caps = caps + ", framerate=" + fps
###Output
_____no_output_____
###Markdown
Describe the ``kmssink`` element and its properties as string representation.
###Code
plane_id_1 = 38
xoff_1 = 0
yoff_1 = 0
render_rectangle_1 = "<" + str(xoff_1) + "," + str(yoff_1) + "," + str(width) + "," + str(height) + ">"
sink_1 = "kmssink" + " plane-id=" + str(plane_id_1) + " render-rectangle=" + render_rectangle_1
###Output
_____no_output_____
###Markdown
Create a string representation of the first pipeline by concatenating the individual element strings.
###Code
pipe_1 = src_1 + " ! " + caps + " ! " + sink_1
print(pipe_1)
###Output
_____no_output_____
###Markdown
 4. Create String Representation of the Second GStreamer Pipeline The second pipeline consists of the following elements:* ``multifilesrc``* ``decodebin``* ``videoconvert``* ``kmssink`` Describe the ``multifilesrc`` element and its properties as string representation.
###Code
file_name = "/usr/share/movies/Big_Buck_Bunny_4K.webm.360p.vp9.webm"
loop = True
src_2 = "multifilesrc location=" + file_name + " loop=" + str(loop)
###Output
_____no_output_____
###Markdown
Describe the ``decodebin`` and ``videoconvert`` elements as string representations.
###Code
dec = "decodebin"
cvt = "videoconvert"
###Output
_____no_output_____
###Markdown
 Describe the ``kmssink`` element and its properties as string representation.**Note:** The same ``kmssink`` element is used as in pipeline 1; only the ``plane-id`` and the ``render-rectangle`` properties are set differently. The output of this pipeline is shown on a different plane and the x/y-offsets are set such that the planes of pipeline 1 and 2 don't overlap.
###Code
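# Position the second plane directly below the first one (which covers 0,0 to 1280x720) so the two outputs don't overlap on screen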
plane_id_2 = 39
xoff_2 = 0
yoff_2 = 720
width_2 = 640
height_2 = 360
render_rectangle_2 = "<" + str(xoff_2) + "," + str(yoff_2) + "," + str(width_2) + "," + str(height_2) + ">"
sink_2 = "kmssink" + " plane-id=" + str(plane_id_2) + " render-rectangle=" + render_rectangle_2
###Output
_____no_output_____
###Markdown
Create a string representation of the second pipeline by concatenating the individual element strings.
###Code
pipe_2 = src_2 + " ! " + dec + " ! " + cvt + " ! "+ sink_2
print(pipe_2)
###Output
_____no_output_____
###Markdown
5. Create and Run the GStreamer Pipelines Parse the string representations of the first and second pipeline as a single pipeline graph.
###Code
pipeline = Gst.parse_launch(pipe_1 + " " + pipe_2)
###Output
_____no_output_____
###Markdown
The ``bus_call`` function listens on the bus for ``EOS`` and ``ERROR`` events. If any of these events occur, stop the pipeline (set to ``NULL`` state) and quit the main loop.In case of an ``ERROR`` event, parse and print the error message.
###Code
def bus_call(bus, message, loop):
t = message.type
if t == Gst.MessageType.EOS:
sys.stdout.write("End-of-stream\n")
pipeline.set_state(Gst.State.NULL)
loop.quit()
elif t == Gst.MessageType.ERROR:
err, debug = message.parse_error()
sys.stderr.write("Error: %s: %s\n" % (err, debug))
pipeline.set_state(Gst.State.NULL)
loop.quit()
return True
###Output
_____no_output_____
###Markdown
Start the pipeline (set to ``PLAYING`` state), create the main loop and listen to messages on the bus. Register the ``bus_call`` callback function with the ``message`` signal of the bus. Start the main loop.The video will be displayed on the monitor. To stop the pipeline, click the square shaped icon labelled 'Interrupt the kernel' in the top menu bar. Create a dot graph of the pipeline topology before stopping the pipeline. Quit the main loop.
###Code
pipeline.set_state(Gst.State.PLAYING);
loop = GLib.MainLoop()
bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect("message", bus_call, loop)
try:
loop.run()
except:
sys.stdout.write("Interrupt caught\n")
Gst.debug_bin_to_dot_file(pipeline, Gst.DebugGraphDetails.ALL, nb)
pipeline.set_state(Gst.State.NULL)
loop.quit()
pass
###Output
_____no_output_____
###Markdown
 6. View the Pipeline dot Graph Convert the dot file to png and display the pipeline graph. The image will be displayed below the following code cell. Double click on the generated image file to zoom in.**Note:** This step may take a few seconds. Also, compared to previous notebooks, two disjoint graphs are displayed in the same image as we have created two parallel pipelines in this example.
###Code
dotfile = dotdir + "/" + nb + ".dot"
graph = pydot.graph_from_dot_file(dotfile, 'utf-8')
display(Image(graph[0].create(None, 'png', 'utf-8')))
###Output
_____no_output_____ |
1 Panda Data Analysis/topic1_practice_pandas_titanic.ipynb | ###Markdown
[mlcourse.ai](https://mlcourse.ai) – Open Machine Learning Course Author: [Yury Kashnitskiy](https://yorko.github.io). This material is subject to the terms and conditions of the [Creative Commons CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/) license. Free use is permitted for any non-commercial purpose. Topic 1. Exploratory data analysis with Pandas Practice. Analyzing "Titanic" passengers**Fill in the missing code ("You code here") and choose answers in a [web-form](https://docs.google.com/forms/d/16EfhpDGPrREry0gfDQdRPjoiQX9IumaL2mPR0rcj19k/edit).**
###Code
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
pd.set_option("display.precision", 2)
###Output
_____no_output_____
###Markdown
**Read data into a Pandas DataFrame**
###Code
data = pd.read_csv('../practical/titanic_train.csv',
index_col='PassengerId')
print('Ready')
###Output
Ready
###Markdown
**First 5 rows**
###Code
data.head(5)
data.describe()
###Output
_____no_output_____
###Markdown
 **Let's select those passengers who embarked in Cherbourg (Embarked=C) and paid > 200 pounds for their ticket (Fare > 200).**Make sure you understand how this construction actually works.
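As a quick aside (an illustrative sketch added for clarity, not part of the original assignment): each comparison yields a boolean Series, `&` combines them element-wise, and the resulting mask selects the matching rows.
```python
mask = (data['Embarked'] == 'C') & (data['Fare'] > 200)  # boolean Series, True where both conditions hold
data[mask].head()                                        # equivalent to the chained expression in the next cell
```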
###Code
data[(data['Embarked'] == 'C') & (data['Fare'] > 200)].head()
###Output
_____no_output_____
###Markdown
**We can sort these people by Fare in descending order.**
###Code
data[(data['Embarked'] == 'C') &
(data['Fare'] > 200)].sort_values(by='Fare',
ascending=False).head()
###Output
_____no_output_____
###Markdown
**Let's create a new feature.**
###Code
def age_category(age):
'''
< 30 -> 1
>= 30, <55 -> 2
>= 55 -> 3
'''
if age < 30:
return 1
elif age < 55:
return 2
elif age >= 55:
return 3
age_categories = [age_category(age) for age in data.Age]
data['Age_category'] = age_categories
print('ok')
###Output
ok
###Markdown
**Another way is to do it with `apply`.**
###Code
data['Age_category'] = data['Age'].apply(age_category)
###Output
_____no_output_____
###Markdown
 **1. How many men/women were there onboard?**- 412 men and 479 women- 314 men and 577 women- 479 men and 412 women- 577 men and 314 women
###Code
# You code here
#data['Sex'].count
data['Sex'].value_counts()
data.head(80)
###Output
_____no_output_____
###Markdown
**2. Print the distribution of the `Pclass` feature. Then the same, but for men and women separately. How many men from second class were there onboard?**- 104- 108- 112- 125
###Code
# You code here
data[(data['Pclass']==2)& (data['Sex']=='male')].count()
###Output
_____no_output_____
###Markdown
 **3. What are the median and standard deviation of `Fare`? Round to two decimals.**- median is 14.45, standard deviation is 49.69- median is 15.1, standard deviation is 12.15- median is 13.15, standard deviation is 35.3- median is 17.43, standard deviation is 39.1
###Code
# You code here
print(data['Fare'].median())
print(data['Fare'].std())
data.head(80)
###Output
14.4542
49.693428597180905
###Markdown
 **4. Is it true that the mean age of survivors is higher than that of passengers who eventually died?**- Yes- No
###Code
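# Note: dropna() drops rows with a missing value in *any* column (e.g. Cabin), so the means below use a reduced subset; data.groupby('Survived')['Age'].mean() on the full frame only ignores missing ages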
ref=data.dropna()
#data[(data['Survived']==0)]
#print(ref['Age'].mean())
print(ref[ref['Survived']==1]['Age'].mean())
print(ref[ref['Survived']==0]['Age'].mean())
ref.groupby('Survived')['Age'].mean()
#print(data[(data['Survived']==1)].mean())
###Output
32.905853658536586
41.35
###Markdown
 **5. Is it true that passengers younger than 30 y.o. survived more frequently than those older than 60 y.o.? What are the shares of survivors among young and old people?**- 22.7% among young and 40.6% among old- 40.6% among young and 22.7% among old- 35.3% among young and 27.4% among old- 27.4% among young and 35.3% among old
###Code
#data[(data['Pclass']==2)& (data['Sex']=='male')].count()
young=data.loc[data['Age'] < 30, 'Survived']
old=data.loc[data['Age'] > 60, 'Survived']
print((100 * young).mean())
print((100 * old).mean())
###Output
40.625
22.727272727272727
###Markdown
 **6. Is it true that women survived more frequently than men? What are the shares of survivors among men and women?**- 30.2% among men and 46.2% among women- 35.7% among men and 74.2% among women- 21.1% among men and 46.2% among women- 18.9% among men and 74.2% among women
###Code
# You code here
men=data[data['Sex']=='male']['Survived'].mean()
women=data[data['Sex']=='female']['Survived'].mean()
print(men*100)
print(women*100)
###Output
18.890814558058924
74.20382165605095
###Markdown
**7. What's the most popular first name among male passengers?**- Charles- Thomas- William- John
###Code
# You code here
first_names = data.loc[data['Sex'] == 'male', 'Name'].apply(lambda
full_name:
full_name.split(',')[1].split()[1])
first_names.value_counts().head()
###Output
_____no_output_____
###Markdown
 **8. How does the average age for men/women depend on `Pclass`? Choose all correct statements:**- On average, men of the 1st class are older than 40- On average, women of the 1st class are older than 40- Men of all classes are on average older than women of the same class- On average, passengers of the 1st class are older than those of the 2nd class, who are older than passengers of the 3rd class
###Code
# You code here
pd.crosstab(data['Pclass'], data['Sex'],values=data['Age'], aggfunc=np.mean)
###Output
_____no_output_____ |
AdalineGradient_Iris.ipynb | ###Markdown
###Code
# Python 3.5.4 |Continuum Analytics, Inc.|
# Jupyter Notebook 5.0.0
# SAMPLE CODE FROM RASCHKA (2015)
# ADAPTIVE LINEAR NEURON CLASSIFICATION FOR IRIS DATA
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn as sns
# CREATE ADALINE CLASSIFIER
class AdalineGD(object):
"""ADAptive LInear NEuron classifier.
Parameters
------------
eta : float
Learning rate (between 0.0 and 1.0)
n_iter : int
Passes over the training dataset.
Attributes
-----------
w_ : 1d-array
Weights after fitting.
    cost_ : list
      Sum-of-squares cost value in every epoch.
"""
def __init__(self, eta=0.01, n_iter=50):
self.eta = eta
self.n_iter = n_iter
def fit(self, X, y):
""" Fit training data.
Parameters
----------
X : {array-like}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
self.w_ = np.zeros(1 + X.shape[1])
self.cost_ = []
for i in range(self.n_iter):
output = self.net_input(X)
errors = (y - output)
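            # Batch gradient-descent update: w_j += eta * sum_i (y_i - output_i) * x_ij; the bias gets the plain error sum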
self.w_[1:] += self.eta * X.T.dot(errors)
self.w_[0] += self.eta * errors.sum()
cost = (errors**2).sum() / 2.0
self.cost_.append(cost)
return self
def net_input(self, X):
"""Calculate net input"""
return np.dot(X, self.w_[1:]) + self.w_[0]
def activation(self, X):
"""Compute linear activation"""
return self.net_input(X)
def predict(self, X):
"""Return class label after unit step"""
return np.where(self.activation(X) >= 0.0, 1, -1)
# FUNCTION FOR PLOTTING CLASSIFICATION REGIONS
def plot_decision_regions(X, y, classifier, resolution=0.02):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class samples
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
alpha=0.8, c=cmap(idx),
marker=markers[idx], label=cl)
# OBTAIN
# USING FAMOUS IRIS DATA SET FROM FISHER
df = pd.read_csv('https://archive.ics.uci.edu/ml/'
'machine-learning-databases/iris/iris.data', header=None)
# SCRUB
# ONLY USE SETOSA & VERSICOLOR
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)
# ONLY USE SEPAL & PETAL LENGTH
X = df.iloc[0:100, [0, 2]].values
# SOME QUICK THEORY
# SAMPLE FIGURES FOR DISPLAYING IMPACT OF LEARNING RATE
# LEARNING RATE TOO HIGH - INCREASE IN ERROR
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))
ada1 = AdalineGD(n_iter=10, eta=0.01).fit(X, y)
ax[0].plot(range(1, len(ada1.cost_) + 1), np.log10(ada1.cost_), marker='o')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('log(Sum-squared-error)')
ax[0].set_title('Adaline - Learning rate 0.01')
# LEARNING RATE TOO SMALL - INCREASE IN NUMBER OF EPOCHS
ada2 = AdalineGD(n_iter=10, eta=0.0001).fit(X, y)
ax[1].plot(range(1, len(ada2.cost_) + 1), ada2.cost_, marker='o')
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('Sum-squared-error')
ax[1].set_title('Adaline - Learning rate 0.0001')
plt.tight_layout()
# plt.savefig('./adaline_1.png', dpi=300)
plt.show()
# SCRUB
# STANDARDIZATION - SUBTRACTING MEAN / DIVIDING BY STD DEV
# IF FEATURES HAVE A LARGE RANGE, THESE FEATURES MIGHT DOMINATE IMPACT ON CLASSIFIER
# GRADIENT DESCENT CONVERGES FASTER WITH FEATURE SCALING
X_std = np.copy(X)
X_std[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()
X_std[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std()
# FIRST PASS ON USING Adaline Gradient Descent
# SET ITERATIONS AT 15 & LEARNING RATE AT 0.01
ada = AdalineGD(n_iter=15, eta=0.01)
ada.fit(X_std, y)
plot_decision_regions(X_std, y, classifier=ada)
plt.title('Adaline - Gradient Descent')
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
# plt.savefig('./adaline_1.png', dpi=300)
plt.show()
plt.plot(range(1, len(ada.cost_) + 1), ada.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Sum-squared-error')
plt.tight_layout()
# plt.savefig('./adaline_goodeta.png', dpi=300)
plt.show()
# SECOND PASS ON USING Adaline Gradient Descent
# SET ITERATIONS AT 100 & LEARNING RATE AT 0.0001
ada = AdalineGD(n_iter=100, eta=0.0001)
ada.fit(X_std, y)
plot_decision_regions(X_std, y, classifier=ada)
plt.title('Adaline - Gradient Descent')
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
# plt.savefig('./adaline_2.png', dpi=300)
plt.show()
plt.plot(range(1, len(ada.cost_) + 1), ada.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Sum-squared-error')
plt.tight_layout()
# plt.savefig('./adaline_smalleta.png', dpi=300)
plt.show()
# THIRD PASS ON USING Adaline Gradient Descent
# SET ITERATIONS AT 30 & LEARNING RATE AT 0.1
ada = AdalineGD(n_iter=30, eta=0.1)
ada.fit(X_std, y)
plot_decision_regions(X_std, y, classifier=ada)
plt.title('Adaline - Gradient Descent')
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
# plt.savefig('./adaline_3.png', dpi=300)
plt.show()
plt.plot(range(1, len(ada.cost_) + 1), ada.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Sum-squared-error')
plt.tight_layout()
# plt.savefig('./adaline_badeta.png', dpi=300)
plt.show()
# System Information
import platform
print('Python is ' + platform.python_version())
pd.show_versions(as_json=False)
###Output
_____no_output_____ |
hyper_parameter_tuning_model_building.ipynb | ###Markdown
**Importing Libraries**
###Code
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.ensemble import BaggingClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics
from sklearn.feature_extraction import DictVectorizer
import warnings
warnings.filterwarnings('ignore')
from sklearn.preprocessing import PowerTransformer
import optuna
from lightgbm import LGBMClassifier
from sklearn.ensemble import VotingClassifier
import pickle
!pip3 freeze > requirements.txt
###Output
_____no_output_____
###Markdown
**Loading Data**
###Code
df = pd.read_csv('/content/training_data_skf_no_smote.csv')
df.shape
df.head()
###Output
_____no_output_____
###Markdown
**Checking missing values and Constant Features**
###Code
df.isnull().sum()
df.nunique() == 1
###Output
_____no_output_____
###Markdown
**Data Description**
###Code
df.describe().T
###Output
_____no_output_____
###Markdown
**Creating list of categorical, numerical and useful columns**
###Code
useful_cols = [col for col in df.columns if col not in ['id', 'Revenue', 'kfold']]
categorical = [col for col in useful_cols if df[col].dtype in ['object', 'bool']]
numerical = [col for col in useful_cols if col not in categorical]
###Output
_____no_output_____
###Markdown
**Some Transformations:**- One-Hot-Encoding categorical variables- Encoding ``Weekend`` variable- Applying Power Transformation to numerical columns
###Code
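# Transformation pipeline: split the data, label-encode 'Weekend', one-hot encode with DictVectorizer, power-transform the feature columns, and cast the target/fold columns back to int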
df_train, df_test = train_test_split(df, test_size=0.2, random_state=7)
le = preprocessing.LabelEncoder()
df_train.Weekend = le.fit_transform(df_train.Weekend)
df_test.Weekend = le.transform(df_test.Weekend)
dicts = df_train.to_dict(orient='records')
dv = DictVectorizer(sparse=False)
df_train = pd.DataFrame(dv.fit_transform(dicts),columns=list(dv.get_feature_names_out()))
test_dicts = df_test.to_dict(orient='records')
df_test = pd.DataFrame(dv.transform(test_dicts),columns=list(dv.get_feature_names_out()))
useful_cols = [col for col in df_train.columns if col not in ['id', 'Revenue', 'kfold']]
categorical = [col for col in useful_cols if df_train[col].dtype in ['object', 'bool']]
numerical = [col for col in useful_cols if col not in categorical]
pt = PowerTransformer()
pt_num_tr = pd.DataFrame(pt.fit_transform(df_train[useful_cols]),columns=useful_cols)
pt_num_ts = pd.DataFrame(pt.transform(df_test[useful_cols]),columns=useful_cols)
df_train = pd.concat([df_train.drop(useful_cols, axis = 1),pt_num_tr],axis=1)
df_test = pd.concat([df_test.drop(useful_cols, axis = 1),pt_num_ts],axis=1)
useful_cols = [col for col in df_train.columns if col not in ['id', 'Revenue', 'kfold']]
categorical = [col for col in useful_cols if df_train[col].dtype in ['object', 'bool']]
numerical = [col for col in useful_cols if col not in categorical]
scaler= preprocessing.RobustScaler()
df_train.Revenue = df_train.Revenue.astype('int')
df_train.kfold = df_train.kfold.astype('int')
df_test.Revenue = df_test.Revenue.astype('int')
df_test.kfold = df_test.kfold.astype('int')
###Output
_____no_output_____
###Markdown
 **Hyper-Tuning and Modelling**- **Hypertuning using Optuna:** - I've selected the f1-score as the metric for deciding the final parameters because our data is unbalanced. - For this purpose I've split the data into 5 stratified folds, which is shown in a separate notebook. - Next I've calculated the f1-score for both the training and the validation set. - The objective function returns the mean difference of these f1-scores, i.e. mean(training_f1) - mean(validation_f1). - We want this difference to be as small as possible: the validation f1 should be at least as high as the training f1, which happens when the returned value is small, ideally negative.- **Model Building:** - The plan is to build 4 different models, each with its hyper-parameters optimized using Optuna. - Finally we combine these models in a Voting Classifier and get the results using all 4 models. **Model-1: DecisionTreeClassifier:**- Hypertuning using Optuna- Model building
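A quick illustration of the selection criterion with made-up numbers: if mean(training_f1) = 0.70 and mean(validation_f1) = 0.66, the objective is +0.04 (overfitting); if the validation mean rises to 0.71, the objective is -0.01, which the study prefers because its direction is set to 'minimize'.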
###Code
def objective(trial):
scores_valid = []
scores_train = []
for fold in range(5):
xtrain = df_train[df_train.kfold != fold].reset_index(drop=True)
xvalid = df_train[df_train.kfold == fold].reset_index(drop=True)
ytrain = xtrain.Revenue
yvalid = xvalid.Revenue
xtrain = xtrain[useful_cols]
xvalid = xvalid[useful_cols]
params = {'max_leaf_nodes': trial.suggest_int('max_leaf_nodes', 3, 20),
'max_depth': trial.suggest_int('max_depth', 10, 400),
'criterion': trial.suggest_categorical('criterion', ['gini', 'entropy']),
'class_weight': trial.suggest_categorical('class_weight', ['balanced'])
}
model = DecisionTreeClassifier(**params, random_state=42)
model.fit(xtrain,ytrain) # Training the Model on training set
# Predictions and Evaluation
preds_train = model.predict(xtrain)
preds_valid = model.predict(xvalid)
f1_score_train = metrics.f1_score(ytrain, preds_train)
f1_score_valid = metrics.f1_score(yvalid, preds_valid)
print(f"Fold: {fold}, f1-Score-train: {f1_score_train}")
print(f"Fold: {fold}, f1-Score-valid: {f1_score_valid}")
scores_valid.append(f1_score_valid)
scores_train.append(f1_score_train)
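    # Optuna minimizes this gap; a negative value means the mean validation f1 is above the mean training f1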
return np.mean(scores_train) - np.mean(scores_valid)
study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=2)
scores_train = []
scores_valid = []
scores_test = []
final_test_predictions = []
for fold in range(5):
xtrain = df_train[df_train.kfold != fold].reset_index(drop=True)
xvalid = df_train[df_train.kfold == fold].reset_index(drop=True)
xtest = df_test.copy()
ytrain = xtrain.Revenue
yvalid = xvalid.Revenue
ytest = xtest.Revenue
xtrain = xtrain[useful_cols]
xvalid = xvalid[useful_cols]
xtest = xtest[useful_cols]
xtrain[numerical] = scaler.fit_transform(xtrain[numerical])
xvalid[numerical] = scaler.transform(xvalid[numerical])
xtest[numerical] = scaler.transform(xtest[numerical])
params = {'max_leaf_nodes': 6, 'max_depth': 254, 'criterion': 'entropy', 'class_weight': 'balanced'}
model = DecisionTreeClassifier(**params, random_state=42)
model.fit(xtrain,ytrain)
preds_valid = model.predict(xvalid)
preds_train = model.predict(xtrain)
test_preds = model.predict(xtest)
final_test_predictions.append(test_preds)
f1_score_valid = metrics.f1_score(yvalid, preds_valid)
f1_score_train = metrics.f1_score(ytrain, preds_train)
f1_score_test = metrics.f1_score(ytest, test_preds)
print(f'Fold {fold} f1-score-train: ', f1_score_train)
print(f'Fold {fold} f1-score-Valid: ', f1_score_valid)
print(f'Fold {fold} f1-score-Test: ', f1_score_test)
scores_train.append(f1_score_train)
scores_valid.append(f1_score_valid)
scores_test.append(f1_score_test)
print(np.mean(scores_train), np.std(scores_train))
print(np.mean(scores_valid), np.std(scores_valid))
print(np.mean(scores_test), np.std(scores_test))
###Output
Fold 0 f1-score-train: 0.6709021601016518
Fold 0 f1-score-Valid: 0.6746166950596253
Fold 0 f1-score-Test: 0.6716417910447762
Fold 1 f1-score-train: 0.6736930860033726
Fold 1 f1-score-Valid: 0.6643478260869565
Fold 1 f1-score-Test: 0.6718696979979639
Fold 2 f1-score-train: 0.6689566337483899
Fold 2 f1-score-Valid: 0.6828478964401294
Fold 2 f1-score-Test: 0.6718696979979639
Fold 3 f1-score-train: 0.6754348748408995
Fold 3 f1-score-Valid: 0.6576271186440678
Fold 3 f1-score-Test: 0.6718696979979639
Fold 4 f1-score-train: 0.6703250316589278
Fold 4 f1-score-Valid: 0.6782006920415226
Fold 4 f1-score-Test: 0.6718696979979639
0.6718623572706482 0.00235978928339725
0.6715280456544603 0.009241150486280813
0.6718241166073263 9.116278127510035e-05
###Markdown
**Model-2: MLPClassifier**
###Code
def objective(trial):
scores_valid = []
scores_train = []
for fold in range(5):
xtrain = df_train[df_train.kfold != fold].reset_index(drop=True)
xvalid = df_train[df_train.kfold == fold].reset_index(drop=True)
ytrain = xtrain.Revenue
yvalid = xvalid.Revenue
xtrain = xtrain[useful_cols]
xvalid = xvalid[useful_cols]
xtrain[numerical] = scaler.fit_transform(xtrain[numerical])
xvalid[numerical] = scaler.transform(xvalid[numerical])
params = {
'alpha': trial.suggest_loguniform('alpha',1e-3,1),
'hidden_layer_sizes': trial.suggest_int('hidden_layer_sizes',5,20),
'max_iter': trial.suggest_int('max_iter',30,70)}
model= MLPClassifier(**params,random_state=7,tol=1e-4)
model.fit(xtrain,ytrain)
# Predictions and Evaluation
preds_train = model.predict(xtrain)
preds_valid = model.predict(xvalid)
f1_score_train = metrics.f1_score(ytrain, preds_train)
f1_score_valid = metrics.f1_score(yvalid, preds_valid)
print(f"Fold: {fold}, f1-Score-train: {f1_score_train}")
print(f"Fold: {fold}, f1-Score-valid: {f1_score_valid}")
scores_valid.append(f1_score_valid)
scores_train.append(f1_score_train)
return np.mean(scores_train) - np.mean(scores_valid)
study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=10)
scores_train = []
scores_valid = []
final_test_predictions = []
for fold in range(5):
xtrain = df_train[df_train.kfold != fold].reset_index(drop=True)
xvalid = df_train[df_train.kfold == fold].reset_index(drop=True)
xtest = df_test.copy()
ytrain = xtrain.Revenue
yvalid = xvalid.Revenue
xtrain = xtrain[useful_cols]
xvalid = xvalid[useful_cols]
xtest = xtest[useful_cols]
xtrain[numerical] = scaler.fit_transform(xtrain[numerical])
xvalid[numerical] = scaler.transform(xvalid[numerical])
xtest[numerical] = scaler.transform(xtest[numerical])
params ={'alpha': 0.09631013728513668, 'hidden_layer_sizes': 7, 'max_iter': 30}
model= MLPClassifier(**params,random_state=7,tol=1e-4)
model.fit(xtrain,ytrain)
preds_valid = model.predict(xvalid)
preds_train = model.predict(xtrain)
test_preds = model.predict(xtest)
final_test_predictions.append(test_preds)
f1_score_valid = metrics.f1_score(yvalid, preds_valid)
f1_score_train = metrics.f1_score(ytrain, preds_train)
print(f'Fold {fold} f1-score-train: ', f1_score_train)
print(f'Fold {fold} f1-score-Valid: ', f1_score_valid)
scores_train.append(f1_score_train)
scores_valid.append(f1_score_valid)
print(np.mean(scores_train), np.std(scores_train))
print(np.mean(scores_valid), np.std(scores_valid))
###Output
Fold 0 f1-score-train: 0.6524390243902439
Fold 0 f1-score-Valid: 0.651085141903172
Fold 1 f1-score-train: 0.6445993031358885
Fold 1 f1-score-Valid: 0.650994575045208
Fold 2 f1-score-train: 0.6483420593368238
Fold 2 f1-score-Valid: 0.6733668341708543
Fold 3 f1-score-train: 0.6617710583153348
Fold 3 f1-score-Valid: 0.6123893805309735
Fold 4 f1-score-train: 0.6434176111595465
Fold 4 f1-score-Valid: 0.6381461675579323
0.6501138112675675 0.006628514547372634
0.645196419841628 0.01993928674931871
###Markdown
**Model-3: LGBM**
###Code
def objective(trial):
scores_valid = []
scores_train = []
for fold in range(5):
xtrain = df_train[df_train.kfold != fold].reset_index(drop=True)
xvalid = df_train[df_train.kfold == fold].reset_index(drop=True)
ytrain = xtrain.Revenue
yvalid = xvalid.Revenue
xtrain = xtrain[useful_cols]
xvalid = xvalid[useful_cols]
xtrain[numerical] = scaler.fit_transform(xtrain[numerical])
xvalid[numerical] = scaler.transform(xvalid[numerical])
param = {"objective": trial.suggest_categorical("objective", ['binary']),
"boosting_type": trial.suggest_categorical("boosting_type", ['gbdt']),
"num_leaves": trial.suggest_int("num_leaves", 5, 100),
"max_depth": trial.suggest_int("max_depth", 5, 100),
"learning_rate": trial.suggest_float("learning_rate", 0.001, 0.5, step=0.01),
"n_estimators": trial.suggest_int("n_estimators", 20,2000),
"reg_alpha": trial.suggest_float("reg_alpha", 0.001, 40.0),
"reg_lambda": trial.suggest_float("reg_lambda", 0.001, 10.0),
# "random_state": trial.suggest_categorical("random_state", [24]),
# "bagging_seed": trial.suggest_categorical("bagging_seed", [24]),
# "feature_fraction_seed": trial.suggest_categorical("feature_fraction_seed", [24]),
# "n_jobs": trial.suggest_categorical("n_jobs", [4]),
"subsample": trial.suggest_float("subsample",0.01, 0.5, step=0.01),
"subsample_freq": trial.suggest_int("subsample_freq", 3, 20),
"colsample_bytree": trial.suggest_float("colsample_bytree", 0.01, 0.9, step=0.01),
# "device_type": trial.suggest_categorical("device_type", ["GPU"]),
'min_child_samples': trial.suggest_int('min_child_samples', 5, 70),
'min_child_weight': trial.suggest_int('min_child_weight', 5,70)}
model = LGBMClassifier(**param)
model.fit(xtrain, ytrain,
eval_set=[(xvalid, yvalid)],
eval_metric="f1",
early_stopping_rounds=50,
verbose=False)
# Predictions and Evaluation
preds_train = model.predict(xtrain)
preds_valid = model.predict(xvalid)
f1_score_train = metrics.f1_score(ytrain, preds_train)
f1_score_valid = metrics.f1_score(yvalid, preds_valid)
print(f"Fold: {fold}, f1-Score-train: {f1_score_train}")
print(f"Fold: {fold}, f1-Score-valid: {f1_score_valid}")
scores_valid.append(f1_score_valid)
scores_train.append(f1_score_train)
return np.mean(scores_train) - np.mean(scores_valid)
study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=10)
scores_train = []
scores_valid = []
final_test_predictions = []
for fold in range(5):
xtrain = df_train[df_train.kfold != fold].reset_index(drop=True)
xvalid = df_train[df_train.kfold == fold].reset_index(drop=True)
xtest = df_test.copy()
ytrain = xtrain.Revenue
yvalid = xvalid.Revenue
xtrain = xtrain[useful_cols]
xvalid = xvalid[useful_cols]
xtest = xtest[useful_cols]
xtrain[numerical] = scaler.fit_transform(xtrain[numerical])
xvalid[numerical] = scaler.transform(xvalid[numerical])
xtest[numerical] = scaler.transform(xtest[numerical])
params = {'objective': 'binary', 'boosting_type': 'gbdt', 'num_leaves': 47, 'max_depth': 17, 'learning_rate': 0.331,
'n_estimators': 469, 'reg_alpha': 30.654408167803027, 'reg_lambda': 8.742258358130245,
'subsample': 0.45, 'subsample_freq': 9, 'colsample_bytree': 0.49, 'min_child_samples': 26, 'min_child_weight': 32}
model = LGBMClassifier(**params)
model.fit(xtrain, ytrain,
eval_set=[(xvalid, yvalid)],
# eval_metric="f1",
early_stopping_rounds=50,
verbose=False)
preds_valid = model.predict(xvalid)
preds_train = model.predict(xtrain)
test_preds = model.predict(xtest)
final_test_predictions.append(test_preds)
f1_score_valid = metrics.f1_score(yvalid, preds_valid)
f1_score_train = metrics.f1_score(ytrain, preds_train)
print(f'Fold {fold} f1-score-train: ', f1_score_train)
print(f'Fold {fold} f1-score-Valid: ', f1_score_valid)
scores_train.append(f1_score_train)
scores_valid.append(f1_score_valid)
print(np.mean(scores_train), np.std(scores_train))
print(np.mean(scores_valid), np.std(scores_valid))
###Output
Fold 0 f1-score-train: 0.646354733405876
Fold 0 f1-score-Valid: 0.6341463414634146
Fold 1 f1-score-train: 0.6523143164693219
Fold 1 f1-score-Valid: 0.6455981941309256
Fold 2 f1-score-train: 0.6307007786429365
Fold 2 f1-score-Valid: 0.6476578411405296
Fold 3 f1-score-train: 0.6627721720658525
Fold 3 f1-score-Valid: 0.6285714285714286
Fold 4 f1-score-train: 0.666322846828262
Fold 4 f1-score-Valid: 0.6581740976645436
0.6516929694824498 0.012699109840988555
0.6428295805941684 0.010441504353759687
###Markdown
**Model-4: BaggingClassifier**
###Code
# %%time
# X_train, X_test, y_train, y_test = train_test_split(X_res,y_res, test_size = 0.2, random_state=22)
def objective(trial):
scores_valid = []
scores_train = []
for fold in range(5):
xtrain = df_train[df_train.kfold != fold].reset_index(drop=True)
xvalid = df_train[df_train.kfold == fold].reset_index(drop=True)
valid_ids = xvalid.id.values.tolist()
ytrain = xtrain.Revenue
yvalid = xvalid.Revenue
xtrain = xtrain[useful_cols]
xvalid = xvalid[useful_cols]
xtrain[numerical] = scaler.fit_transform(xtrain[numerical])
xvalid[numerical] = scaler.transform(xvalid[numerical])
params = {'n_estimators': trial.suggest_int('n_estimators', 500, 1000),
'max_samples' : trial.suggest_int('max_samples',30, 50)}
model = BaggingClassifier(**params, random_state=7)
model.fit(xtrain,ytrain)
# Predictions and Evaluation
preds_train = model.predict(xtrain)
preds_valid = model.predict(xvalid)
f1_score_train = metrics.f1_score(ytrain, preds_train)
f1_score_valid = metrics.f1_score(yvalid, preds_valid)
print(f"Fold: {fold}, f1-Score-train: {f1_score_train}")
print(f"Fold: {fold}, f1-Score-valid: {f1_score_valid}")
scores_valid.append(f1_score_valid)
scores_train.append(f1_score_train)
return np.mean(scores_train) - np.mean(scores_valid)
study_rf = optuna.create_study(direction='minimize')
study_rf.optimize(objective, n_trials=10)
scores_train = []
scores_valid = []
for fold in range(5):
xtrain = df_train[df_train.kfold != fold].reset_index(drop=True)
xvalid = df_train[df_train.kfold == fold].reset_index(drop=True)
valid_ids = xvalid.id.values.tolist()
ytrain = xtrain.Revenue
yvalid = xvalid.Revenue
xtrain = xtrain[useful_cols]
xvalid = xvalid[useful_cols]
xtrain[numerical] = scaler.fit_transform(xtrain[numerical])
xvalid[numerical] = scaler.transform(xvalid[numerical])
xtest[numerical] = scaler.transform(xtest[numerical])
params = {'n_estimators': 533, 'max_samples': 32}
model = BaggingClassifier(**params, random_state=7)
model.fit(xtrain,ytrain)
preds_valid = model.predict(xvalid)
preds_train = model.predict(xtrain)
f1_score_valid = metrics.f1_score(yvalid, preds_valid)
f1_score_train = metrics.f1_score(ytrain, preds_train)
print(f'Fold {fold} f1-score-train: ', f1_score_train)
print(f'Fold {fold} f1-score-Valid: ', f1_score_valid)
scores_train.append(f1_score_train)
scores_valid.append(f1_score_valid)
print(np.mean(scores_train), np.std(scores_train))
print(np.mean(scores_valid), np.std(scores_valid))
###Output
Fold 0 f1-score-train: 0.6389380530973451
Fold 0 f1-score-Valid: 0.62778730703259
Fold 1 f1-score-train: 0.6241134751773049
Fold 1 f1-score-Valid: 0.656934306569343
Fold 2 f1-score-train: 0.6384648931530746
Fold 2 f1-score-Valid: 0.6533333333333333
Fold 3 f1-score-train: 0.6543624161073825
Fold 3 f1-score-Valid: 0.6223776223776225
Fold 4 f1-score-train: 0.6407599309153714
Fold 4 f1-score-Valid: 0.6447140381282496
0.6393277536900958 0.009596487598381852
0.6410293214882277 0.013719848550097944
###Markdown
**Setting Models and Parameters for Voting Classifier**
###Code
params_bg = {'n_estimators': 533, 'max_samples': 32}
model_bg = BaggingClassifier(**params_bg, random_state=7)
params_lgb = {'objective': 'binary', 'boosting_type': 'gbdt', 'num_leaves': 47, 'max_depth': 17, 'learning_rate': 0.331,
'n_estimators': 469, 'reg_alpha': 30.654408167803027, 'reg_lambda': 8.742258358130245,
'subsample': 0.45, 'subsample_freq': 9, 'colsample_bytree': 0.49, 'min_child_samples': 26, 'min_child_weight': 32}
model_lgb = LGBMClassifier(**params_lgb, random_state=6)
params_mlp = {'alpha': 0.09631013728513668, 'hidden_layer_sizes': 7, 'max_iter': 30}
model_mlp= MLPClassifier(**params_mlp,random_state=17,tol=1e-4)
params_dt = {'max_leaf_nodes': 6, 'max_depth': 254, 'criterion': 'entropy', 'class_weight': 'balanced'}
model_dt = DecisionTreeClassifier(**params_dt, random_state=42)
###Output
_____no_output_____
###Markdown
**Final Model: Voting Classifier**
###Code
scores_train = []
scores_valid = []
for fold in range(5):
xtrain = df_train[df_train.kfold != fold].reset_index(drop=True)
xvalid = df_train[df_train.kfold == fold].reset_index(drop=True)
xtest = df_test.copy()
ytrain = xtrain.Revenue
yvalid = xvalid.Revenue
xtrain = xtrain[useful_cols]
xvalid = xvalid[useful_cols]
xtest = xtest[useful_cols]
xtrain[numerical] = scaler.fit_transform(xtrain[numerical])
xvalid[numerical] = scaler.transform(xvalid[numerical])
xtest[numerical] = scaler.transform(xtest[numerical])
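    # Hard voting: the final label is the majority vote of the four tuned base classifiers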
model_vclf = VotingClassifier(estimators=[ ('BaggingClassifier', model_bg),
('LightGBM', model_lgb),
('MLPClassifier', model_mlp),
('DecisionTree', model_dt)
],
voting='hard')
model_vclf.fit(xtrain,ytrain)
preds_valid = model_vclf.predict(xvalid)
preds_train = model_vclf.predict(xtrain)
f1_score_valid = metrics.f1_score(yvalid, preds_valid)
f1_score_train = metrics.f1_score(ytrain, preds_train)
print(f"Training Acc for fold: {fold}: {model_vclf.score(xtrain,ytrain)}")
print(f"Validation Acc for fold: {fold}: {model_vclf.score(xvalid,yvalid)}")
print(f'Fold {fold} f1-score-train: ', f1_score_train)
print(f'Fold {fold} f1-score-Valid: ', f1_score_valid)
scores_train.append(f1_score_train)
scores_valid.append(f1_score_valid)
print(np.mean(scores_train), np.std(scores_train))
print(np.mean(scores_valid), np.std(scores_valid))
###Output
Training Acc for fold: 0: 0.9016601191230516
Validation Acc for fold: 0: 0.8991383679675621
Fold 0 f1-score-train: 0.6672384219554032
Fold 0 f1-score-Valid: 0.6666666666666665
Training Acc for fold: 1: 0.8996324927132175
Validation Acc for fold: 1: 0.9016725798276736
Fold 1 f1-score-train: 0.6541484716157204
Fold 1 f1-score-Valid: 0.6523297491039427
Training Acc for fold: 2: 0.8987454061589152
Validation Acc for fold: 2: 0.8966041561074506
Fold 2 f1-score-train: 0.6575225032147451
Fold 2 f1-score-Valid: 0.6709677419354839
Training Acc for fold: 3: 0.9000126726650615
Validation Acc for fold: 3: 0.8920425747592499
Fold 3 f1-score-train: 0.6697362913352868
Fold 3 f1-score-Valid: 0.6243386243386243
Training Acc for fold: 4: 0.8992650785605677
Validation Acc for fold: 4: 0.902129817444219
Fold 4 f1-score-train: 0.6550976138828633
Fold 4 f1-score-Valid: 0.6608084358523726
0.6607486604008038 0.006462188851865252
0.655022243579418 0.01656541906558489
###Markdown
 **We can use the final model, i.e. the ``voting classifier``, for deployment, as it has a decent mean f1-score and a satisfactory standard deviation.**
###Code
"""Save the Bagging CLF model"""
output_file = f'model_bg.bin'
with open(output_file, 'wb') as f_out:
pickle.dump(model_bg, f_out)
print(f'the model is saved to {output_file}')
"""Save the LGB model"""
output_file = f'model_lgb.bin'
with open(output_file, 'wb') as f_out:
pickle.dump(model_lgb, f_out)
print(f'the model is saved to {output_file}')
"""Save the DT model"""
output_file = f'model_dt.bin'
with open(output_file, 'wb') as f_out:
pickle.dump(model_dt, f_out)
print(f'the model is saved to {output_file}')
"""Save the MLP model"""
output_file = f'model_mlp.bin'
with open(output_file, 'wb') as f_out:
pickle.dump(model_mlp, f_out)
print(f'the model is saved to {output_file}')
"""Save the Voting CLF model"""
output_file = f'model_vclf.bin'
with open(output_file, 'wb') as f_out:
pickle.dump(model_vclf, f_out)
print(f'the model is saved to {output_file}')
import pandas as pd
import numpy as np
from sklearn.preprocessing import PowerTransformer
from sklearn import preprocessing
import pickle
model_file = 'model_bg.bin'
with open(model_file, 'rb') as f_in:
model_bg = pickle.load(f_in)
model_file = 'model_lgb.bin'
with open(model_file, 'rb') as f_in:
model_lgb = pickle.load(f_in)
model_file = 'model_dt.bin'
with open(model_file, 'rb') as f_in:
model_dt = pickle.load(f_in)
model_file = 'model_mlp.bin'
with open(model_file, 'rb') as f_in:
model_mlp = pickle.load(f_in)
model_file = 'model_vclf.bin'
with open(model_file, 'rb') as f_in:
model_vclf = pickle.load(f_in)
def preprocess_train(df_train, y_train):
useful_cols = [col for col in df_train.columns if col not in ['id', 'Revenue', 'kfold']]
categorical = [col for col in useful_cols if df_train[col].dtype in ['object', 'bool']]
numerical = [col for col in useful_cols if col not in categorical]
# Label-Encoding boolean variable:
le = preprocessing.LabelEncoder()
df_train.Weekend = le.fit_transform(df_train.Weekend)
# OHE
dicts = df_train.to_dict(orient='records')
dv = DictVectorizer(sparse=False)
df_train = pd.DataFrame(dv.fit_transform(dicts),columns=list(dv.get_feature_names_out()))
columns = list(dv.get_feature_names_out())
# PT
pt = PowerTransformer()
pt_num_tr = pd.DataFrame(pt.fit_transform(df_train[columns]),columns=columns)
df_train = pd.concat([df_train.drop(columns, axis = 1),pt_num_tr],axis=1)
# Scaling
scaler= preprocessing.RobustScaler()
df_train = scaler.fit_transform(df_train)
# Models
params_bg = {'n_estimators': 533, 'max_samples': 32}
model_bg = BaggingClassifier(**params_bg, random_state=7)
params_lgb = {'objective': 'binary', 'boosting_type': 'gbdt', 'num_leaves': 47,
'max_depth': 17, 'learning_rate': 0.331,
'n_estimators': 469, 'reg_alpha': 30.654408167803027, 'reg_lambda': 8.742258358130245,
'subsample': 0.45, 'subsample_freq': 9,
'colsample_bytree': 0.49, 'min_child_samples': 26, 'min_child_weight': 32}
model_lgb = LGBMClassifier(**params_lgb, random_state=6)
params_mlp = {'alpha': 0.09631013728513668, 'hidden_layer_sizes': 7, 'max_iter': 30}
model_mlp= MLPClassifier(**params_mlp,random_state=17,tol=1e-4)
params_dt = {'max_leaf_nodes': 6, 'max_depth': 254, 'criterion': 'entropy', 'class_weight': 'balanced'}
model_dt = DecisionTreeClassifier(**params_dt, random_state=42)
# Final Model
model = VotingClassifier(estimators=[ ('BaggingClassifier', model_bg),
('LightGBM', model_lgb),
('MLPClassifier', model_mlp),
('DecisionTree', model_dt)],
voting='hard')
model.fit(df_train,y_train)
return le, dv, pt, scaler,model
def predict(df,pt,scaler,dv, model):
df.Weekend = le.transform(df.Weekend)
dicts = df.to_dict(orient='records')
X = dv.transform(dicts)
X = pt.transform(X)
X = scaler.transform(X)
y_pred = model.predict(X)
return y_pred
"""Trying out Model-Prediction on first 20 rows"""
xtrain = df.drop('Revenue', axis = 1).copy()
ytrain = df.Revenue.copy()
le, dv, pt, scaler,model= preprocess_train(xtrain, ytrain)
df_q = df.head(20)
xtrain = df_q.drop(['Revenue', 'kfold'],axis = 1)
ytrain = df_q.Revenue
preds_train = predict(xtrain,pt,scaler, dv, model)
print([(i,j) for i, j in zip(ytrain, preds_train)][:20])
###Output
[(0, 0), (0, 0), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (1, 1), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0), (0, 0), (0, 0), (1, 1)]
|
NoteBooks/combined_features_analysis.ipynb | ###Markdown
 Model Input Selection In this notebook we decide on the model input representation. \The usual suspects among text-modelling techniques are CBOW, TF-IDF and word vectors (here we use GloVe 6B.300d). \Since we are only deciding on the best vector-space representation, we will use a Naive Bayes model because it is fast and effective enough on text data.\The performance metric is AUC-ROC, since the dataset is highly imbalanced, with the positive class (abuse) at only around $\approx 10\%$
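As a rough sketch of the kind of baseline comparison described above (illustrative only: the dataframe name `msgs` and its `message`/`label` columns are assumptions, with a binary 0/1 label):
```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score

X_tr, X_te, y_tr, y_te = train_test_split(msgs['message'], msgs['label'],
                                           stratify=msgs['label'], random_state=42)
tfidf = TfidfVectorizer(min_df=5, ngram_range=(1, 2))      # one candidate vector-space representation
nb = MultinomialNB().fit(tfidf.fit_transform(X_tr), y_tr)  # fast Naive Bayes baseline
print(roc_auc_score(y_te, nb.predict_proba(tfidf.transform(X_te))[:, 1]))
```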
###Code
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import auc
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.calibration import CalibratedClassifierCV
import warnings
warnings.filterwarnings("ignore")
pd.set_option("display.max_colwidth", 1000)
from sklearn.preprocessing import LabelEncoder
###Output
_____no_output_____
###Markdown
 Engineered Features This notebook analyses how well the engineered features discriminate between Abuse and Non-Abuse messages.\The method is as follows: - Only consider the features that showed some promise in the earlier, separate analyses of the pre-clean and post-clean features. - Perform multivariate analysis by using models to decide on the feature importances. - Fit both linear and non-linear models on all of the combined features and on the selected features. - Use RFE to select the top three features among the engineered features.
###Code
pre_clean_features = ['stop_word_count', 'unique_vs_words' , 'upper_case_words', 'numeric_count', 'Upper_case_vs_words']
post_clean_features = ['conjunction','other','adposition', 'adverb', 'pronoun', 'verb']
train_pre_clean = pd.read_csv("../cleaned_data/train_pre_clean_features.csv", encoding="utf-8")\
.drop(columns=['message'])\
.reset_index(drop=True)
train_post_clean = pd.read_csv("../cleaned_data/train_post_clean_features.csv", encoding="utf-8", header=0)
train_post_clean = train_post_clean.drop(index=train_post_clean[train_post_clean.adjective == 'adjective'].index)\
.apply(lambda x: pd.to_numeric(x, errors='coerce'))\
.reset_index(drop=True)
combined_features_eng = train_pre_clean.merge(train_post_clean, how='inner', left_index=True, right_index=True)
combined_features_eng = combined_features_eng.loc[:, pre_clean_features + post_clean_features + ['label']]
del train_post_clean, train_pre_clean
combined_features_eng.head()
combined_features_eng = combined_features_eng.sample(n=100000, random_state=42).reset_index(drop=True)
train_index, test_index = train_test_split(combined_features_eng.index, stratify=combined_features_eng.label)
# columns to Standard scale
scale_cols_list = list(combined_features_eng.columns)
scale_cols_list.remove('label')
for col in scale_cols_list:
combined_features_eng.loc[train_index, col] = StandardScaler().fit_transform(combined_features_eng.loc[train_index, col].values.reshape(-1,1))
label_encoder = LabelEncoder()
def plot_auc(train_fpr, train_tpr, test_fpr, test_tpr,
train_auc, test_auc, title="ROC Curve"):
plt.plot(train_fpr, train_tpr, label=f'Train AUC = {round(train_auc, 3)}')
    plt.plot(test_fpr, test_tpr, label=f'Test AUC = {round(test_auc, 2)}')
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.legend()
    plt.title(title)
plt.grid()
plt.show()
###Output
_____no_output_____
###Markdown
Decision Tree
###Code
min_samples = [2,3,5,7,9, 15, 21]
param_grid = {'max_depth': np.arange(2,11), 'min_samples_split': min_samples}
performance_metric = {'auc': 'roc_auc'}
gs = GridSearchCV(estimator=DecisionTreeClassifier(class_weight='balanced', random_state=1),
param_grid=param_grid, scoring=performance_metric,
refit='auc', return_train_score=True)
gs.fit(combined_features_eng.loc[train_index, pre_clean_features+post_clean_features],
label_encoder.fit_transform(combined_features_eng.loc[train_index, 'label']));
gs.best_params_
gs.best_score_
train_probs = gs.best_estimator_.predict_proba(combined_features_eng.loc[train_index, pre_clean_features+post_clean_features])
train_preds = train_probs[:,1]
train_fpr, train_tpr, train_threshold = roc_curve(label_encoder.transform(combined_features_eng.loc[train_index, 'label']), train_preds)
train_roc_auc = auc(train_fpr, train_tpr)
tuned_DT = CalibratedClassifierCV(gs.best_estimator_, cv="prefit")\
.fit(combined_features_eng.loc[train_index, pre_clean_features+post_clean_features],
label_encoder.transform(combined_features_eng.loc[train_index, 'label'])
)
test_probs = tuned_DT.predict_proba(combined_features_eng.loc[test_index, pre_clean_features+post_clean_features])
test_preds = test_probs[:,1]
test_fpr, test_tpr, test_threshold = roc_curve(label_encoder.transform(combined_features_eng.loc[test_index, 'label']), test_preds)
test_roc_auc = auc(test_fpr, test_tpr)
plot_auc(train_fpr, train_tpr, test_fpr, test_tpr,
train_roc_auc, test_roc_auc)
print('Feature Importance')
print("-"*50)
pd.Series(gs.best_estimator_.feature_importances_, index=pre_clean_features+post_clean_features).sort_values(ascending=False)
###Output
Feature Importance
--------------------------------------------------
###Markdown
Logistic Regression
###Code
C = [1e+05, 1e+04, 1e+03, 1e+02, 1e+01, 1, 1e-01, 1e-02, 1e-03]
param_grid = {'C': C, 'penalty': ['l1', 'l2']}
performance_metric = {'auc': 'roc_auc'}
refit_metric = 'auc'
def tune_model(param_grid, performance_metric, refit_metric, base_estimator, feature_columns):
# DecisionTreeClassifier(class_weight='balanced', random_state=1)
# refit='auc'
# pre_clean_features+post_clean_features
gs = GridSearchCV(estimator=base_estimator,
param_grid=param_grid, scoring=performance_metric,
refit='auc', return_train_score=True)\
.fit(combined_features_eng.loc[train_index, feature_columns],
label_encoder.fit_transform(combined_features_eng.loc[train_index, 'label'])
);
train_probs = gs.best_estimator_.predict_proba(combined_features_eng.loc[train_index, feature_columns])
train_preds = train_probs[:,1]
train_fpr, train_tpr, train_threshold = roc_curve(label_encoder.transform(combined_features_eng.loc[train_index, 'label']), train_preds)
train_roc_auc = auc(train_fpr, train_tpr)
tuned_DT = CalibratedClassifierCV(gs.best_estimator_, cv="prefit")\
.fit(combined_features_eng.loc[train_index, feature_columns],
label_encoder.transform(combined_features_eng.loc[train_index, 'label'])
)
test_probs = tuned_DT.predict_proba(combined_features_eng.loc[test_index, feature_columns])
    test_preds = test_probs[:,1]
    test_fpr, test_tpr, test_threshold = roc_curve(label_encoder.transform(combined_features_eng.loc[test_index, 'label']), test_preds)
test_roc_auc = auc(test_fpr, test_tpr)
return train_fpr, train_tpr, train_roc_auc, \
test_fpr, test_tpr, test_roc_auc,\
gs.best_estimator_
from sklearn import linear_model
base_estimator = linear_model.LogisticRegression(class_weight='balanced', solver='liblinear')  # liblinear supports both the 'l1' and 'l2' penalties in the grid
train_fpr, train_tpr, train_roc_auc,\
test_fpr, test_tpr, test_roc_auc,\
best_model = tune_model(param_grid, performance_metric, refit_metric,
base_estimator, pre_clean_features+post_clean_features)
plot_auc(train_fpr, train_tpr, test_fpr, test_tpr,
train_roc_auc, test_roc_auc)
best_model
###Output
_____no_output_____
###Markdown
RFE
###Code
from sklearn.feature_selection import RFECV
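# Recursive feature elimination with 5-fold CV: iteratively drop the weakest feature and keep the subset with the best ROC-AUC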
base_estimator = DecisionTreeClassifier(class_weight='balanced', random_state=1)
selector = RFECV(base_estimator, step=1, cv=5, scoring='roc_auc')
selector = selector.fit(combined_features_eng.loc[train_index, scale_cols_list],
combined_features_eng.loc[train_index, 'label'])
combined_features_eng.loc[:, scale_cols_list].loc[:,selector.get_support()].columns
###Output
_____no_output_____
###Markdown
Model with important features
###Code
important_features = ['stop_word_count', 'unique_vs_words', 'Upper_case_vs_words']
min_samples = [2,3,5,7,9, 15, 21]
param_grid = {'max_depth': np.arange(2,11), 'min_samples_split': min_samples}
performance_metric = {'auc': 'roc_auc'}
base_estimator = DecisionTreeClassifier(class_weight='balanced', random_state=1)
train_fpr, train_tpr, train_roc_auc,\
test_fpr, test_tpr, test_roc_auc,\
best_model = tune_model(param_grid, performance_metric, refit_metric,
base_estimator, important_features
)
best_model
plot_auc(train_fpr, train_tpr, test_fpr, test_tpr,
train_roc_auc, test_roc_auc)
###Output
_____no_output_____ |
nbs/copies/lesson3-planet.ipynb | ###Markdown
Multi-label prediction with Planet Amazon dataset
###Code
%reload_ext autoreload
%autoreload 2
%matplotlib inline
from fastai.vision import *
###Output
_____no_output_____
###Markdown
Getting the data The planet dataset isn't available on the [fastai dataset page](https://course.fast.ai/datasets) due to copyright restrictions. You can download it from Kaggle however. Let's see how to do this by using the [Kaggle API](https://github.com/Kaggle/kaggle-api) as it's going to be pretty useful to you if you want to join a competition or use other Kaggle datasets later on.First, install the Kaggle API by uncommenting the following line and executing it, or by executing it in your terminal (depending on your platform you may need to modify this slightly to either add `source activate fastai` or similar, or prefix `pip` with a path. Have a look at how `conda install` is called for your platform in the appropriate *Returning to work* section of https://course.fast.ai/. (Depending on your environment, you may also need to append "--user" to the command.)
###Code
!ls -a
! {sys.executable} -m pip install kaggle --upgrade
###Output
Requirement already up-to-date: kaggle in /usr/local/lib/python3.6/dist-packages (1.5.4)
Requirement already satisfied, skipping upgrade: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from kaggle) (1.24.3)
Requirement already satisfied, skipping upgrade: python-slugify in /usr/local/lib/python3.6/dist-packages (from kaggle) (3.0.2)
Requirement already satisfied, skipping upgrade: six>=1.10 in /usr/local/lib/python3.6/dist-packages (from kaggle) (1.12.0)
Requirement already satisfied, skipping upgrade: tqdm in /usr/local/lib/python3.6/dist-packages (from kaggle) (4.28.1)
Requirement already satisfied, skipping upgrade: certifi in /usr/local/lib/python3.6/dist-packages (from kaggle) (2019.3.9)
Requirement already satisfied, skipping upgrade: python-dateutil in /usr/local/lib/python3.6/dist-packages (from kaggle) (2.5.3)
Requirement already satisfied, skipping upgrade: requests in /usr/local/lib/python3.6/dist-packages (from kaggle) (2.21.0)
Requirement already satisfied, skipping upgrade: text-unidecode==1.2 in /usr/local/lib/python3.6/dist-packages (from python-slugify->kaggle) (1.2)
Requirement already satisfied, skipping upgrade: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->kaggle) (3.0.4)
Requirement already satisfied, skipping upgrade: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->kaggle) (2.8)
###Markdown
Then you need to upload your credentials from Kaggle on your instance. Login to kaggle and click on your profile picture on the top left corner, then 'My account'. Scroll down until you find a button named 'Create New API Token' and click on it. This will trigger the download of a file named 'kaggle.json'.Upload this file to the directory this notebook is running in, by clicking "Upload" on your main Jupyter page, then uncomment and execute the next two commands (or run them in a terminal). For Windows, uncomment the last two commands.
###Code
! mkdir -p ~/.kaggle/
! mv kaggle.json ~/.kaggle/
# For Windows, uncomment these two commands
# ! mkdir %userprofile%\.kaggle
# ! move kaggle.json %userprofile%\.kaggle
###Output
_____no_output_____
###Markdown
You're all set to download the data from [planet competition](https://www.kaggle.com/c/planet-understanding-the-amazon-from-space). You **first need to go to its main page and accept its rules**, and run the two cells below (uncomment the shell commands to download and unzip the data). If you get a `403 forbidden` error it means you haven't accepted the competition rules yet (you have to go to the competition page, click on *Rules* tab, and then scroll to the bottom to find the *accept* button).
###Code
path = Config.data_path()/'planet'
path.mkdir(parents=True, exist_ok=True)
path
! kaggle competitions download -c planet-understanding-the-amazon-from-space -f train-jpg.tar.7z -p {path}
! kaggle competitions download -c planet-understanding-the-amazon-from-space -f train_v2.csv -p {path}
! unzip -q -n {path}/train_v2.csv.zip -d {path}
###Output
Warning: Your Kaggle API key is readable by other users on this system! To fix this, you can run 'chmod 600 /root/.kaggle/kaggle.json'
Downloading train-jpg.tar.7z to /root/.fastai/data/planet
96% 579M/600M [00:10<00:00, 29.4MB/s]
100% 600M/600M [00:10<00:00, 60.4MB/s]
Warning: Your Kaggle API key is readable by other users on this system! To fix this, you can run 'chmod 600 /root/.kaggle/kaggle.json'
Downloading train_v2.csv.zip to /root/.fastai/data/planet
0% 0.00/159k [00:00<?, ?B/s]
100% 159k/159k [00:00<00:00, 46.4MB/s]
###Markdown
To extract the content of this file, we'll need 7zip, so uncomment the following line if you need to install it (or run `sudo apt install p7zip-full` in your terminal).
###Code
! conda install --yes --prefix {sys.prefix} -c haasad eidl7zip
###Output
/bin/bash: conda: command not found
###Markdown
And now we can unpack the data (uncomment to run - this might take a few minutes to complete).
###Code
! 7za -bd -y -so x {path}/train-jpg.tar.7z | tar xf - -C {path.as_posix()}
###Output
_____no_output_____
###Markdown
 Multiclassification Contrary to the pets dataset studied in the last lesson, here each picture can have multiple labels. If we take a look at the csv file containing the labels (in 'train_v2.csv' here) we see that each 'image_name' is associated with several tags separated by spaces.
###Code
df = pd.read_csv(path/'train_v2.csv')
df.head()
###Output
_____no_output_____
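###Markdown
As a quick, optional check of how often each tag occurs (a sketch using only pandas and the standard library; not required for training):
###Code
from collections import Counter

# Count how many times each tag appears across all images
tag_counts = Counter(tag for tags in df['tags'] for tag in tags.split(' '))
tag_counts.most_common(10)
###Output
_____no_output_____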
###Markdown
To put this in a `DataBunch` while using the [data block API](https://docs.fast.ai/data_block.html), we then need to use `ImageList` (and not `ImageDataBunch`). This will make sure the model created has the proper loss function to deal with the multiple classes.
###Code
tfms = get_transforms(flip_vert=True, max_lighting=0.1, max_zoom=1.05, max_warp=0.)
###Output
_____no_output_____
###Markdown
We use parentheses around the data block pipeline below, so that we can use a multiline statement without needing to add '\\'.
###Code
np.random.seed(42)
src = (ImageList.from_csv(path, 'train_v2.csv', folder='train-jpg', suffix='.jpg')
.split_by_rand_pct(0.2)
.label_from_df(label_delim=' '))
data = (src.transform(tfms, size=128)
.databunch().normalize(imagenet_stats))
###Output
_____no_output_____
###Markdown
`show_batch` still works, and show us the different labels separated by `;`.
###Code
data.show_batch(rows=3, figsize=(12,9))
###Output
_____no_output_____
###Markdown
To create a `Learner` we use the same function as in lesson 1. Our base architecture is resnet50 again, but the metrics are a little bit different: we use `accuracy_thresh` instead of `accuracy`. In lesson 1, we determined the prediction for a given class by picking the final activation that was the biggest, but here, each activation can be 0. or 1. `accuracy_thresh` selects the ones that are above a certain threshold (0.5 by default) and compares them to the ground truth.As for Fbeta, it's the metric that was used by Kaggle on this competition. See [here](https://en.wikipedia.org/wiki/F1_score) for more details.
###Code
arch = models.resnet50
acc_02 = partial(accuracy_thresh, thresh=0.2)
f_score = partial(fbeta, thresh=0.2)
learn = cnn_learner(data, arch, metrics=[acc_02, f_score])
###Output
Downloading: "https://download.pytorch.org/models/resnet50-19c8e357.pth" to /root/.cache/torch/checkpoints/resnet50-19c8e357.pth
100%|██████████| 102502400/102502400 [00:01<00:00, 89430390.07it/s]
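###Markdown
As a rough illustration of what a thresholded accuracy metric does (a minimal sketch with made-up activations, not the fastai implementation):
###Code
import numpy as np

# Hypothetical sigmoid activations for 3 images over 4 possible tags
activations = np.array([[0.9, 0.1, 0.6, 0.3],
                        [0.2, 0.8, 0.1, 0.7],
                        [0.4, 0.3, 0.9, 0.1]])
# Ground-truth multi-label targets (1 = tag present)
targets = np.array([[1, 0, 1, 0],
                    [0, 1, 0, 1],
                    [0, 0, 1, 0]])
# Every activation above the threshold counts as a predicted tag
preds = (activations > 0.2).astype(int)
print((preds == targets).mean())
###Output
_____no_output_____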
###Markdown
We use the LR Finder to pick a good learning rate.
###Code
learn.lr_find()
learn.recorder.plot()
###Output
_____no_output_____
###Markdown
Then we can fit the head of our network.
###Code
lr = 0.01
learn.fit_one_cycle(5, slice(lr))
learn.save('stage-1-rn50')
###Output
_____no_output_____
###Markdown
...And fine-tune the whole model:
###Code
learn.unfreeze()
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(5, slice(1e-5, lr/5))
learn.save('stage-2-rn50')
data = (src.transform(tfms, size=256)
.databunch().normalize(imagenet_stats))
learn.data = data
data.train_ds[0][0].shape
learn.freeze()
learn.lr_find()
learn.recorder.plot()
lr=1e-2/2
learn.fit_one_cycle(5, slice(lr))
learn.save('stage-1-256-rn50')
learn.unfreeze()
learn.fit_one_cycle(5, slice(1e-5, lr/5))
learn.recorder.plot_losses()
learn.save('stage-2-256-rn50')
###Output
_____no_output_____
###Markdown
You won't really know how you're going until you submit to Kaggle, since the leaderboard isn't using the same subset as we have for training. But as a guide, 50th place (out of 938 teams) on the private leaderboard was a score of `0.930`.
###Code
learn.export()
learn.export??
###Output
_____no_output_____
###Markdown
fin (This section will be covered in part 2 - please don't ask about it just yet! :) )
###Code
#! kaggle competitions download -c planet-understanding-the-amazon-from-space -f test-jpg.tar.7z -p {path}
#! 7za -bd -y -so x {path}/test-jpg.tar.7z | tar xf - -C {path}
#! kaggle competitions download -c planet-understanding-the-amazon-from-space -f test-jpg-additional.tar.7z -p {path}
#! 7za -bd -y -so x {path}/test-jpg-additional.tar.7z | tar xf - -C {path}
test = ImageList.from_folder(path/'test-jpg').add(ImageList.from_folder(path/'test-jpg-additional'))
len(test)
learn = load_learner(path, test=test)
preds, _ = learn.get_preds(ds_type=DatasetType.Test)
thresh = 0.2
labelled_preds = [' '.join([learn.data.classes[i] for i,p in enumerate(pred) if p > thresh]) for pred in preds]
labelled_preds[:5]
fnames = [f.name[:-4] for f in learn.data.test_ds.items]
df = pd.DataFrame({'image_name':fnames, 'tags':labelled_preds}, columns=['image_name', 'tags'])
df.to_csv(path/'submission.csv', index=False)
! kaggle competitions submit planet-understanding-the-amazon-from-space -f {path/'submission.csv'} -m "My submission"
###Output
Warning: Your Kaggle API key is readable by other users on this system! To fix this, you can run 'chmod 600 /home/ubuntu/.kaggle/kaggle.json'
100%|██████████████████████████████████████| 2.18M/2.18M [00:02<00:00, 1.05MB/s]
Successfully submitted to Planet: Understanding the Amazon from Space |
pySpark/notebook.ipynb | ###Markdown
Football Twitter Streaming Imports the needed modules
###Code
from pyspark.sql.session import SparkSession
from pyspark.sql.functions import lit, explode, split, col, from_json, to_json, json_tuple, window, struct, udf
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, DoubleType, LongType
###Output
_____no_output_____
###Markdown
Set Up Spark Session and Define Schemas The schemas defined here are needed in order to extract the content from the Twitter posts in the Kafka topic
###Code
spark = SparkSession.builder.appName("wordCounter") \
.config('spark.sql.streaming.checkpointLocation','/home/jovyan/checkpoints') \
.config('spark.jars.packages', 'org.apache.spark:spark-sql-kafka-0-10_2.12:3.1.1') \
.getOrCreate()
# Defines schema of Twitter Post
tweetSchema = StructType() \
.add("payload", StringType())
payloadSchema = StructType() \
.add("Text", StringType()) \
.add("Lang", StringType())
###Output
_____no_output_____
###Markdown
Utility Functions extractTweetPayload is a function used to extract a dataframe with the content and timestamp of the Twitter post from a Kafka message in JSON format. getLastName is a UDF used to extract the last word in a string column of a dataframe.
###Code
# extracts structured content from json tweet message
def extractTweetPayload(df, tweetSchema, payloadSchema):
return df \
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)", "CAST(timestamp AS TIMESTAMP)", "offset") \
.withColumn("data", from_json("value", tweetSchema)) \
.withColumn("payload", from_json("data.payload", payloadSchema)) \
.select("payload.*", "key", "timestamp")
def getLastName(full_name):
return full_name.split(" ")[-1:][0]
###Output
_____no_output_____
###Markdown
Streaming Queries Here I define the streaming queries; they perform simple word counts over specific columns of the Kafka messages. I decided to go with hopping windows in order to visualize moving trends in the Twitter topics more efficiently.
###Code
def tweetsCountQuery(df, colName):
return df \
.withWatermark("timestamp", "10 seconds") \
.groupBy(
window(col("timestamp"), "10 seconds", "5 seconds"),
col(colName)
).count() \
.select("timestamp", "count", to_json(struct("timestamp", "count")).alias("value"))
def wordCountQuery(df, colName):
return df \
.withWatermark("timestamp", "10 seconds") \
.withColumn('word', explode(split(col(colName), ' '))) \
.groupBy(window(col("timestamp"), "10 seconds", "5 seconds"),
col('word')
).count() \
.select("word", "count", to_json(struct("word", "count")).alias("value"))
def langCountQuery(df, colName):
return df \
.withWatermark("timestamp", "2 minutes") \
.groupBy(
window(col("timestamp"), "2 minutes", "1 minutes"),
col(colName)
).count() \
.select(colName, "count", to_json(struct(colName, "count")).alias("value"))
###Output
_____no_output_____
###Markdown
Static Dataset Import and Setup In order to extract the meaningful words from the Twitter posts, I decided to load the public FIFA 21 dataset, which contains data about the most popular football players and clubs. In the following lines I load the dataset, then I extract and concatenate the list of teams and players into a single dataframe.
###Code
players = spark.read \
.option("header", "true") \
.option("mode", "DROPMALFORMED") \
.csv("players_21.csv")
lastNameUDF = udf(getLastName, StringType())
player_names = players \
.withColumn(
"word", lastNameUDF("short_name")) \
.withColumn("category", lit("Player")) \
.select("word", "category") \
    .limit(500)
teams = players \
.select("club_name") \
.withColumn("category", lit("Team")) \
.limit(500) \
    .dropDuplicates()
topics = player_names.union(teams)
###Output
_____no_output_____
###Markdown
Kafka Data Source Config
###Code
# Reads the data from kafka
df = spark \
.readStream \
.format("kafka") \
.option("kafka.bootstrap.servers", "broker:9092") \
.option("failOnDataLoss", "false") \
.option("subscribe", "tweets") \
.option("startingOffsets", "earliest") \
.load()
messages = extractTweetPayload(df, tweetSchema, payloadSchema)
###Output
_____no_output_____
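###Markdown
Before wiring the queries to Kafka sinks, it can help to sanity-check the parsed stream. A minimal sketch (assuming the same `messages` dataframe; the console sink is only meant for local debugging):
###Code
# Print a few micro-batches of the parsed tweets to the console to verify the schema extraction
debug_query = messages \
    .writeStream \
    .format("console") \
    .option("truncate", "false") \
    .start()

# Stop the query once enough batches have been inspected
debug_query.stop()
###Output
_____no_output_____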
###Markdown
Streaming Queries Startup Run the following cells to start the streaming queries and write the final output into the respective Kafka topics. Note that the queries will keep on running until you stop them, since they're operating on a never-ending stream.
###Code
# the topics are counted from the queries and joined with the players and clubs dataframe
wordCount = wordCountQuery(messages, "Text") \
.join(topics, "word") \
.select("word", "count","category", to_json(struct("word", "count","category")).alias("value"))
# the final output is written to the CountByName Kafka topic
wordCountStreamQuery = wordCount \
.writeStream\
.format("kafka") \
.option("kafka.bootstrap.servers", "broker:9092") \
.option("topic", "countByName") \
.start()
wordCountStreamQuery.stop()
langCount = langCountQuery(messages, "Lang")
langCountStreamQuery = langCount \
.writeStream \
.format("kafka") \
.option("kafka.bootstrap.servers", "broker:9092") \
.option("topic", "countByLang") \
.start()
langCountStreamQuery.stop()
timestampCount = tweetsCountQuery(messages, "timestamp")
timestampCountStreamQuery = timestampCount \
.writeStream \
.format("kafka") \
.option("kafka.bootstrap.servers", "broker:9092") \
.option("topic", "countByTimestamp") \
.start()
timestampCountStreamQuery.stop()
###Output
_____no_output_____ |
notebooks/Lab_03_Indexing.ipynb | ###Markdown
IndexingCreating the right index — with the right keys, in the right order, and using the right expressions — is critical to query performance in any database system. This is true for Couchbase as well. This topic provides an overview of the types of index that you can create using the Index Service, and explains how they help to query for data efficiently and improve query performance. Notes- In Couchbase, you need indexes to query any data. Without an index, you cannot run any queries. In the case of `travel-sample` data, the indexes are created for you when you import the sample bucket.- Indexes are created asynchronously and can take a bit of time before the process is completed.- You can create indexes using any of the following utilities: - The Couchbase Query Workbench (in the Web Console) - The Command-Line based Query Shell (cbq) - Our REST API - Any of our Language SDKs, including Python (which we’ll focus on today). Types of Indexes- Primary Index: The primary index is simply an index on the document key on the entire keyspace.- Secondary Index: A secondary index is an index on any key-value or document-key.- Composite Secondary Index: A secondary index using multiple keys.- Partial Index: An index defined on a subset of documents.- Covering Index: An index that includes the actual values of all the fields specified in the query.- Array Index: An index on array objects in documents. Primary IndexPrimary indexes contain a full set of keys in a given keyspace like in Relational Databases. Every primary index is maintained asynchronously. A primary index is intended to be used for simple queries, which have no filters or predicates.Primary indexes are optional and are only required for running ad hoc queries on a keyspace that is not supported by a secondary index. They are slow as the entire document has to be fetched to match them against the queries and hence not recommended for production.
###Code
from couchbase.auth import PasswordAuthenticator
from couchbase.cluster import Cluster, ClusterOptions
from couchbase.management.queries import (
CreatePrimaryQueryIndexOptions,
QueryIndexManager,
)
###Output
_____no_output_____
###Markdown
NoteIf you are running Couchbase locally on your machine via docker or installation, you can change the connection string to `couchbase://localhost`
###Code
cluster = Cluster.connect(
"couchbase://couchbase",
ClusterOptions(PasswordAuthenticator("Administrator", "Password")),
)
primary_idx_query = (
"CREATE PRIMARY INDEX primary_idx_hotels ON `travel-sample`.inventory.hotel"
)
try:
result = cluster.query(primary_idx_query).execute()
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
Check for the Created Index on Indexes menu in the Web ConsoleThis index will be used for all queries on the hotel collection in inventory scope of the travel-sample bucket in case there is no other index on this collection relevant to the query. The drawback with this index is that all the records have to be fetched from the index to check whether it matches a query. This can be avoided by using specialized indexes with the relevant fields being indexed. Checking all Available IndexesYou can check for all the available indexes in the cluster by querying the `system:indexes` keyspace, which is an internal keyspace that keeps track of all the indexes.
###Code
import pprint
pp = pprint.PrettyPrinter(indent=4, depth=6)
all_indexes_query = "SELECT * FROM system:indexes"
try:
result = cluster.query(all_indexes_query).execute()
for row in result:
pp.pprint(row)
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
Explain: Check how the Query is being executedCouchbase allows you to check how the query is being executed using the current indexes. You can click on `Explain` in the Web interface for the Query Workbench to see the plan for a query.The query plan for this query indicates that the query - Scans the Primary Index. - Fetches all the Hotel documents - Projects the `title` and `country` fields for all the fetched documentsThe Primary Index used here is different from the one created above as there was already a primary index on the same collection that was created when the sample bucket was imported.Note that the Execution Plans can change based on the indexes available. Couchbase automatically selects the best index for the query. Secondary IndexA secondary index is an index on any key-value or document-key. This index can use any key within the document and the key can be of any type: scalar, object, or array. The query has to use the same type of object for the query engine to use the index.
###Code
# This index will be used for queries that work with the hotel titles
secondary_idx_query = (
"CREATE INDEX idx_hotels_title ON `travel-sample`.inventory.hotel(title)"
)
try:
result = cluster.query(secondary_idx_query).execute()
except Exception as e:
print(e)
###Output
_____no_output_____
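###Markdown
To verify that this index gets picked up, you could inspect the plan of a query that filters on the indexed key (a sketch; the exact plan depends on the indexes currently present in the cluster):
###Code
# EXPLAIN shows the plan without executing the query
explain_title_query = (
    "EXPLAIN SELECT title FROM `travel-sample`.inventory.hotel WHERE title LIKE 'A%'"
)
try:
    result = cluster.query(explain_title_query)
    for row in result:
        pp.pprint(row)
except Exception as e:
    print(e)
###Output
_____no_output_____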
###Markdown
Composite Secondary IndexIt is common to have queries with multiple filters (predicates). In such cases, you want to use indexes with multiple keys so the indexes can return only the qualified document keys. Additionally, if a query is referencing only the keys in the index, the query engine can simply answer the query from the index scan result without having to fetch from the data nodes. This is commonly used for performance optimization.We can create an index that will handle the query to get the name and country for each hotel in the inventory scope to make it more efficient than using the primary index.
###Code
# This index will be used for queries that work with the hotel titles & countries
hotel_title_country_idx_query = "CREATE INDEX idx_hotels_title_country ON `travel-sample`.inventory.hotel(title, country)"
try:
result = cluster.query(hotel_title_country_idx_query).execute()
except Exception as e:
print(e)
###Output
_____no_output_____
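###Markdown
A query that filters on the leading key and only projects indexed fields can then be served from this index alone (a sketch; the optimizer picks the index automatically):
###Code
# Both projected fields (title, country) are present in idx_hotels_title_country
covered_title_country_query = (
    "SELECT title, country FROM `travel-sample`.inventory.hotel WHERE title LIKE 'A%' LIMIT 5"
)
try:
    result = cluster.query(covered_title_country_query)
    for row in result:
        pp.pprint(row)
except Exception as e:
    print(e)
###Output
_____no_output_____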
###Markdown
Partial IndexUnlike relational systems where each type of row is in a distinct table, Couchbase keyspaces can have documents of various types. You can include a distinguishing field in your document to differentiate distinct types.For example, the landmark keyspace distinguishes types of landmark using the activity field. Couchbase allows you to create indexes for specific activities from them.
###Code
activities = "SELECT DISTINCT activity FROM `travel-sample`.inventory.landmark"
try:
result = cluster.query(activities)
for row in result:
print(row)
except Exception as e:
print(e)
# Create an index for landmarks that are of type 'eat'
restaurants_index_query = "CREATE INDEX landmarks_eat ON `travel-sample`.inventory.landmark(name, id, address) WHERE activity='eat'"
try:
result = cluster.query(restaurants_index_query).execute()
except Exception as e:
print(e)
all_indexes_query = "SELECT * FROM system:indexes where name='landmarks_eat'"
try:
result = cluster.query(all_indexes_query).execute()
for row in result:
pp.pprint(row)
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
Covering IndexWhen an index includes the actual values of all the fields specified in the query, the index covers the query and does not require an additional step to fetch the actual values from the data service. An index, in this case, is called a covering index and the query is called a covered query. As a result, covered queries are faster and deliver better performance.
###Code
hotel_state_index_query = (
"CREATE INDEX idx_state on `travel-sample`.inventory.hotel (state)"
)
try:
result = cluster.query(hotel_state_index_query).execute()
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
We can see the query execution plan using the EXPLAIN statement. When a query uses a covering index, the EXPLAIN statement shows that a covering index is used for data access, thus avoiding the overhead associated with key-value document fetches. If we select state from the hotel keyspace, the actual values of the field state that are to be returned are present in the index idx_state, which avoids an additional step to fetch the data. In this case, the index idx_state is called a covering index and the query is a covered query.
###Code
query_plan_example = (
"EXPLAIN SELECT state FROM `travel-sample`.inventory.hotel WHERE state = 'CA'"
)
try:
result = cluster.query(query_plan_example)
for row in result:
pp.pprint(row)
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
Array IndexingArray Indexing adds the capability to create global indexes on array elements and optimizes the execution of queries involving array elements.
###Code
# Create an index on all schedules
# Here, we create an index on all the distinct flight schedules
schedules_index_query = "CREATE INDEX idx_sched ON `travel-sample`.inventory.route ( DISTINCT ARRAY v.flight FOR v IN schedule END )"
try:
result = cluster.query(schedules_index_query).execute()
except Exception as e:
print(e)
# Select scheduled flights operated by 'UA'
query_schedules = "SELECT * FROM `travel-sample`.inventory.route WHERE ANY v IN schedule SATISFIES v.flight LIKE 'UA%' END LIMIT 5"
try:
result = cluster.query(query_schedules)
for row in result:
pp.pprint(row)
except Exception as e:
print(e)
# Index on Flight Stops
flight_stops_index = "CREATE INDEX idx_flight_stops ON `travel-sample`.inventory.route( stops, DISTINCT ARRAY v.flight FOR v IN schedule END )"
try:
result = cluster.query(flight_stops_index).execute()
except Exception as e:
print(e)
# Select flights with a stopover
filter_stops_query = "SELECT * FROM `travel-sample`.inventory.route WHERE stops >=1 AND ANY v IN schedule SATISFIES v.flight LIKE 'FL%' END"
try:
result = cluster.query(filter_stops_query)
for row in result:
pp.pprint(row)
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
Dropping IndexesThe DROP INDEX statement allows you to drop a named primary index or a secondary index. You can drop an index by specifying the name of the index and the keyspace (bucket.scope.collection).
###Code
# This query will drop the index idx_hotels_title that we created earlier
drop_idx_query = "DROP INDEX idx_hotels_title ON `travel-sample`.inventory.hotel"
try:
result = cluster.query(drop_idx_query).execute()
except Exception as e:
print(e)
# This query will drop the primary index primary_idx_hotels that we created earlier
# It is recommended to not have primary indexes on production systems as they scan all the documents in the collection
drop_primary_idx_query = (
"DROP INDEX primary_idx_hotels ON `travel-sample`.inventory.hotel"
)
try:
result = cluster.query(drop_primary_idx_query).execute()
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
Observe (Optional)Could you try to observe the performance difference between using the Primary Index & the Secondary Index? For this experiment if you are working with the travel-sample data, you would have to delete some of the existing Indexes. Query OptimizationQuery Optimization tries to optimize queries in various forms and scenarios to bring efficiency. Each optimization is different and results in a different amount of performance benefit.Tuning is iterative and involves the following basic steps:- Identifying the slowly performing or high resource consumption N1QL statements that are responsible for a large share of the application workload and system resources. Generally tuning the slower and most frequently used N1QL queries will yield the highest results. Additionally, depending on your response and SLA needs you will need to identify and tune specific queries. As in many scenarios generally, the Pareto principle applies to query tuning as well - 80% of your workload/performance problems are probably caused by 20% of your queries - focus and tune that 20% of your queries- Verify that the execution plans produced by the query optimizer for these statements are reasonable and expected. Note: Couchbase currently is a RULE based optimizer and not a COST based optimizer so key or index cardinality do not impact the choice of the index or creation of the overall query plan- Implement corrective actions to generate better execution plans for poorly performing SQL statementsThe previous steps are repeated until the query performance reaches a satisfactory level or no more statements can be tuned.For more details on optimizing your queries, you can check the [Learning Path on our Developer Portal](https://developer.couchbase.com/learn/n1ql-query-performance-guide). Exercise 3.1- Create an index to cover the query: "SELECT name, url, city from \`travel-sample\`.inventory.hotel where country='United Kingdom'"- Create an index to query airports that are over the altitude of 1000. You can look at the alt field inside geo. Solutions
###Code
# Covered Index on Hotels
# Explain the query to check the index
# Airports with altitude over 1000
# Explain the query to check the index
###Output
_____no_output_____
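###Markdown
One possible way to approach Exercise 3.1 (a sketch, not the only valid answer; the index names are arbitrary):
###Code
# Covering index for: SELECT name, url, city FROM hotel WHERE country='United Kingdom'
uk_hotels_idx_query = (
    "CREATE INDEX idx_hotels_country_name_url_city "
    "ON `travel-sample`.inventory.hotel(country, name, url, city)"
)
try:
    result = cluster.query(uk_hotels_idx_query).execute()
except Exception as e:
    print(e)

# Partial index for airports above an altitude of 1000 (the alt field inside geo)
high_airports_idx_query = (
    "CREATE INDEX idx_airports_alt "
    "ON `travel-sample`.inventory.airport(geo.alt) WHERE geo.alt > 1000"
)
try:
    result = cluster.query(high_airports_idx_query).execute()
except Exception as e:
    print(e)
###Output
_____no_output_____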
###Markdown
IndexingCreating the right index — with the right keys, in the right order, and using the right expressions — is critical to query performance in any database system. This is true for Couchbase as well. This topic provides an overview of the types of index that you can create using the Index Service, and explains how they help to query for data efficiently and improve query performance. Notes- In Couchbase, you need indexes to query any data. Without an index, you cannot run any queries. In the case of `travel-sample` data, the indexes are created for you when you import the sample bucket.- Indexes are created asynchronously and can take a bit of time before the process is completed.- You can create indexes using any of the following utilities: - The Couchbase Query Workbench (in the Web Console) - The Command-Line based Query Shell (cbq) - Our REST API - Any of our Language SDKs, including Python (which we’ll focus on today). Types of Indexes- Primary Index: The primary index is simply an index on the document key on the entire keyspace.- Secondary Index: A secondary index is an index on any key-value or document-key.- Composite Secondary Index: A secondary index using multiple keys.- Partial Index: An index defined on a subset of documents.- Covering Index: An index that includes the actual values of all the fields specified in the query.- Array Index: An index on array objects in documents. Primary IndexPrimary indexes contain a full set of keys in a given keyspace like in Relational Databases. Every primary index is maintained asynchronously. A primary index is intended to be used for simple queries, which have no filters or predicates.Primary indexes are optional and are only required for running ad hoc queries on a keyspace that is not supported by a secondary index. They are slow as the entire document has to be fetched to match them against the queries and hence not recommended for production.
###Code
from couchbase.auth import PasswordAuthenticator
from couchbase.cluster import Cluster, ClusterOptions
from couchbase.management.queries import (
CreatePrimaryQueryIndexOptions,
QueryIndexManager,
)
cluster = Cluster.connect(
"couchbase://localhost",
ClusterOptions(PasswordAuthenticator("Administrator", "Password")),
)
primary_idx_query = (
"CREATE PRIMARY INDEX primary_idx_hotels ON `travel-sample`.inventory.hotel"
)
try:
result = cluster.query(primary_idx_query).execute()
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
Check for the Created Index on Indexes menu in the Web ConsoleThis index will be used for all queries on the hotel collection in inventory scope of the travel-sample bucket in case there is no other index on this collection relevant to the query. The drawback with this index is that all the records have to be fetched from the index to check whether it matches a query. This can be avoided by using specialized indexes with the relevant fields being indexed. Checking all Available IndexesYou can check for all the available indexes in the cluster by querying the `system:indexes` keyspace, which is an internal keyspace that keeps track of all the indexes.
###Code
import pprint
pp = pprint.PrettyPrinter(indent=4, depth=6)
all_indexes_query = "SELECT * FROM system:indexes"
try:
result = cluster.query(all_indexes_query).execute()
for row in result:
pp.pprint(row)
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
Explain: Check how the Query is being executedCouchbase allows you to check how the query is being executed using the current indexes. You can click on `Explain` in the Web interface for the Query Workbench to see the plan for a query.The query plan for this query indicates that the query - Scans the Primary Index. - Fetches all the Hotel documents - Projects the `title` and `country` fields for all the fetched documentsThe Primary Index used here is different from the one created above as there was already a primary index on the same collection that was created when the sample bucket was imported.Note that the Execution Plans can change based on the indexes available. Couchbase automatically selects the best index for the query. Secondary IndexA secondary index is an index on any key-value or document-key. This index can use any key within the document and the key can be of any type: scalar, object, or array. The query has to use the same type of object for the query engine to use the index.
###Code
# This index will be used for queries that work with the hotel titles
secondary_idx_query = (
"CREATE INDEX idx_hotels_title ON `travel-sample`.inventory.hotel(title)"
)
try:
result = cluster.query(secondary_idx_query).execute()
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
Composite Secondary IndexIt is common to have queries with multiple filters (predicates). In such cases, you want to use indexes with multiple keys so the indexes can return only the qualified document keys. Additionally, if a query is referencing only the keys in the index, the query engine can simply answer the query from the index scan result without having to fetch from the data nodes. This is commonly used for performance optimization.We can create an index that will handle the query to get the name and country for each hotel in the inventory scope to make it more efficient than using the primary index.
###Code
# This index will be used for queries that work with the hotel titles & countries
hotel_title_country_idx_query = "CREATE INDEX idx_hotels_title_country ON `travel-sample`.inventory.hotel(title, country)"
try:
result = cluster.query(hotel_title_country_idx_query).execute()
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
Partial IndexUnlike relational systems where each type of row is in a distinct table, Couchbase keyspaces can have documents of various types. You can include a distinguishing field in your document to differentiate distinct types.For example, the landmark keyspace distinguishes types of landmark using the activity field. Couchbase allows you to create indexes for specific activities from them.
###Code
activities = "SELECT DISTINCT activity FROM `travel-sample`.inventory.landmark"
try:
result = cluster.query(activities)
for row in result:
print(row)
except Exception as e:
print(e)
# Create an index for landmarks that are of type 'eat'
restaurants_index_query = "CREATE INDEX landmarks_eat ON `travel-sample`.inventory.landmark(name, id, address) WHERE activity='eat'"
try:
result = cluster.query(restaurants_index_query).execute()
except Exception as e:
print(e)
all_indexes_query = "SELECT * FROM system:indexes where name='landmarks_eat'"
try:
result = cluster.query(all_indexes_query).execute()
for row in result:
pp.pprint(row)
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
Covering IndexWhen an index includes the actual values of all the fields specified in the query, the index covers the query and does not require an additional step to fetch the actual values from the data service. An index, in this case, is called a covering index and the query is called a covered query. As a result, covered queries are faster and deliver better performance.
###Code
hotel_state_index_query = (
"CREATE INDEX idx_state on `travel-sample`.inventory.hotel (state)"
)
try:
result = cluster.query(hotel_state_index_query).execute()
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
We can see the query execution plan using the EXPLAIN statement. When a query uses a covering index, the EXPLAIN statement shows that a covering index is used for data access, thus avoiding the overhead associated with key-value document fetches. If we select state from the hotel keyspace, the actual values of the field state that are to be returned are present in the index idx_state, which avoids an additional step to fetch the data. In this case, the index idx_state is called a covering index and the query is a covered query.
###Code
query_plan_example = (
"EXPLAIN SELECT state FROM `travel-sample`.inventory.hotel WHERE state = 'CA'"
)
try:
result = cluster.query(query_plan_example)
for row in result:
pp.pprint(row)
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
Array IndexingArray Indexing adds the capability to create global indexes on array elements and optimizes the execution of queries involving array elements.
###Code
# Create an index on all schedules
# Here, we create an index on all the distinct flight schedules
schedules_index_query = "CREATE INDEX idx_sched ON `travel-sample`.inventory.route ( DISTINCT ARRAY v.flight FOR v IN schedule END )"
try:
result = cluster.query(schedules_index_query).execute()
except Exception as e:
print(e)
# Select scheduled flights operated by 'UA'
query_schedules = "SELECT * FROM `travel-sample`.inventory.route WHERE ANY v IN schedule SATISFIES v.flight LIKE 'UA%' END LIMIT 5"
try:
result = cluster.query(query_schedules)
for row in result:
pp.pprint(row)
except Exception as e:
print(e)
# Index on Flight Stops
flight_stops_index = "CREATE INDEX idx_flight_stops ON `travel-sample`.inventory.route( stops, DISTINCT ARRAY v.flight FOR v IN schedule END )"
try:
result = cluster.query(flight_stops_index).execute()
except Exception as e:
print(e)
# Select flights with a stopover
filter_stops_query = "SELECT * FROM `travel-sample`.inventory.route WHERE stops >=1 AND ANY v IN schedule SATISFIES v.flight LIKE 'FL%' END"
try:
result = cluster.query(filter_stops_query)
for row in result:
pp.pprint(row)
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
Dropping IndexesThe DROP INDEX statement allows you to drop a named primary index or a secondary index. You can drop an index by specifying the name of the index and the keyspace (bucket.scope.collection).
###Code
# This query will drop the index idx_hotels_title that we created earlier
drop_idx_query = "DROP INDEX idx_hotels_title ON `travel-sample`.inventory.hotel"
try:
result = cluster.query(drop_idx_query).execute()
except Exception as e:
print(e)
# This query will drop the primary index primary_idx_hotels that we created earlier
# It is recommended to not have primary indexes on production systems as they scan all the documents in the collection
drop_primary_idx_query = (
"DROP INDEX primary_idx_hotels ON `travel-sample`.inventory.hotel"
)
try:
result = cluster.query(drop_primary_idx_query).execute()
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
Observe (Optional)Could you try to observe the performance difference between using the Primary Index & the Secondary Index? For this experiment if you are working with the travel-sample data, you would have to delete some of the existing Indexes. Query OptimizationQuery Optimization tries to optimize queries in various forms and scenarios to bring efficiency. Each optimization is different and results in a different amount of performance benefit.Tuning is iterative and involves the following basic steps:- Identifying the slowly performing or high resource consumption N1QL statements that are responsible for a large share of the application workload and system resources. Generally tuning the slower and most frequently used N1QL queries will yield the highest results. Additionally, depending on your response and SLA needs you will need to identify and tune specific queries. As in many scenarios generally, the Pareto principle applies to query tuning as well - 80% of your workload/performance problems are probably caused by 20% of your queries - focus and tune that 20% of your queries- Verify that the execution plans produced by the query optimizer for these statements are reasonable and expected. Note: Couchbase currently is a RULE based optimizer and not a COST based optimizer so key or index cardinality do not impact the choice of the index or creation of the overall query plan- Implement corrective actions to generate better execution plans for poorly performing SQL statementsThe previous steps are repeated until the query performance reaches a satisfactory level or no more statements can be tuned.For more details on optimizing your queries, you can check the [Learning Path on our Developer Portal](https://developer.couchbase.com/learn/n1ql-query-performance-guide). Exercise 3.1- Create an index to cover the query: "SELECT name, url, city from \`travel-sample\`.inventory.hotel where country='United Kingdom'"- Create an index to query airports that are over the altitude of 1000. You can look at the alt field inside geo. Solutions
###Code
# Covered Index on Hotels
# Explain the query to check the index
# Airports with altitude over 1000
# Explain the query to check the index
###Output
_____no_output_____ |
v1_exploration/fastdmdt.ipynb | ###Markdown
Fast dmdt method Ashish Mahabal 2018-11-12 Imports
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
###Output
_____no_output_____
###Markdown
Read data 'KeplerSampleFullQ.npy' has 2500 light curves. Each is an array with columns: time, mag, magerr We will use the 251st light curve as our example
###Code
kdata = np.load('KeplerSampleFullQ.npy',encoding='bytes')
print(kdata.shape)
print(len(kdata[250][0]))
###Output
(2500, 3)
3534
###Markdown
plot the light curve
###Code
plt.plot(kdata[250][0],kdata[250][1],'.')
# Usage: xd = pairwisediffs(kdata[250][0]); yd = pairwisediffs(kdata[250][1])
def pairwisediffs(arrayoned):
x = arrayoned.reshape((1,len(arrayoned)))
xdm = x[:] - np.transpose(x[:])
xd = xdm[np.triu_indices(len(x[0]), k = 1)]
return(xd)
xd = pairwisediffs(kdata[250][0])
yd = pairwisediffs(kdata[250][1])
###Output
_____no_output_____
###Markdown
get the dmints and dtints
###Code
dmints = [-0.01, -0.006, -0.005, -0.004, -0.0012, -0.001, -0.0009, -0.0007,
-0.0006, -0.0005, -0.0004, -0.0003, -0.0002, -0.00015, -0.0001,
-0.00005, 0, 0.00005, 0.0001, 0.0002, 0.0003, 0.001, 0.0012, 0.003,
0.004, 0.005, 0.006, 0.01, 0.02]
dtints = [-1.0/145, 20.0/145, 30.0/145, 45.0/145, 60.0/145, 80.0/145, 90.0/145,
100.0/145, 120.0/145, 140.0/145, 1.0, 1.1, 1.2,
1.3, 1.4, 1.5, 1.7, 2, 2.25, 2.5, 3.0, 6 , 9, 15, 20, 30, 45, 60, 90]
###Output
_____no_output_____
###Markdown
Plot the 2D histogram with native bins (as defined above) H[0] contains the counts, H[1] and H[2] contain the bin edges, H[3] is the image object used for the colorbar The x- and y-bins as shown are unequal sized
###Code
H = plt.hist2d(xd,yd,bins=[dtints,dmints],range=None,normed=False)
plt.colorbar(H[3])
###Output
_____no_output_____
###Markdown
Using equal sized bins (default to hist) shows how the x (dt) distribution is
###Code
plt.hist(xd,bins=28)
###Output
_____no_output_____
###Markdown
and this is how it is with the bins we have defined Clearly, the first few have very few points in them Note that the bins have different sizes
###Code
plt.hist(xd,bins=dtints)
###Output
_____no_output_____
###Markdown
Using equal sized bins shows how the y (dm) distribution is
###Code
plt.hist(yd)
###Output
_____no_output_____
###Markdown
... and this is how it is with the bins we have defined Clearly, several on either side have very few points in them Here the bins are more symmetric (though still unequal sized)
###Code
plt.hist(yd,bins=dmints)
###Output
_____no_output_____
###Markdown
Plot the 2d histogram using equalsized pixels
###Code
plt.imshow(H[0])
plt.colorbar()
###Output
_____no_output_____
###Markdown
Normalize H by sum
###Code
G = 255*H[0]/np.sum(H[0])
plt.imshow(G)
plt.colorbar()
print(np.sum(G))
# USAGE: fastdmdt = get2dhist([kdata[250][0],kdata[250][1]])
def get2dhist(lightcurve):
xd = pairwisediffs(lightcurve[0])
yd = pairwisediffs(lightcurve[1])
H,xe,ye = np.histogram2d(xd,yd,bins=[dtints,dmints],range=None,normed=False)
G = 255*H/np.sum(H)
return G
###Output
_____no_output_____
###Markdown
Store G along with dmints and dtints used to calculate it Ideal to do this for all light curves, and store dtints and dmints only once
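###Markdown
A possible way to act on this note and precompute the representation for every light curve, storing the bin definitions only once (a sketch; the file names are arbitrary and the loop can take a while for all 2500 curves):
###Code
# Compute the dmdt image for every light curve and save them together
# with a single copy of the bin definitions.
all_dmdts = np.array([get2dhist([lc[0], lc[1]]) for lc in kdata])
np.savez('kepler_dmdts.npz', dmdts=all_dmdts, dtints=dtints, dmints=dmints)
###Output
_____no_output_____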
###Code
fastdmdt = get2dhist([kdata[250][0],kdata[250][1]])
plt.imshow(fastdmdt.T, norm = LogNorm(), origin="lower")
plt.colorbar()
fastdmdt = get2dhist([kdata[25][0],kdata[25][1]])
plt.imshow(fastdmdt.T, norm = LogNorm(), origin="lower")
plt.colorbar()
def plotme(n):
fastdmdt = get2dhist([kdata[n][0],kdata[n][1]])
plt.imshow(fastdmdt.T, norm = LogNorm(), origin="lower")
plt.colorbar()
plotme(0)
plotme(1)
plotme(2)
###Output
_____no_output_____ |
StatsForDataAnalysis/stat.binomial_test_with_plots.ipynb | ###Markdown
Binomial test for a proportion
###Code
import numpy as np
from scipy import stats
%pylab inline
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
Shaken, not stirred James Bond says he prefers his martini shaken, not stirred. Let's run a blind test: we offer him a pair of drinks $n$ times and find out which of the two he prefers. We get: * **sample:** a binary vector of length $n$, where 1 means James Bond preferred the shaken drink and 0 the stirred one;* **hypothesis $H_0$:** James Bond cannot tell the two drinks apart and chooses at random;* **statistic $T$:** the number of ones in the sample. If the null hypothesis is true and James Bond really does choose at random, then we can obtain any of the $2^n$ binary vectors of length $n$ with equal probability. We could enumerate all such vectors, compute the value of the statistic $T$ on each of them, and obtain its null distribution that way. But in this case that step can be skipped: we are dealing with a sample consisting of 0s and 1s, that is, coming from a Bernoulli distribution $Ber(p)$. The null hypothesis of choosing at random corresponds to $p=\frac1{2}$, that is, in each experiment the probability of choosing the shaken martini is $\frac1{2}$. The sum of $n$ identically distributed Bernoulli random variables with parameter $p$ has a binomial distribution $Bin(n, p)$. Hence, the null distribution of the statistic $T$ is $Bin\left(n, \frac1{2}\right)$. Let $n=16.$
###Code
n = 100
F_H0 = stats.binom(n, 0.67)
x = np.linspace(0,99,100)
pylab.bar(x, F_H0.pmf(x), align = 'center')
xlim(-0.5, 100.5)
pylab.show()
###Output
_____no_output_____
###Markdown
One-sided alternative **hypothesis $H_1$:** James Bond prefers shaken martini. Under this alternative, large values of the statistic are more likely; when computing the achieved significance level, we sum the heights of the bars in the right tail of the distribution.
###Code
pylab.bar(x, F_H0.pmf(x), align = 'center')
pylab.bar(np.linspace(12,16,5), F_H0.pmf(np.linspace(12,16,5)), align = 'center', color='red')
xlim(-0.5, 100.5)
pylab.show()
stats.binom_test(12, 16, 0.5, alternative = 'greater')
pylab.bar(x, F_H0.pmf(x), align = 'center')
pylab.bar(np.linspace(11,16,6), F_H0.pmf(np.linspace(11,16,6)), align = 'center', color='red')
xlim(-0.5, 16.5)
pylab.show()
stats.binom_test(11, 16, 0.5, alternative = 'greater')
###Output
_____no_output_____
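###Markdown
As a sanity check (a sketch, not part of the original assignment), the one-sided p-value for 12 successes out of 16 can also be obtained by summing the right tail of $Bin\left(16, \frac1{2}\right)$ directly:
###Code
# P(T >= 12) under Bin(16, 0.5): the survival function gives P(T > 11)
print(stats.binom(16, 0.5).sf(11))
print(sum(stats.binom(16, 0.5).pmf(k) for k in range(12, 17)))
###Output
_____no_output_____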
###Markdown
Two-sided alternative **hypothesis $H_1$:** James Bond prefers one particular kind of martini. Under this alternative, both very large and very small values of the statistic are more likely; when computing the achieved significance level, we sum the heights of the bars in both the right and the left tails of the distribution.
###Code
pylab.bar(x, F_H0.pmf(x), align = 'center')
pylab.bar(np.linspace(12,16,5), F_H0.pmf(np.linspace(12,16,5)), align = 'center', color='red')
pylab.bar(np.linspace(0,4,5), F_H0.pmf(np.linspace(0,4,5)), align = 'center', color='red')
xlim(-0.5, 16.5)
pylab.show()
stats.binom_test(12, 16, 0.5, alternative = 'two-sided')
pylab.bar(x, F_H0.pmf(x), align = 'center')
pylab.bar(np.linspace(13,16,4), F_H0.pmf(np.linspace(13,16,4)), align = 'center', color='red')
pylab.bar(np.linspace(0,3,4), F_H0.pmf(np.linspace(0,3,4)), align = 'center', color='red')
xlim(-0.5, 16.5)
pylab.show()
stats.binom_test(13, 16, 0.5, alternative = 'two-sided')
###Output
_____no_output_____ |
Section 5 - Classification and Regression/5.3/hands-on.ipynb | ###Markdown
DecisionTreeClassifier
###Code
from pyspark.ml import Pipeline
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.feature import StringIndexer, VectorIndexer
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
sample_libsvm_data.show()
# Creating our stages:
# STAGE 1:
# Index labels, adding metadata to the label column.
# Fit on whole dataset to include all labels in index.
label_indexer = StringIndexer(inputCol="label", outputCol="indexedLabel").fit(
sample_libsvm_data
)
# STAGE 2:
# Automatically identify categorical features, and index them.
# We specify maxCategories so features with > 4 distinct values are treated as continuous.
feature_indexer = VectorIndexer(
inputCol="features", outputCol="indexedFeatures", maxCategories=4
).fit(sample_libsvm_data)
# STAGE 3:
# Train a DecisionTree model.
decission_tree_classifier_model = DecisionTreeClassifier(
labelCol="indexedLabel", featuresCol="indexedFeatures"
)
print(type(decission_tree_classifier_model))
# Creating our Pipeline:
# Chain indexers and tree in a Pipeline
pipeline = Pipeline(
stages=[
label_indexer, # STAGE 1
feature_indexer, # STAGE 2
decission_tree_classifier_model, # STAGE 3
]
)
# Split the data into training and test sets (30% held out for testing)
(training_data, test_data) = sample_libsvm_data.randomSplit([0.7, 0.3])
# Train model. This also runs the indexers.
model = pipeline.fit(training_data)
# Make predictions.
predictions = model.transform(test_data)
# Select example rows to display.
predictions.select("prediction", "indexedLabel", "features").show(5)
# Select (prediction, true label) and compute test error
evaluator = MulticlassClassificationEvaluator(
labelCol="indexedLabel", predictionCol="prediction", metricName="accuracy"
)
accuracy = evaluator.evaluate(predictions)
print(f"Test Error = {1.0 - accuracy:.5f} ")
# You can see that the Pipeline and the PipelineModel have the same stages
print(pipeline.getStages())
print(model.stages)
###Output
[StringIndexer_5d082cd39d50, VectorIndexer_f0357926937b, DecisionTreeClassifier_a882569e7e76]
[StringIndexer_5d082cd39d50, VectorIndexer_f0357926937b, DecisionTreeClassificationModel (uid=DecisionTreeClassifier_a882569e7e76) of depth 2 with 5 nodes]
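###Markdown
To peek inside the fitted tree itself (a sketch; `toDebugString` is available on the fitted tree model, which is the last stage of the PipelineModel):
###Code
# Print the learned split rules of the DecisionTreeClassificationModel
tree_model = model.stages[-1]
print(tree_model.toDebugString)
###Output
_____no_output_____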
###Markdown
Random Forest Regression
###Code
from pyspark.ml import Pipeline
from pyspark.ml.regression import RandomForestRegressor
from pyspark.ml.feature import VectorIndexer
from pyspark.ml.evaluation import RegressionEvaluator
sample_libsvm_data.show()
# Creating our stages:
# STAGE 1:
# Automatically identify categorical features, and index them.
feature_indexer = VectorIndexer(
inputCol="features",
outputCol="indexedFeatures",
# Set maxCategories so features with > 4 distinct values are treated as continuous.
maxCategories=4,
).fit(sample_libsvm_data)
# STAGE 2:
# Train a RandomForest model.
random_forest_model = RandomForestRegressor(featuresCol="indexedFeatures")
# Creating our Pipeline:
# Chain indexer and forest in a Pipeline
pipeline = Pipeline(stages=[feature_indexer, random_forest_model])
# Split the data into training and test sets (30% held out for testing)
(training_data, test_data) = sample_libsvm_data.randomSplit([0.7, 0.3])
# Train model. This also runs the indexer.
model = pipeline.fit(training_data)
# Make predictions.
predictions = model.transform(test_data)
# Select example rows to display.
predictions.select("prediction", "label", "features").show(5)
# Select (prediction, true label) and compute test error
evaluator = RegressionEvaluator(
labelCol="label", predictionCol="prediction", metricName="rmse"
)
rmse = evaluator.evaluate(predictions)
print("Root Mean Squared Error (RMSE) on test data = %g" % rmse)
# You can see that the Pipeline and the PipelineModel have the same stages
print(pipeline.getStages())
print(model.stages)
# The last stage in a PipelineModel is usually the most informative
print(model.stages[-1])
# Here you can see that pipeline and model are Pipeline and PipelineModel classes
print("pipeline:", type(pipeline))
print("model:", type(model))
###Output
pipeline: <class 'pyspark.ml.pipeline.Pipeline'>
model: <class 'pyspark.ml.pipeline.PipelineModel'>
|
week-4/jupyter_build/02_python_scraping_HTML_with_requests_BeautifulSoup.ipynb | ###Markdown
Web Data Scraping AcknowledgementsThese notebooks are adaptations from a 5 session mini course at the University of Colorado. The github repo can be found [here](https://github.com/CU-ITSS/Web-Data-Scraping-S2019) [Spring 2019 ITSS Mini-Course] The course is taught by [Brian C. Keegan, Ph.D.](http://brianckeegan.com/) [Assistant Professor, Department of Information Science](https://www.colorado.edu/cmci/people/information-science/brian-c-keegan). They have been adapted for relevant content and integration with Docker so that we all have the same environment. Professor Keegan suggests using a most recent version of Python (3.7) which is set in the `requirements.txt` file.The Spring ITSS Mini-Course was adapted from a number of sources including [Allison Morgan](https://allisonmorgan.github.io/) for the [2018 Summer Institute for Computational Social Science](https://github.com/allisonmorgan/sicss_boulder), which were in turn derived from [other resources](https://github.com/simonmunzert/web-scraping-with-r-extended-edition) developed by [Simon Munzert](http://simonmunzert.github.io/) and [Chris Bail](http://www.chrisbail.net/). This notebook is adapted from excellent notebooks in Dr. [Cody Buntain](http://cody.bunta.in/)'s seminar on [Social Media and Crisis Informatics](http://cody.bunta.in/teaching/2018_winter_umd_inst728e/) as well as the [PRAW documentation](https://praw.readthedocs.io/en/latest/). Parsing HTML data into tabular dataThe overall goal we have as researchers in scraping data from the web is converting data from one structured format (HTML's tree-like structures) into another structured format (probably a tabular structure with rows and columns). This could involve simply reading tables out of a webpage all the way up to taking irregularly-structured HTML elements into a tabular format. We are going to make some use of the [`pandas`](https://pandas.pydata.org/) library ("**pan**el **da**ta", not the cute animal), which is Python's implementation of a data frame concept. This is a very powerful and complex library that I typically spend more than 12 hours of lecture teaching in intermediate programming classes. I hope to convey some important elements as we work through material, but it is far beyond the scope of this class to be able to cover all the fundamentals and syntax. Let's begin by importing the libraries we'll need in this notebook: requests, BeautifulSoup, and pandas
###Code
# Most straight-forward way to import a librayr in Python
import requests
# BeautifulSoup is a module inside the "bs4" library, we only import the BeautifulSoup module
from bs4 import BeautifulSoup
# We import pandas but give the library a shortcut alias "pd" since we will call its functions so much
import pandas as pd
###Output
_____no_output_____
###Markdown
Reading an HTML table into Python[The Numbers](http://www.the-numbers.com) is a popular source of data about movies' box office revenue numbers. Their daily domestic charts are HTML tables with the top-grossing movies for each day of the year, going back for several years. This [table](https://www.the-numbers.com/box-office-chart/daily/2018/12/25) for Christmas day in 2018 has columns for the current week's ranking, previous week's ranking, name of movie, distributor, gross, change over the previous week, number of theaters, revenue per theater, total gross, and number of days since release. This looks like a fairly straightforward table that could be read directly into a data frame-like structure.Using the Inspect tool, we can see the table exists as a `<table>` element with child tags like `<th>` and `<tr>` (table row). Each `<tr>` has `<td>` tags which define each of the cells and their content. For more on how HTML defines tables, check out [this tutorial](https://www.w3schools.com/html/html_tables.asp).Using `requests` and `BeautifulSoup` we would get this webpage's HTML, turn it into soup, and then find the table (`<table>`) or the table rows (`<tr>`) and pull out their content.
###Code
# Make the request
xmas_bo_raw = requests.get('https://www.the-numbers.com/box-office-chart/daily/2018/12/25').text
# Turn into soup, specify the HTML parser
xmas_bo_soup = BeautifulSoup(xmas_bo_raw,'html.parser')
# Use .find_all to retrieve all the tables in the page
xmas_bo_tables = xmas_bo_soup.find_all('table')
###Output
_____no_output_____
###Markdown
It turns out there are two tables on the page, the first is a baby table consisting of the "Previous Chart", "Chart Index", and "Next Chart" at the top. We want the second table with all the data: `xmas_bo_tables[1]` returns the second chart (remember that Python is 0-indexed, so the first chart is at `xmas_bo_tables[0]`). With this table identified, we can do a second `find_all` to get the table rows inside it and we save it as `xmas_bo_trs`.
###Code
xmas_bo_trs = xmas_bo_tables[1].find_all('tr')
###Output
_____no_output_____
###Markdown
Let's inspect a few of these rows. The first row in our list of rows under `xmas_bo_trs` should be the header with the names of the columns.
###Code
xmas_bo_trs[0]
###Output
_____no_output_____
###Markdown
The next table row should be for Aquaman.
###Code
xmas_bo_trs[1]
###Output
_____no_output_____
###Markdown
If we wanted to access the contents of this table row, we could use the `.contents` method to get a list of each of the `<td>` table cells, which (frustratingly) intersperses newline characters.
###Code
xmas_bo_trs[1].contents
###Output
_____no_output_____
###Markdown
Another alternative is to use the `.text` method to get the text content of all the cells in this row.
###Code
xmas_bo_trs[1].text
###Output
_____no_output_____
###Markdown
The `\n` characters re-appear here, but if we `print` out this statement, we see their newline functionality.
###Code
print(xmas_bo_trs[1].text)
###Output
1
(1)
Aquaman
Warner Bros.
$21,982,419
+103%
4,125
$5,329
$105,407,869
5
###Markdown
We could use string processing to take this text string and convert it into a simple list of data. `.split('\n')` will split the string on the newline characters and return a list of what exists in between.
###Code
xmas_bo_trs[1].text.split('\n')
###Output
_____no_output_____
###Markdown
We'll write a `for` loop to go through all the table rows in `xmas_bo_trs`, get the list of data from the row, and add it back to a list of all the rows.
###Code
cleaned_xmas_bo_rows = []
# Loop through all the non-header (first row) table rows
for row in xmas_bo_trs[1:]:
# Get the text of the row and split on the newlines (like above)
cleaned_row = row.text.split('\n')
# Add this cleaned row back to the external list of row data
cleaned_xmas_bo_rows.append(cleaned_row)
# Inspect the first few rows of data
cleaned_xmas_bo_rows[:2]
###Output
_____no_output_____
###Markdown
Now we can pass this list of lists in `cleaned_xmas_bo_rows` to pandas's `DataFrame` function and hopefully get a nice table out.
###Code
xmas_bo_df = pd.DataFrame(cleaned_xmas_bo_rows)
# Inspect
xmas_bo_df.head()
###Output
_____no_output_____
###Markdown
We need to do a bit of cleanup on this data:* Columns 0 and 11 are all empty* Add column names
###Code
# Drop columns 0 and 11 and overwrite the xmas_box_df variable
xmas_bo_df = xmas_bo_df.drop(columns=[0,11])
# Rename the columns
xmas_bo_df.columns = ['Rank','Last rank','Movie','Distributor','Gross',
'Change','Theaters','Per theater','Total gross',
'Days']
# Write to disk
# xmas_bo_df.to_csv('christmas_2018_box_office.csv',encoding='utf8')
# Inspect
xmas_bo_df.head()
###Output
_____no_output_____
###Markdown
`pandas`'s `read_html`That was a good amount of work just to get this simple HTML table into Python. But it was important to cover how table elements moved from a string in `requests`, into a soup object from `BeautifulSoup`, into a list of data, and finally into `pandas`. `pandas` also has powerful functionality for reading tables directly from HTML. If we convert the soup of the data table (`xmas_bo_tables[1]`) back into a string, `pandas` can read it directly into a table. There are a few idiosyncrasies here: the result is a list of dataframes—even if there's only a single table/dataframe—so we need to return the first (and only) element of this list. This is why there's a `[0]` at the end and the `.head()` is just to show the first five rows.
###Code
xmas_bo_table_as_string = str(xmas_bo_tables[1])
pd.read_html(xmas_bo_table_as_string)[0].head()
###Output
_____no_output_____
###Markdown
The column names got lumped in as rows, but we can fix this as well with the `read_html` function by passing the row index where the column lives. In this case, it is the first row, so we pass `header=0`.
###Code
pd.read_html(xmas_bo_table_as_string,header=0)[0].head()
###Output
_____no_output_____
###Markdown
Finally, you can point `read_html` at a URL without any `requests` or `BeautifulSoup` and get all the tables on the page as a list of DataFrames. `pandas` is simply doing the `requests` and `BeautifulSoup` on the inside. Interestingly, I'm getting a [HTTP 403](https://en.wikipedia.org/wiki/HTTP_403) error indicating the server (The Numbers) is forbidding the client (us) from accessing their data using this strategy. We will discuss next week whether and how to handle situations where web servers refuse connections from non-human clients. In this case, you cannot use the off-the-shelf `read_html` approach and would need to revert to using the `requests`+`BeautifulSoup` approach above.
###Code
simple_tables = pd.read_html('https://www.the-numbers.com/box-office-chart/daily/2018/12/25')
simple_tables
###Output
_____no_output_____
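###Markdown
As a minimal sketch of that fallback (hedged: the `User-Agent` value here is an assumption, and there is no guarantee this particular server will accept it), we can make the request ourselves with `requests`, optionally sending a browser-like `User-Agent` header, and then hand the returned HTML text to `read_html`.
###Code
# A sketch, not a guaranteed workaround: fetch the page ourselves, optionally
# with a browser-like User-Agent header, then let pandas parse the HTML text
headers = {'User-Agent': 'Mozilla/5.0'}
response = requests.get('https://www.the-numbers.com/box-office-chart/daily/2018/12/25',
                        headers=headers)
if response.status_code == 200:
    # As with find_all above, the data table should be the second table on the page
    daily_chart_df = pd.read_html(response.text)[1]
    print(daily_chart_df.head())
else:
    print('Still blocked, status code:', response.status_code)
###Output
_____no_output_____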
###Markdown
If we point it at Wikipedia's [2018 in film](https://en.wikipedia.org/wiki/2018_in_film), it will pull all of the tables present on the page.
###Code
simple_tables = pd.read_html('https://en.wikipedia.org/wiki/2018_in_film')
###Output
_____no_output_____
###Markdown
The first three correspond to the "Year in film" navigation box on the side and are poorly-formatted by default.
###Code
simple_tables[0]
###Output
_____no_output_____
###Markdown
The fourth table in the `simple_tables` list we got from parsing the Wikipedia page with `read_html` is the table under the "Highest-grossing films" section.
###Code
simple_tables[3]
###Output
_____no_output_____
###Markdown
You can pass the "header" option in `read_html` to make sure the column names from a particular row (in this case the first row) do not accidentally become rows of data.
###Code
wiki_top_grossing_t = pd.read_html('https://en.wikipedia.org/wiki/2018_in_film',header=0)[3]
wiki_top_grossing_t
###Output
_____no_output_____
###Markdown
Note that there are still a few errors in this table because the "Disney" value in the Wikipedia table spans two rows and `read_html` thus skips the "Distributor" value for Black Panther.
###Code
# Copy the value at index position 1, column position Distributor to Wordwide gross
wiki_top_grossing_t.loc[1,'Worldwide gross'] = wiki_top_grossing_t.loc[1,'Distributor']
# Change the value at 1, Distributor to Disney
wiki_top_grossing_t.loc[1,'Distributor'] = 'Disney'
wiki_top_grossing_t
###Output
_____no_output_____
###Markdown
Writing your own parser

We will return to the historical Oscars data. Even though data as prominent as this is likely to already exist in tabular format somewhere, we will maintain the illusion that we are the first to both scrape it and parse it into a tabular format. Our goal here is to write a parser that will (ideally) work across multiple pages; in this case, each of the award years.

One of the first things we should do before writing any code is come up with a model of what we want our data to look like at the end of this. The following is an intuitive and "tidy" format, but you might come up with alternatives based on your analysis and modeling needs.

| *Year* | *Category* | *Nominee* | *Movie* | *Won* |
| --- | --- | --- | --- | --- |
| 2019 | Actor in a leading role | Christian Bale | Vice | NA |
| 2019 | Actor in a leading role | Bradley Cooper | A Star Is Born | NA |
| 2019 | Actor in a leading role | Willem Dafoe | At Eternity's Gate | NA |
| 2019 | Actor in a leading role | Rami Malek | Bohemian Rhapsody | NA |
| 2019 | Actor in a leading role | Viggo Mortensen | Green Book | NA |

We will begin with writing a parser for a (hopefully!) representative year, then scrape the data for all the years, then apply the parser to each of those years, and finally combine all the years' data together into a large data set.

Let's begin with the representative year. 2019 is actually not a great case because it is missing information about who won and lost, since (at the time of my writing this notebook) the winners had not been announced. We will use 2018 instead and make the profoundly naïve assumption that it should work the same going back in time.

Start off with using `requests` to get the data and then use `BeautifulSoup` to turn it into soup we can parse through.
###Code
oscars2018_raw = requests.get('https://www.oscars.org/oscars/ceremonies/2018').text
oscars2018_soup = BeautifulSoup(oscars2018_raw)
###Output
_____no_output_____
###Markdown
Using the Inspect tool exercise from Class 1, the `<div class="view-grouping">` seems to be the most promising tag for us to extract. Use `.find_all('div',{'class':'view-grouping'})` to (hopefully!) get all of these award groups. Inspect the first and last ones to make sure they look coherent.
###Code
# Get all the groups that have a <div class="view-grouping"> tag
oscars2018_groups = oscars2018_soup.find_all('div',{'class':'view-grouping'})
# Inspect the first one
oscars2018_groups[0]
###Output
_____no_output_____
###Markdown
The last group is something besides "Writing (Original Screenplay)" and it's not clear to me where this tag's content renders on the page.
###Code
# Inspect the last one
oscars2018_groups[-1]
###Output
_____no_output_____
###Markdown
This puts us into something of a bind going forward: if the `.find_all` returns more groupings than we expected, then it's not sufficiently precise to identify *only* groupings of nominees. However, there do not appear to be any child tags in the `oscars2018_groups[0]` grouping that uniquely differentiate it from the child tags present in the `oscars2018_groups[-1]` grouping. Another alternative is to simply parse the first 24 groupings, but this is a very brittle solution since other years' awards might have more or fewer groupings (a quick sketch of this follows the next cell).
###Code
len(oscars2018_groups)
###Output
_____no_output_____
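###Markdown
Just to make that brittle alternative concrete, the slice would look like the sketch below. It only works because we happen to know the first 24 groupings in this year's page are the award categories; a year with a different number of categories would silently break it.
###Code
# A sketch of the brittle alternative: hard-code the number of award categories
first_24_groups = oscars2018_groups[:24]
len(first_24_groups)
###Output
_____no_output_____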
###Markdown
Navigating the HTML tree to find more specific parent elements

A third alternative is to leverage the tree structure of HTML and get the parent element in the hopes it is more unique than its children. In this case something like the `<div>` whose id contains "honorees" (the code below searches for `quicktabs-tabpage-honorees-0`) is a promising lead. Use `find_all` to search for this tag and confirm there is only the one matching `<div>` element (with its children) rather than multiple `<div>` elements.
###Code
# Get the new tag group
oscars2018_parent_group = oscars2018_soup.find_all('div',{'id':'quicktabs-tabpage-honorees-0'})
# Hopefully there is only one group matching this pattern
len(oscars2018_parent_group)
###Output
_____no_output_____
###Markdown
So far so good. Now we can use `find_all` on the soup for this `<div>` to search *within* this specific parent group, and hopefully there should be only the 24 awards groupings.
###Code
# Note the addition of the [0] since the _parent_group is a list with 1 element in it
# We just extract that single element (which is a soup) and then we can use find_all on it
oscars2018_true_groups = oscars2018_parent_group[0].find_all('div',{'class':'view-grouping'})
len(oscars2018_true_groups)
###Output
_____no_output_____
###Markdown
Hallelujah! The award names for each group live inside a `<div class="view-grouping-header">`, so we can `find_all` for those, loop through each, and print out the name.
###Code
for group in oscars2018_parent_group[0].find_all('div',{'class':'view-grouping-header'}):
print(group.text)
###Output
Actor in a Leading Role
Actor in a Supporting Role
Actress in a Leading Role
Actress in a Supporting Role
Animated Feature Film
Cinematography
Costume Design
Directing
Documentary (Feature)
Documentary (Short Subject)
Film Editing
Foreign Language Film
Makeup and Hairstyling
Music (Original Score)
Music (Original Song)
Best Picture
Production Design
Short Film (Animated)
Short Film (Live Action)
Sound Editing
Sound Mixing
Visual Effects
Writing (Adapted Screenplay)
Writing (Original Screenplay)
###Markdown
It turns out that the Oscars site loads a bunch of extra data that it does not render, living underneath a separate `<div>`, which is where the 81 extra "awards" come from. This appears to be an attempt to organize the page by film, rather than by category.

Navigating the HTML tree from a specific child to find specific generic parents

Now bear with me through some additional and presently unnecessary pain. Above, we were able to isolate the 24 category groupings we wanted through finding an appropriate *parent* tag and then working *down*. But I also want to show how we could identify the same 24 category groups by finding an appropriate *child* tag and working back up. This could be helpful in other situations where the elements are hard to disambiguate.

Let's start by finding the `<div class="view-grouping">` for "Actor in a Leading Role" from the list of all such tags in `oscars2018_groups`.
###Code
oscars2018_groups[0]
###Output
_____no_output_____
###Markdown
Rather than finding *all* the `<div class="view-grouping">` tags present in the page, we only want the 23 *siblings* of this specific tag. We can use `find_next_siblings()` to get these 23 siblings. I do not like this method very much because you have to find the "eldest" sibling and then combine it with its siblings later on if you want all the children. In this case, you'd need to keep track of the `<div>` corresponding to Best Actor and then combine it with its 23 siblings, rather than an approach that simply returns all 24 in a single list (a quick sketch of that bookkeeping follows the next cell).
###Code
oscars2018_group0_next_siblings = oscars2018_groups[0].find_next_siblings()
len(oscars2018_group0_next_siblings)
###Output
_____no_output_____
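###Markdown
For completeness, here is a minimal sketch of the bookkeeping described above: keep the Best Actor `<div>` we started from and prepend it to its 23 siblings so that all 24 groupings end up in a single list. This assumes the variables defined in the cells above.
###Code
# Combine the "eldest" grouping with its siblings to get all 24 in one list
all_groups_via_siblings = [oscars2018_groups[0]] + oscars2018_group0_next_siblings
len(all_groups_via_siblings)
###Output
_____no_output_____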
###Markdown
We could also go up to get the parent and then find all 24 of the `<div class="view-grouping">` elements among the children.
###Code
# From the child we like, get its parent
oscars2018_group0_parent = oscars2018_groups[0].parent
# Now with the parent, find all the relevant children
oscars2018_group0_parent_children = oscars2018_group0_parent.find_all('div',{'class':'view-grouping'})
# Confirm
len(oscars2018_group0_parent_children)
###Output
_____no_output_____
###Markdown
Checking the relevant fields

That seemed like a major digression away from the core task of writing a parser, but it is critical that we write a parser that parses *only* the data we want and nothing else. Now that we have our 24 awards groups in `oscars2018_true_groups`, let's break one open and extract all the yummy data waiting inside.

There are a few helpfully named `<div>` sub-classes that should make extracting this data a bit easier:

* `view-grouping-header` - name of the category
* a `<span>` containing "Winner" - marks the winning nominee
* `views-field-field-actor-name` - name of the actor
* `views-field-title` - title of the movie
###Code
oscars2018_true_groups[0]
###Output
_____no_output_____
###Markdown
"Zoom in" to the `views-field-field-actor-name`.
###Code
oscars2018_true_groups[0].find_all('div',{'class':"views-field views-field-field-actor-name"})
###Output
_____no_output_____
###Markdown
These `<h4>` tags may be more specific and helpful.
###Code
oscars2018_true_groups[0].find_all('h4')
###Output
_____no_output_____
###Markdown
Zoom into the `views-field-title`.
###Code
oscars2018_true_groups[0].find_all('div',{'class':"views-field views-field-title"})
###Output
_____no_output_____
###Markdown
These `<span>` tags may be more specific and helpful, but there are also empty tags here clogging things up.
###Code
oscars2018_true_groups[0].find_all('span')
###Output
_____no_output_____
###Markdown
As a battle-scarred web scraper, let me continue to emphasize the importance of quick-checking your assumptions before committing to writing code. Are these fields still appropriate for other awards categories? Let's check the last category, Writing (Original Screenplay). Are the `<div>`s for "field-actor-name" still people and the ones for "field-title" still movies? Nope. Looking back at the web page, it's now obvious that the movie title and the person who gets the award are flipped between the actor/actress categories and the other awards categories. We're going to have to keep this in mind going forward!
###Code
oscars2018_true_groups[-1].find_all('div',{'class':"views-field views-field-field-actor-name"})
oscars2018_true_groups[-1].find_all('div',{'class':"views-field views-field-title"})
###Output
_____no_output_____
###Markdown
Writing the core parser functionality

How will we map the contents of the HTML to the columns of our data model?

* **Year**: All the awards are from the same year, which is also in the URL
* **Category**: the `<h2>` header of each award grouping
* **Nominee**: the first sub-`<div>` (the actor-name field) for actors, the second for non-actors
* **Movie**: the second sub-`<div>` (the title field) for actors, the first for non-actors
* **Won**: 1 for the nominee with the "Winner" label as a sibling, 0 for everyone else; alternatively, just the top nominee in each grouping
###Code
oscars2018_true_groups[0]
category = oscars2018_true_groups[0].find_all('h2')[0].text
print("The name of the category is:",category)
names = []
for _nominee in oscars2018_true_groups[0].find_all('h4'):
nominee_name = _nominee.text
names.append(nominee_name)
print("The name of a nominee is:",nominee_name)
movies = []
for _movie in oscars2018_true_groups[0].find_all('span'):
if len(_movie.text) > 0:
movie_name = _movie.text.strip()
movies.append(movie_name)
print("The name of a movie is:",movie_name)
###Output
The name of a movie is: Winner
The name of a movie is: Darkest Hour
The name of a movie is: Call Me by Your Name
The name of a movie is: Phantom Thread
The name of a movie is: Get Out
The name of a movie is: Roman J. Israel, Esq.
###Markdown
One strategy is to use Python's built-in [`zip`](https://docs.python.org/3.7/library/functions.html#zip) function to combine elements from different lists together. But `zip` is a bit too slick and abstract for my tastes.
###Code
# The elements of each list being combined need to be the same size
# So we make a list of the category name and multiply it by 5 to make it the same size as the others
list(zip([category]*5,names,movies))
###Output
_____no_output_____
###Markdown
Another strategy is to use the `<div class="views-row">`s for each nominee and extract the relevant information from their sub-`<div>`s. This is a bit more intuitive in the sense of reading from top to bottom and also makes it easier to capture the winners and losers based on position.
###Code
actor_nominees = oscars2018_true_groups[0].find_all('div',{'class':'views-row'})
for i,nominee in enumerate(actor_nominees):
# If in the first position, the nominee won
if i == 0:
winner = 'Won'
# Otherwise, the nominee lost
else:
winner = 'Lost'
# Get a list of all the sub-divs
subdivs = nominee.find_all('div')
# The first subdiv (for an actor) is the name
name = subdivs[0].text.strip()
# The second subdiv (for an actor) is the movie name
movie = subdivs[1].text.strip()
print("{0} was nominated for \"{1}\" and {2}.".format(name,movie,winner))
###Output
Gary Oldman was nominated for "Darkest Hour" and Won.
Timothée Chalamet was nominated for "Call Me by Your Name" and Lost.
Daniel Day-Lewis was nominated for "Phantom Thread" and Lost.
Daniel Kaluuya was nominated for "Get Out" and Lost.
Denzel Washington was nominated for "Roman J. Israel, Esq." and Lost.
###Markdown
Check that reversing "movie" and "name" works for another award category like original screenplay (`oscars2018_true_groups[-1]`). There's some weirdness with "Written by" and "Story by" filtering in here rather than simply names, which may need to get fixed in a final cleaning step, but I would want to talk to a domain expert about the differences between these labels.
###Code
original_screenplay_nominees = oscars2018_true_groups[-1].find_all('div',{'class':'views-row'})
for i,nominee in enumerate(original_screenplay_nominees):
if i == 0:
winner = 'Won'
else:
winner = 'Lost'
subdivs = nominee.find_all('div')
# movie and name reversed
movie = subdivs[0].text.strip()
name = subdivs[1].text.strip()
print("{0} was nominated for \"{1}\" and {2}.".format(name,movie,winner))
###Output
Written by Jordan Peele was nominated for "Get Out" and Won.
Written by Emily V. Gordon & Kumail Nanjiani was nominated for "The Big Sick" and Lost.
Written by Greta Gerwig was nominated for "Lady Bird" and Lost.
Screenplay by Guillermo del Toro & Vanessa Taylor; Story by Guillermo del Toro was nominated for "The Shape of Water" and Lost.
Written by Martin McDonagh was nominated for "Three Billboards outside Ebbing, Missouri" and Lost.
###Markdown
This was just for the Best Actor category; now let's add another layer to loop over all the different awards categories. We can see the movie name and awardee switch is important now, since most of the categories are reversed.
###Code
for group in oscars2018_true_groups:
category = group.find_all('h2')[0].text
for i,nominee in enumerate(group.find_all('div',{'class':'views-row'})):
if i == 0:
winner = 'Won'
else:
winner = 'Lost'
subdivs = nominee.find_all('div')
name = subdivs[0].text.strip()
movie = subdivs[1].text.strip()
print("{0} was nominated in {1} for {2}\" and {3}.".format(name,category,movie,winner))
###Output
Gary Oldman was nominated in Actor in a Leading Role for Darkest Hour" and Won.
Timothée Chalamet was nominated in Actor in a Leading Role for Call Me by Your Name" and Lost.
Daniel Day-Lewis was nominated in Actor in a Leading Role for Phantom Thread" and Lost.
Daniel Kaluuya was nominated in Actor in a Leading Role for Get Out" and Lost.
Denzel Washington was nominated in Actor in a Leading Role for Roman J. Israel, Esq." and Lost.
Sam Rockwell was nominated in Actor in a Supporting Role for Three Billboards outside Ebbing, Missouri" and Won.
Willem Dafoe was nominated in Actor in a Supporting Role for The Florida Project" and Lost.
Woody Harrelson was nominated in Actor in a Supporting Role for Three Billboards outside Ebbing, Missouri" and Lost.
Richard Jenkins was nominated in Actor in a Supporting Role for The Shape of Water" and Lost.
Christopher Plummer was nominated in Actor in a Supporting Role for All the Money in the World" and Lost.
Frances McDormand was nominated in Actress in a Leading Role for Three Billboards outside Ebbing, Missouri" and Won.
Sally Hawkins was nominated in Actress in a Leading Role for The Shape of Water" and Lost.
Margot Robbie was nominated in Actress in a Leading Role for I, Tonya" and Lost.
Saoirse Ronan was nominated in Actress in a Leading Role for Lady Bird" and Lost.
Meryl Streep was nominated in Actress in a Leading Role for The Post" and Lost.
Allison Janney was nominated in Actress in a Supporting Role for I, Tonya" and Won.
Mary J. Blige was nominated in Actress in a Supporting Role for Mudbound" and Lost.
Lesley Manville was nominated in Actress in a Supporting Role for Phantom Thread" and Lost.
Laurie Metcalf was nominated in Actress in a Supporting Role for Lady Bird" and Lost.
Octavia Spencer was nominated in Actress in a Supporting Role for The Shape of Water" and Lost.
Coco was nominated in Animated Feature Film for Lee Unkrich and Darla K. Anderson" and Won.
The Boss Baby was nominated in Animated Feature Film for Tom McGrath and Ramsey Naito" and Lost.
The Breadwinner was nominated in Animated Feature Film for Nora Twomey and Anthony Leo" and Lost.
Ferdinand was nominated in Animated Feature Film for Carlos Saldanha and Lori Forte" and Lost.
Loving Vincent was nominated in Animated Feature Film for Dorota Kobiela, Hugh Welchman and Ivan Mactaggart" and Lost.
Blade Runner 2049 was nominated in Cinematography for Roger A. Deakins" and Won.
Darkest Hour was nominated in Cinematography for Bruno Delbonnel" and Lost.
Dunkirk was nominated in Cinematography for Hoyte van Hoytema" and Lost.
Mudbound was nominated in Cinematography for Rachel Morrison" and Lost.
The Shape of Water was nominated in Cinematography for Dan Laustsen" and Lost.
Phantom Thread was nominated in Costume Design for Mark Bridges" and Won.
Beauty and the Beast was nominated in Costume Design for Jacqueline Durran" and Lost.
Darkest Hour was nominated in Costume Design for Jacqueline Durran" and Lost.
The Shape of Water was nominated in Costume Design for Luis Sequeira" and Lost.
Victoria & Abdul was nominated in Costume Design for Consolata Boyle" and Lost.
The Shape of Water was nominated in Directing for Guillermo del Toro" and Won.
Dunkirk was nominated in Directing for Christopher Nolan" and Lost.
Get Out was nominated in Directing for Jordan Peele" and Lost.
Lady Bird was nominated in Directing for Greta Gerwig" and Lost.
Phantom Thread was nominated in Directing for Paul Thomas Anderson" and Lost.
Icarus was nominated in Documentary (Feature) for Bryan Fogel and Dan Cogan" and Won.
Abacus: Small Enough to Jail was nominated in Documentary (Feature) for Steve James, Mark Mitten and Julie Goldman" and Lost.
Faces Places was nominated in Documentary (Feature) for Agnès Varda, JR and Rosalie Varda" and Lost.
Last Men in Aleppo was nominated in Documentary (Feature) for Feras Fayyad, Kareem Abeed and Søren Steen Jespersen" and Lost.
Strong Island was nominated in Documentary (Feature) for Yance Ford and Joslyn Barnes" and Lost.
Heaven Is a Traffic Jam on the 405 was nominated in Documentary (Short Subject) for Frank Stiefel" and Won.
Edith+Eddie was nominated in Documentary (Short Subject) for Laura Checkoway and Thomas Lee Wright" and Lost.
Heroin(e) was nominated in Documentary (Short Subject) for Elaine McMillion Sheldon and Kerrin Sheldon" and Lost.
Knife Skills was nominated in Documentary (Short Subject) for Thomas Lennon" and Lost.
Traffic Stop was nominated in Documentary (Short Subject) for Kate Davis and David Heilbroner" and Lost.
Dunkirk was nominated in Film Editing for Lee Smith" and Won.
Baby Driver was nominated in Film Editing for Paul Machliss and Jonathan Amos" and Lost.
I, Tonya was nominated in Film Editing for Tatiana S. Riegel" and Lost.
The Shape of Water was nominated in Film Editing for Sidney Wolinsky" and Lost.
Three Billboards outside Ebbing, Missouri was nominated in Film Editing for Jon Gregory" and Lost.
A Fantastic Woman was nominated in Foreign Language Film for Chile" and Won.
The Insult was nominated in Foreign Language Film for Lebanon" and Lost.
Loveless was nominated in Foreign Language Film for Russia" and Lost.
On Body and Soul was nominated in Foreign Language Film for Hungary" and Lost.
The Square was nominated in Foreign Language Film for Sweden" and Lost.
Darkest Hour was nominated in Makeup and Hairstyling for Kazuhiro Tsuji, David Malinowski and Lucy Sibbick" and Won.
Victoria & Abdul was nominated in Makeup and Hairstyling for Daniel Phillips and Lou Sheppard" and Lost.
Wonder was nominated in Makeup and Hairstyling for Arjen Tuiten" and Lost.
The Shape of Water was nominated in Music (Original Score) for Alexandre Desplat" and Won.
Dunkirk was nominated in Music (Original Score) for Hans Zimmer" and Lost.
Phantom Thread was nominated in Music (Original Score) for Jonny Greenwood" and Lost.
Star Wars: The Last Jedi was nominated in Music (Original Score) for John Williams" and Lost.
Three Billboards outside Ebbing, Missouri was nominated in Music (Original Score) for Carter Burwell" and Lost.
Remember Me was nominated in Music (Original Song) for from Coco; Music and Lyric by Kristen Anderson-Lopez and Robert Lopez" and Won.
Mighty River was nominated in Music (Original Song) for from Mudbound; Music and Lyric by Mary J. Blige, Raphael Saadiq and Taura Stinson" and Lost.
Mystery Of Love was nominated in Music (Original Song) for from Call Me by Your Name; Music and Lyric by Sufjan Stevens" and Lost.
Stand Up For Something was nominated in Music (Original Song) for from Marshall; Music by Diane Warren; Lyric by Lonnie R. Lynn and Diane Warren" and Lost.
This Is Me was nominated in Music (Original Song) for from The Greatest Showman; Music and Lyric by Benj Pasek and Justin Paul" and Lost.
The Shape of Water was nominated in Best Picture for Guillermo del Toro and J. Miles Dale, Producers" and Won.
Call Me by Your Name was nominated in Best Picture for Peter Spears, Luca Guadagnino, Emilie Georges and Marco Morabito, Producers" and Lost.
Darkest Hour was nominated in Best Picture for Tim Bevan, Eric Fellner, Lisa Bruce, Anthony McCarten and Douglas Urbanski, Producers" and Lost.
Dunkirk was nominated in Best Picture for Emma Thomas and Christopher Nolan, Producers" and Lost.
Get Out was nominated in Best Picture for Sean McKittrick, Jason Blum, Edward H. Hamm Jr. and Jordan Peele, Producers" and Lost.
Lady Bird was nominated in Best Picture for Scott Rudin, Eli Bush and Evelyn O'Neill, Producers" and Lost.
Phantom Thread was nominated in Best Picture for JoAnne Sellar, Paul Thomas Anderson, Megan Ellison and Daniel Lupi, Producers" and Lost.
The Post was nominated in Best Picture for Amy Pascal, Steven Spielberg and Kristie Macosko Krieger, Producers" and Lost.
Three Billboards outside Ebbing, Missouri was nominated in Best Picture for Graham Broadbent, Pete Czernin and Martin McDonagh, Producers" and Lost.
The Shape of Water was nominated in Production Design for Production Design: Paul Denham Austerberry; Set Decoration: Shane Vieau and Jeffrey A. Melvin" and Won.
Beauty and the Beast was nominated in Production Design for Production Design: Sarah Greenwood; Set Decoration: Katie Spencer" and Lost.
Blade Runner 2049 was nominated in Production Design for Production Design: Dennis Gassner; Set Decoration: Alessandra Querzola" and Lost.
Darkest Hour was nominated in Production Design for Production Design: Sarah Greenwood; Set Decoration: Katie Spencer" and Lost.
Dunkirk was nominated in Production Design for Production Design: Nathan Crowley; Set Decoration: Gary Fettis" and Lost.
Dear Basketball was nominated in Short Film (Animated) for Glen Keane and Kobe Bryant" and Won.
Garden Party was nominated in Short Film (Animated) for Victor Caire and Gabriel Grapperon" and Lost.
Lou was nominated in Short Film (Animated) for Dave Mullins and Dana Murray" and Lost.
Negative Space was nominated in Short Film (Animated) for Max Porter and Ru Kuwahata" and Lost.
Revolting Rhymes was nominated in Short Film (Animated) for Jakob Schuh and Jan Lachauer" and Lost.
The Silent Child was nominated in Short Film (Live Action) for Chris Overton and Rachel Shenton" and Won.
DeKalb Elementary was nominated in Short Film (Live Action) for Reed Van Dyk" and Lost.
The Eleven O'Clock was nominated in Short Film (Live Action) for Derin Seale and Josh Lawson" and Lost.
My Nephew Emmett was nominated in Short Film (Live Action) for Kevin Wilson, Jr." and Lost.
Watu Wote/All of Us was nominated in Short Film (Live Action) for Katja Benrath and Tobias Rosen" and Lost.
Dunkirk was nominated in Sound Editing for Richard King and Alex Gibson" and Won.
Baby Driver was nominated in Sound Editing for Julian Slater" and Lost.
Blade Runner 2049 was nominated in Sound Editing for Mark Mangini and Theo Green" and Lost.
The Shape of Water was nominated in Sound Editing for Nathan Robitaille and Nelson Ferreira" and Lost.
Star Wars: The Last Jedi was nominated in Sound Editing for Matthew Wood and Ren Klyce" and Lost.
Dunkirk was nominated in Sound Mixing for Gregg Landaker, Gary A. Rizzo and Mark Weingarten" and Won.
Baby Driver was nominated in Sound Mixing for Julian Slater, Tim Cavagin and Mary H. Ellis" and Lost.
Blade Runner 2049 was nominated in Sound Mixing for Ron Bartlett, Doug Hemphill and Mac Ruth" and Lost.
The Shape of Water was nominated in Sound Mixing for Christian Cooke, Brad Zoern and Glen Gauthier" and Lost.
Star Wars: The Last Jedi was nominated in Sound Mixing for David Parker, Michael Semanick, Ren Klyce and Stuart Wilson" and Lost.
Blade Runner 2049 was nominated in Visual Effects for John Nelson, Gerd Nefzer, Paul Lambert and Richard R. Hoover" and Won.
Guardians of the Galaxy Vol. 2 was nominated in Visual Effects for Christopher Townsend, Guy Williams, Jonathan Fawkner and Dan Sudick" and Lost.
Kong: Skull Island was nominated in Visual Effects for Stephen Rosenbaum, Jeff White, Scott Benza and Mike Meinardus" and Lost.
Star Wars: The Last Jedi was nominated in Visual Effects for Ben Morris, Mike Mulholland, Neal Scanlan and Chris Corbould" and Lost.
War for the Planet of the Apes was nominated in Visual Effects for Joe Letteri, Daniel Barrett, Dan Lemmon and Joel Whist" and Lost.
Call Me by Your Name was nominated in Writing (Adapted Screenplay) for Screenplay by James Ivory" and Won.
The Disaster Artist was nominated in Writing (Adapted Screenplay) for Screenplay by Scott Neustadter & Michael H. Weber" and Lost.
Logan was nominated in Writing (Adapted Screenplay) for Screenplay by Scott Frank & James Mangold and Michael Green; Story by James Mangold" and Lost.
Molly's Game was nominated in Writing (Adapted Screenplay) for Written for the screen by Aaron Sorkin" and Lost.
Mudbound was nominated in Writing (Adapted Screenplay) for Screenplay by Virgil Williams and Dee Rees" and Lost.
Get Out was nominated in Writing (Original Screenplay) for Written by Jordan Peele" and Won.
The Big Sick was nominated in Writing (Original Screenplay) for Written by Emily V. Gordon & Kumail Nanjiani" and Lost.
Lady Bird was nominated in Writing (Original Screenplay) for Written by Greta Gerwig" and Lost.
The Shape of Water was nominated in Writing (Original Screenplay) for Screenplay by Guillermo del Toro & Vanessa Taylor; Story by Guillermo del Toro" and Lost.
Three Billboards outside Ebbing, Missouri was nominated in Writing (Original Screenplay) for Written by Martin McDonagh" and Lost.
###Markdown
Include some flow control: if "Actor" or "Actress" appears in the category title, then do nominee name first and movie name second; otherwise do movie name first and nominee name second.
###Code
for group in oscars2018_true_groups:
category = group.find_all('h2')[0].text
if 'Actor' in category or 'Actress' in category:
for i,nominee in enumerate(group.find_all('div',{'class':'views-row'})):
if i == 0:
winner = 'Won'
else:
winner = 'Lost'
subdivs = nominee.find_all('div')
name = subdivs[0].text.strip()
movie = subdivs[1].text.strip()
print("{0} was nominated in {1} for \"{2}\" and {3}.".format(name,category,movie,winner))
else:
for i,nominee in enumerate(group.find_all('div',{'class':'views-row'})):
if i == 0:
winner = 'Won'
else:
winner = 'Lost'
subdivs = nominee.find_all('div')
movie = subdivs[0].text.strip()
name = subdivs[1].text.strip()
print("\"{0}\" was nominated in {1} for {2} and {3}.".format(name,category,movie,winner))
###Output
Gary Oldman was nominated in Actor in a Leading Role for "Darkest Hour" and Won.
Timothée Chalamet was nominated in Actor in a Leading Role for "Call Me by Your Name" and Lost.
Daniel Day-Lewis was nominated in Actor in a Leading Role for "Phantom Thread" and Lost.
Daniel Kaluuya was nominated in Actor in a Leading Role for "Get Out" and Lost.
Denzel Washington was nominated in Actor in a Leading Role for "Roman J. Israel, Esq." and Lost.
Sam Rockwell was nominated in Actor in a Supporting Role for "Three Billboards outside Ebbing, Missouri" and Won.
Willem Dafoe was nominated in Actor in a Supporting Role for "The Florida Project" and Lost.
Woody Harrelson was nominated in Actor in a Supporting Role for "Three Billboards outside Ebbing, Missouri" and Lost.
Richard Jenkins was nominated in Actor in a Supporting Role for "The Shape of Water" and Lost.
Christopher Plummer was nominated in Actor in a Supporting Role for "All the Money in the World" and Lost.
Frances McDormand was nominated in Actress in a Leading Role for "Three Billboards outside Ebbing, Missouri" and Won.
Sally Hawkins was nominated in Actress in a Leading Role for "The Shape of Water" and Lost.
Margot Robbie was nominated in Actress in a Leading Role for "I, Tonya" and Lost.
Saoirse Ronan was nominated in Actress in a Leading Role for "Lady Bird" and Lost.
Meryl Streep was nominated in Actress in a Leading Role for "The Post" and Lost.
Allison Janney was nominated in Actress in a Supporting Role for "I, Tonya" and Won.
Mary J. Blige was nominated in Actress in a Supporting Role for "Mudbound" and Lost.
Lesley Manville was nominated in Actress in a Supporting Role for "Phantom Thread" and Lost.
Laurie Metcalf was nominated in Actress in a Supporting Role for "Lady Bird" and Lost.
Octavia Spencer was nominated in Actress in a Supporting Role for "The Shape of Water" and Lost.
"Lee Unkrich and Darla K. Anderson" was nominated in Animated Feature Film for Coco and Won.
"Tom McGrath and Ramsey Naito" was nominated in Animated Feature Film for The Boss Baby and Lost.
"Nora Twomey and Anthony Leo" was nominated in Animated Feature Film for The Breadwinner and Lost.
"Carlos Saldanha and Lori Forte" was nominated in Animated Feature Film for Ferdinand and Lost.
"Dorota Kobiela, Hugh Welchman and Ivan Mactaggart" was nominated in Animated Feature Film for Loving Vincent and Lost.
"Roger A. Deakins" was nominated in Cinematography for Blade Runner 2049 and Won.
"Bruno Delbonnel" was nominated in Cinematography for Darkest Hour and Lost.
"Hoyte van Hoytema" was nominated in Cinematography for Dunkirk and Lost.
"Rachel Morrison" was nominated in Cinematography for Mudbound and Lost.
"Dan Laustsen" was nominated in Cinematography for The Shape of Water and Lost.
"Mark Bridges" was nominated in Costume Design for Phantom Thread and Won.
"Jacqueline Durran" was nominated in Costume Design for Beauty and the Beast and Lost.
"Jacqueline Durran" was nominated in Costume Design for Darkest Hour and Lost.
"Luis Sequeira" was nominated in Costume Design for The Shape of Water and Lost.
"Consolata Boyle" was nominated in Costume Design for Victoria & Abdul and Lost.
"Guillermo del Toro" was nominated in Directing for The Shape of Water and Won.
"Christopher Nolan" was nominated in Directing for Dunkirk and Lost.
"Jordan Peele" was nominated in Directing for Get Out and Lost.
"Greta Gerwig" was nominated in Directing for Lady Bird and Lost.
"Paul Thomas Anderson" was nominated in Directing for Phantom Thread and Lost.
"Bryan Fogel and Dan Cogan" was nominated in Documentary (Feature) for Icarus and Won.
"Steve James, Mark Mitten and Julie Goldman" was nominated in Documentary (Feature) for Abacus: Small Enough to Jail and Lost.
"Agnès Varda, JR and Rosalie Varda" was nominated in Documentary (Feature) for Faces Places and Lost.
"Feras Fayyad, Kareem Abeed and Søren Steen Jespersen" was nominated in Documentary (Feature) for Last Men in Aleppo and Lost.
"Yance Ford and Joslyn Barnes" was nominated in Documentary (Feature) for Strong Island and Lost.
"Frank Stiefel" was nominated in Documentary (Short Subject) for Heaven Is a Traffic Jam on the 405 and Won.
"Laura Checkoway and Thomas Lee Wright" was nominated in Documentary (Short Subject) for Edith+Eddie and Lost.
"Elaine McMillion Sheldon and Kerrin Sheldon" was nominated in Documentary (Short Subject) for Heroin(e) and Lost.
"Thomas Lennon" was nominated in Documentary (Short Subject) for Knife Skills and Lost.
"Kate Davis and David Heilbroner" was nominated in Documentary (Short Subject) for Traffic Stop and Lost.
"Lee Smith" was nominated in Film Editing for Dunkirk and Won.
"Paul Machliss and Jonathan Amos" was nominated in Film Editing for Baby Driver and Lost.
"Tatiana S. Riegel" was nominated in Film Editing for I, Tonya and Lost.
"Sidney Wolinsky" was nominated in Film Editing for The Shape of Water and Lost.
"Jon Gregory" was nominated in Film Editing for Three Billboards outside Ebbing, Missouri and Lost.
"Chile" was nominated in Foreign Language Film for A Fantastic Woman and Won.
"Lebanon" was nominated in Foreign Language Film for The Insult and Lost.
"Russia" was nominated in Foreign Language Film for Loveless and Lost.
"Hungary" was nominated in Foreign Language Film for On Body and Soul and Lost.
"Sweden" was nominated in Foreign Language Film for The Square and Lost.
"Kazuhiro Tsuji, David Malinowski and Lucy Sibbick" was nominated in Makeup and Hairstyling for Darkest Hour and Won.
"Daniel Phillips and Lou Sheppard" was nominated in Makeup and Hairstyling for Victoria & Abdul and Lost.
"Arjen Tuiten" was nominated in Makeup and Hairstyling for Wonder and Lost.
"Alexandre Desplat" was nominated in Music (Original Score) for The Shape of Water and Won.
"Hans Zimmer" was nominated in Music (Original Score) for Dunkirk and Lost.
"Jonny Greenwood" was nominated in Music (Original Score) for Phantom Thread and Lost.
"John Williams" was nominated in Music (Original Score) for Star Wars: The Last Jedi and Lost.
"Carter Burwell" was nominated in Music (Original Score) for Three Billboards outside Ebbing, Missouri and Lost.
"from Coco; Music and Lyric by Kristen Anderson-Lopez and Robert Lopez" was nominated in Music (Original Song) for Remember Me and Won.
"from Mudbound; Music and Lyric by Mary J. Blige, Raphael Saadiq and Taura Stinson" was nominated in Music (Original Song) for Mighty River and Lost.
"from Call Me by Your Name; Music and Lyric by Sufjan Stevens" was nominated in Music (Original Song) for Mystery Of Love and Lost.
"from Marshall; Music by Diane Warren; Lyric by Lonnie R. Lynn and Diane Warren" was nominated in Music (Original Song) for Stand Up For Something and Lost.
"from The Greatest Showman; Music and Lyric by Benj Pasek and Justin Paul" was nominated in Music (Original Song) for This Is Me and Lost.
"Guillermo del Toro and J. Miles Dale, Producers" was nominated in Best Picture for The Shape of Water and Won.
"Peter Spears, Luca Guadagnino, Emilie Georges and Marco Morabito, Producers" was nominated in Best Picture for Call Me by Your Name and Lost.
"Tim Bevan, Eric Fellner, Lisa Bruce, Anthony McCarten and Douglas Urbanski, Producers" was nominated in Best Picture for Darkest Hour and Lost.
"Emma Thomas and Christopher Nolan, Producers" was nominated in Best Picture for Dunkirk and Lost.
"Sean McKittrick, Jason Blum, Edward H. Hamm Jr. and Jordan Peele, Producers" was nominated in Best Picture for Get Out and Lost.
"Scott Rudin, Eli Bush and Evelyn O'Neill, Producers" was nominated in Best Picture for Lady Bird and Lost.
"JoAnne Sellar, Paul Thomas Anderson, Megan Ellison and Daniel Lupi, Producers" was nominated in Best Picture for Phantom Thread and Lost.
"Amy Pascal, Steven Spielberg and Kristie Macosko Krieger, Producers" was nominated in Best Picture for The Post and Lost.
"Graham Broadbent, Pete Czernin and Martin McDonagh, Producers" was nominated in Best Picture for Three Billboards outside Ebbing, Missouri and Lost.
"Production Design: Paul Denham Austerberry; Set Decoration: Shane Vieau and Jeffrey A. Melvin" was nominated in Production Design for The Shape of Water and Won.
"Production Design: Sarah Greenwood; Set Decoration: Katie Spencer" was nominated in Production Design for Beauty and the Beast and Lost.
"Production Design: Dennis Gassner; Set Decoration: Alessandra Querzola" was nominated in Production Design for Blade Runner 2049 and Lost.
"Production Design: Sarah Greenwood; Set Decoration: Katie Spencer" was nominated in Production Design for Darkest Hour and Lost.
"Production Design: Nathan Crowley; Set Decoration: Gary Fettis" was nominated in Production Design for Dunkirk and Lost.
"Glen Keane and Kobe Bryant" was nominated in Short Film (Animated) for Dear Basketball and Won.
"Victor Caire and Gabriel Grapperon" was nominated in Short Film (Animated) for Garden Party and Lost.
"Dave Mullins and Dana Murray" was nominated in Short Film (Animated) for Lou and Lost.
"Max Porter and Ru Kuwahata" was nominated in Short Film (Animated) for Negative Space and Lost.
"Jakob Schuh and Jan Lachauer" was nominated in Short Film (Animated) for Revolting Rhymes and Lost.
"Chris Overton and Rachel Shenton" was nominated in Short Film (Live Action) for The Silent Child and Won.
"Reed Van Dyk" was nominated in Short Film (Live Action) for DeKalb Elementary and Lost.
"Derin Seale and Josh Lawson" was nominated in Short Film (Live Action) for The Eleven O'Clock and Lost.
"Kevin Wilson, Jr." was nominated in Short Film (Live Action) for My Nephew Emmett and Lost.
"Katja Benrath and Tobias Rosen" was nominated in Short Film (Live Action) for Watu Wote/All of Us and Lost.
"Richard King and Alex Gibson" was nominated in Sound Editing for Dunkirk and Won.
"Julian Slater" was nominated in Sound Editing for Baby Driver and Lost.
"Mark Mangini and Theo Green" was nominated in Sound Editing for Blade Runner 2049 and Lost.
"Nathan Robitaille and Nelson Ferreira" was nominated in Sound Editing for The Shape of Water and Lost.
"Matthew Wood and Ren Klyce" was nominated in Sound Editing for Star Wars: The Last Jedi and Lost.
"Gregg Landaker, Gary A. Rizzo and Mark Weingarten" was nominated in Sound Mixing for Dunkirk and Won.
"Julian Slater, Tim Cavagin and Mary H. Ellis" was nominated in Sound Mixing for Baby Driver and Lost.
"Ron Bartlett, Doug Hemphill and Mac Ruth" was nominated in Sound Mixing for Blade Runner 2049 and Lost.
"Christian Cooke, Brad Zoern and Glen Gauthier" was nominated in Sound Mixing for The Shape of Water and Lost.
"David Parker, Michael Semanick, Ren Klyce and Stuart Wilson" was nominated in Sound Mixing for Star Wars: The Last Jedi and Lost.
"John Nelson, Gerd Nefzer, Paul Lambert and Richard R. Hoover" was nominated in Visual Effects for Blade Runner 2049 and Won.
"Christopher Townsend, Guy Williams, Jonathan Fawkner and Dan Sudick" was nominated in Visual Effects for Guardians of the Galaxy Vol. 2 and Lost.
"Stephen Rosenbaum, Jeff White, Scott Benza and Mike Meinardus" was nominated in Visual Effects for Kong: Skull Island and Lost.
"Ben Morris, Mike Mulholland, Neal Scanlan and Chris Corbould" was nominated in Visual Effects for Star Wars: The Last Jedi and Lost.
"Joe Letteri, Daniel Barrett, Dan Lemmon and Joel Whist" was nominated in Visual Effects for War for the Planet of the Apes and Lost.
"Screenplay by James Ivory" was nominated in Writing (Adapted Screenplay) for Call Me by Your Name and Won.
"Screenplay by Scott Neustadter & Michael H. Weber" was nominated in Writing (Adapted Screenplay) for The Disaster Artist and Lost.
"Screenplay by Scott Frank & James Mangold and Michael Green; Story by James Mangold" was nominated in Writing (Adapted Screenplay) for Logan and Lost.
"Written for the screen by Aaron Sorkin" was nominated in Writing (Adapted Screenplay) for Molly's Game and Lost.
"Screenplay by Virgil Williams and Dee Rees" was nominated in Writing (Adapted Screenplay) for Mudbound and Lost.
"Written by Jordan Peele" was nominated in Writing (Original Screenplay) for Get Out and Won.
"Written by Emily V. Gordon & Kumail Nanjiani" was nominated in Writing (Original Screenplay) for The Big Sick and Lost.
"Written by Greta Gerwig" was nominated in Writing (Original Screenplay) for Lady Bird and Lost.
"Screenplay by Guillermo del Toro & Vanessa Taylor; Story by Guillermo del Toro" was nominated in Writing (Original Screenplay) for The Shape of Water and Lost.
"Written by Martin McDonagh" was nominated in Writing (Original Screenplay) for Three Billboards outside Ebbing, Missouri and Lost.
###Markdown
Rather than printing out the information, store it in `nominees_2018` so that we can turn it into a DataFrame.
###Code
nominees_2018 = []
for group in oscars2018_true_groups:
category = group.find_all('h2')[0].text
if 'Actor' in category or 'Actress' in category:
for i,nominee in enumerate(group.find_all('div',{'class':'views-row'})):
if i == 0:
winner = 'Won'
else:
winner = 'Lost'
subdivs = nominee.find_all('div')
name = subdivs[0].text.strip()
movie = subdivs[1].text.strip()
# Swap out the print
# Make a payload for each nominee
nominee_payload = {'Category':category,
'Name':name,
'Movie':movie,
'Year':2018, # We're only looking at 2018 right now
'Winner':winner}
# Add the payload to the list of nominees at top
nominees_2018.append(nominee_payload)
else:
for i,nominee in enumerate(group.find_all('div',{'class':'views-row'})):
if i == 0:
winner = 'Won'
else:
winner = 'Lost'
subdivs = nominee.find_all('div')
movie = subdivs[0].text.strip()
name = subdivs[1].text.strip()
# Swap out the print
nominee_payload = {'Category':category,
'Name':name,
'Movie':movie,
'Year':2018,
'Winner':winner}
nominees_2018.append(nominee_payload)
###Output
_____no_output_____
###Markdown
Moment of truth!
###Code
nominees_df = pd.DataFrame(nominees_2018)
nominees_df
###Output
_____no_output_____
###Markdown
Now let's turn this hulking beast of a parser into a function so we can apply it to other years' nominees in the next step.
###Code
def parse_nominees(true_groups,year):
nominees_list = []
for group in true_groups:
category = group.find_all('h2')[0].text
if 'Actor' in category or 'Actress' in category:
for i,nominee in enumerate(group.find_all('div',{'class':'views-row'})):
if i == 0:
winner = 'Won'
else:
winner = 'Lost'
subdivs = nominee.find_all('div')
name = subdivs[0].text.strip()
movie = subdivs[1].text.strip()
nominee_payload = {'Category':category,
'Name':name,
'Movie':movie,
'Year':year, # We may look at other years
'Winner':winner}
nominees_list.append(nominee_payload)
else:
for i,nominee in enumerate(group.find_all('div',{'class':'views-row'})):
if i == 0:
winner = 'Won'
else:
winner = 'Lost'
subdivs = nominee.find_all('div')
movie = subdivs[0].text.strip()
name = subdivs[1].text.strip()
nominee_payload = {'Category':category,
'Name':name,
'Movie':movie,
'Year':year,
'Winner':winner}
nominees_list.append(nominee_payload)
return nominees_list
###Output
_____no_output_____
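###Markdown
Before moving on, a quick sanity check that the function reproduces what we built by hand: calling it on the 2018 groups we already parsed should give the same rows as `nominees_2018`.
###Code
# Quick check: the function should reproduce the hand-built 2018 results
parsed_2018 = parse_nominees(oscars2018_true_groups, 2018)
pd.DataFrame(parsed_2018).head()
###Output
_____no_output_____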
###Markdown
Iterating vs. parsing to retrieve data

Often the data you are interested in is spread across multiple web pages. In an ideal world, the naming conventions would let you retrieve the data from these pages systematically. In the case of the Oscars, the URLs appear to be consistently formatted: `https://www.oscars.org/oscars/ceremonies/2019` suggests that we could change the 2019 to any other year going back to the start of the Oscars and get that year as well: `https://www.oscars.org/oscars/ceremonies/2018` should get us the page for 2018, and so on. Let's demonstrate each of these strategies with the Oscars data: iterating from 2019 back to 1929 in the URL versus parsing the list of links from the header.

Iterating strategies for retrieving data

The fundamental assumption with this strategy is that the data are stored at URLs in a consistent way that we can access sequentially. In the case of the Oscars, we *should* be able to simply pass each year to the URL in `requests`. Here we want to practice responsible data scraping by including a sleep between each request so that we do not overwhelm the Oscars server with requests. We can use the `sleep` function within `time`.
###Code
from time import sleep
###Output
_____no_output_____
###Markdown
The `sleep(3)` below prevents any more code from progressing for 3 seconds.
###Code
print("The start of something.")
sleep(3)
print("The end of something.")
###Output
The start of something.
The end of something.
###Markdown
The core part of the iterating strategy is simply using Python's [`range`](https://docs.python.org/3.7/library/functions.html#func-range) function to generate a sequence of values. Here, we can use `range` to print out a sequence of URLs that should correspond to awards pages from 2010 through 2019. We can also incorporate the `sleep` functionality and wait a second between each `print` statement—it should now take 10 seconds for this code to finish printing. This simulates how we can use `sleep` to slow down and spread out requests so that we do not overwhelm the servers whose data we are trying to scrape.
###Code
for year in range(2010,2020):
sleep(1)
print('https://www.oscars.org/oscars/ceremonies/{0}'.format(year))
###Output
https://www.oscars.org/oscars/ceremonies/2010
https://www.oscars.org/oscars/ceremonies/2011
https://www.oscars.org/oscars/ceremonies/2012
https://www.oscars.org/oscars/ceremonies/2013
https://www.oscars.org/oscars/ceremonies/2014
https://www.oscars.org/oscars/ceremonies/2015
https://www.oscars.org/oscars/ceremonies/2016
https://www.oscars.org/oscars/ceremonies/2017
https://www.oscars.org/oscars/ceremonies/2018
https://www.oscars.org/oscars/ceremonies/2019
###Markdown
We defined a function `parse_nominees` above that takes the "true groups" of nominees. Let's try to tie these pieces together for all the nominees in the 2010s.
###Code
# Create an empty dictionary to store each year's DataFrame, keyed by year
all_years_nominees = dict()
# For each year starting in 2010 until 2019
for year in range(2010,2020):
# Pause for a second between each request
sleep(1)
# Get the raw HTML
year_raw_html = requests.get('https://www.oscars.org/oscars/ceremonies/{0}'.format(year)).text
# Soup-ify
year_souped_html = BeautifulSoup(year_raw_html)
# Get the parent group
year_parent_group = year_souped_html.find_all('div',{'id':'quicktabs-tabpage-honorees-0'})
# Get the true groups under the parent group
year_true_groups = year_parent_group[0].find_all('div',{'class':'view-grouping'})
# Use our parsing function, passing the year from above
year_nominees = parse_nominees(year_true_groups,year)
# Convert the year_nominees to a DataFrame and add them to all_years_nominees
all_years_nominees[year] = pd.DataFrame(year_nominees)
###Output
_____no_output_____
###Markdown
Combine each of the DataFrames in `all_years_nominees` into a giant DataFrame of all the nominees from 2010-2019.
###Code
all_years_nominees_df = pd.concat(all_years_nominees)
all_years_nominees_df.reset_index(drop=True).head(10)
###Output
_____no_output_____
###Markdown
Parsing strategy for retrieving data

Frustratingly, this iterating strategy may not always hold: maybe some years are skipped or the naming convention changes at some point. We will cover some basics of [error-handling in Python](https://realpython.com/python-exceptions/) that could let us work around errors as they pop up, but this may still result in an incomplete collection if the naming conventions change systematically. What we would want to do is to identify all the links ahead of time by parsing them from a list on the page and then work through that list to get the complete data collection.

What this means in the context of our Oscars example is assuming that we cannot trust that the sequential numbering of the years is a reliable guide to get all the data. Instead, we should get a list of the URLs for each of the awards pages from the "ceremonies-decade-scroller" (from Inspect) at the top. This scroller *should* be consistent across all the pages, but start with the nominees for 2019 just to be safe:
###Code
oscars2019_raw = requests.get('https://www.oscars.org/oscars/ceremonies/2019').text
oscars2019_soup = BeautifulSoup(oscars2019_raw)
###Output
_____no_output_____
###Markdown
Using the Inspect tool, there is a `<div class="years">` that contains `<a class="year">` links to each of the years. Run a `.find_all` to get all these href locations.
###Code
# Get the <div class="years"> as a parent tag first, just in case there are <a class="year"> tags elsewhere
oscars2019_years_div = oscars2019_soup.find_all('div',{'class':'years'})[0]
# Now get the <a class="years"> underneath only the oscars2019_years_div
oscars2019_years_a = oscars2019_years_div.find_all('a',{'class':'year'})
# Inspect the first 10
oscars2019_years_a[:10]
###Output
_____no_output_____
###Markdown
Each of these `<a>` tags contains an "href" attribute, the URL where the page lives, and a text element for what's displayed.
###Code
oscars2019_years_a[0]['href']
oscars2019_years_a[0].text
###Output
_____no_output_____
###Markdown
Now we can write a loop to print out the URL locations for all the other award years based on the "official" links in the "ceremonies-decade-scroller" navigation rather than assuming the years are sequential—I promise this will pay dividends in the future when inconsistent design wreaks havoc on your sequential data strategies!
###Code
for a in oscars2019_years_a[-10:]:
href = a['href']
print('https://www.oscars.org' + href)
###Output
https://www.oscars.org/oscars/ceremonies/2010
https://www.oscars.org/oscars/ceremonies/2011
https://www.oscars.org/oscars/ceremonies/2012
https://www.oscars.org/oscars/ceremonies/2013
https://www.oscars.org/oscars/ceremonies/2014
https://www.oscars.org/oscars/ceremonies/2015
https://www.oscars.org/oscars/ceremonies/2016
https://www.oscars.org/oscars/ceremonies/2017
https://www.oscars.org/oscars/ceremonies/2018
https://www.oscars.org/oscars/ceremonies/2019
###Markdown
We can now use the `parse_nominees` function for these pages as well.
###Code
# Create an empty dictionary to store each year's DataFrame, keyed by year
all_years_nominees = dict()
# For the 10 most recent years
for a in oscars2019_years_a[-10:]:
# Pause for a second between each request
sleep(1)
# Get the href
href = a['href']
# Get the year
year = a.text
# Get the raw HTML
url = 'https://www.oscars.org' + href
year_raw_html = requests.get(url).text
# Soup-ify
year_souped_html = BeautifulSoup(year_raw_html)
# Get the parent group
year_parent_group = year_souped_html.find_all('div',{'id':'quicktabs-tabpage-honorees-0'})
# Get the true groups under the parent group
year_true_groups = year_parent_group[0].find_all('div',{'class':'view-grouping'})
# Use our parsing function, passing the year from above
year_nominees = parse_nominees(year_true_groups,year)
# Convert the year_nominees to a DataFrame and add them to all_years_nominees
all_years_nominees[year] = pd.DataFrame(year_nominees)
###Output
_____no_output_____
###Markdown
Combine each of the DataFrames in `all_years_nominees` into a giant DataFrame of all the nominees from 2010-2019.
###Code
all_years_nominees_df = pd.concat(all_years_nominees)
all_years_nominees_df.reset_index(drop=True).head(10)
###Output
_____no_output_____
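###Markdown
One last aside on the error-handling mentioned earlier: both loops above assume every request succeeds. A minimal, hedged sketch of guarding each request is below; the specific exceptions and status codes worth handling will depend on how the server actually misbehaves, so treat this as a pattern rather than a finished solution.
###Code
# A sketch of defensive scraping: skip a page that errors out or returns a bad
# status code instead of letting one failure stop the whole loop
for a in oscars2019_years_a[-10:]:
    sleep(1)
    url = 'https://www.oscars.org' + a['href']
    try:
        year_response = requests.get(url)
        year_response.raise_for_status()
    except requests.exceptions.RequestException as e:
        print('Skipping {0} because of: {1}'.format(url, e))
        continue
    # ...soup-ify year_response.text and parse as above...
###Output
_____no_output_____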
###Markdown
Web Data Scraping Acknowledgements

These notebooks are adaptations from a 5 session mini course at the University of Colorado. The github repo can be found [here](https://github.com/CU-ITSS/Web-Data-Scraping-S2019) [Spring 2019 ITSS Mini-Course]. The course is taught by [Brian C. Keegan, Ph.D.](http://brianckeegan.com/), [Assistant Professor, Department of Information Science](https://www.colorado.edu/cmci/people/information-science/brian-c-keegan). They have been adapted for relevant content and integration with Docker so that we all have the same environment. Professor Keegan suggests using a recent version of Python (3.7), which is set in the `requirements.txt` file.

The Spring ITSS Mini-Course was adapted from a number of sources including [Allison Morgan](https://allisonmorgan.github.io/) for the [2018 Summer Institute for Computational Social Science](https://github.com/allisonmorgan/sicss_boulder), which were in turn derived from [other resources](https://github.com/simonmunzert/web-scraping-with-r-extended-edition) developed by [Simon Munzert](http://simonmunzert.github.io/) and [Chris Bail](http://www.chrisbail.net/). This notebook is adapted from excellent notebooks in Dr. [Cody Buntain](http://cody.bunta.in/)'s seminar on [Social Media and Crisis Informatics](http://cody.bunta.in/teaching/2018_winter_umd_inst728e/) as well as the [PRAW documentation](https://praw.readthedocs.io/en/latest/).

Parsing HTML data into tabular data

The overall goal we have as researchers in scraping data from the web is converting data from one structured format (HTML's tree-like structures) into another structured format (probably a tabular structure with rows and columns). This could involve simply reading tables out of a webpage all the way up to taking irregularly-structured HTML elements into a tabular format. We are going to make some use of the [`pandas`](https://pandas.pydata.org/) library ("**pan**el **da**ta", not the cute animal), which is Python's implementation of a data frame concept. This is a very powerful and complex library that I typically spend more than 12 hours of lecture teaching in intermediate programming classes. I hope to convey some important elements as we work through the material, but it is far beyond the scope of this class to cover all the fundamentals and syntax.

Let's begin by importing the libraries we'll need in this notebook: requests, BeautifulSoup, and pandas.
###Code
# Most straightforward way to import a library in Python
import requests
# BeautifulSoup is a module inside the "bs4" library, we only import the BeautifulSoup module
from bs4 import BeautifulSoup
# We import pandas but give the library a shortcut alias "pd" since we will call its functions so much
import pandas as pd
###Output
_____no_output_____
###Markdown
Reading an HTML table into Python

[The Numbers](http://www.the-numbers.com) is a popular source of data about movies' box office revenue numbers. Their daily domestic charts are HTML tables with the top-grossing movies for each day of the year, going back for several years. This [table](https://www.the-numbers.com/box-office-chart/daily/2018/12/25) for Christmas day in 2018 has columns for the current week's ranking, previous week's ranking, name of movie, distributor, gross, change over the previous week, number of theaters, revenue per theater, total gross, and number of days since release. This looks like a fairly straightforward table that could be read directly into a data frame-like structure.

Using the Inspect tool, we can see the table exists as a `<table>` element with child tags like `<th>` and `<tr>` (table row). Each `<tr>` has `<td>` tags, which define each of the cells and their content. For more on how HTML defines tables, check out [this tutorial](https://www.w3schools.com/html/html_tables.asp).

Using `requests` and `BeautifulSoup` we would get this webpage's HTML, turn it into soup, and then find the table (`<table>`) or the table rows (`<tr>`) and pull out their content.
###Code
# Make the request
xmas_bo_raw = requests.get('https://www.the-numbers.com/box-office-chart/daily/2018/12/25').text
# Turn into soup, specify the HTML parser
xmas_bo_soup = BeautifulSoup(xmas_bo_raw,'html.parser')
# Use .find_all to retrieve all the tables in the page
xmas_bo_tables = xmas_bo_soup.find_all('table')
###Output
_____no_output_____
###Markdown
It turns out there are two tables on the page: the first is a baby table consisting of the "Previous Chart", "Chart Index", and "Next Chart" at the top. We want the second table with all the data: `xmas_bo_tables[1]` returns the second chart (remember that Python is 0-indexed, so the first chart is at `xmas_bo_tables[0]`). With this table identified, we can do a second `find_all` to get the table rows inside it, and we save them as `xmas_bo_trs`.
###Code
xmas_bo_trs = xmas_bo_tables[1].find_all('tr')
###Output
_____no_output_____
###Markdown
Let's inspect a few of these rows. The first row in our list of rows under `xmas_bo_trs` should be the header with the names of the columns.
###Code
xmas_bo_trs[0]
###Output
_____no_output_____
###Markdown
The next table row should be for Aquaman.
###Code
xmas_bo_trs[1]
###Output
_____no_output_____
###Markdown
If we wanted to access the contents of this table row, we could use the `.contents` method to get a list of each of the `<td>` table cells, which (frustratingly) intersperses newline characters.
###Code
xmas_bo_trs[1].contents
###Output
_____no_output_____
###Markdown
Another alternative is to use the `.text` method to get the text content of all the cells in this row.
###Code
xmas_bo_trs[1].text
###Output
_____no_output_____
###Markdown
The `\n` characters re-appear here, but if we `print` out this statement, we see their newline functionality.
###Code
print(xmas_bo_trs[1].text)
###Output
1
(1)
Aquaman
Warner Bros.
$21,982,419
+103%
4,125
$5,329
$105,407,869
5
###Markdown
We could use string processing to take this text string and convert it into a simple list of data. `.split('\n')` will split the string on the newline characters and return a list of what exists in between.
###Code
xmas_bo_trs[1].text.split('\n')
###Output
_____no_output_____
###Markdown
We'll write a `for` loop to go through all the table rows in `xmas_bo_trs`, get the list of data from the row, and add it back to a list of all the rows.
###Code
cleaned_xmas_bo_rows = []
# Loop through all the non-header (first row) table rows
for row in xmas_bo_trs[1:]:
# Get the text of the row and split on the newlines (like above)
cleaned_row = row.text.split('\n')
# Add this cleaned row back to the external list of row data
cleaned_xmas_bo_rows.append(cleaned_row)
# Inspect the first few rows of data
cleaned_xmas_bo_rows[:2]
###Output
_____no_output_____
###Markdown
Now we can pass this list of lists in `cleaned_xmas_bo_rows` to pandas's `DataFrame` function and hopefully get a nice table out.
###Code
xmas_bo_df = pd.DataFrame(cleaned_xmas_bo_rows)
# Inspect
xmas_bo_df.head()
###Output
_____no_output_____
###Markdown
We need to do a bit of cleanup on this data:
* Columns 0 and 11 are all empty
* Add column names
###Code
# Drop columns 0 and 11 and overwrite the xmas_box_df variable
xmas_bo_df = xmas_bo_df.drop(columns=[0,11])
# Rename the columns
xmas_bo_df.columns = ['Rank','Last rank','Movie','Distributor','Gross',
'Change','Theaters','Per theater','Total gross',
'Days']
# Write to disk
# xmas_bo_df.to_csv('christmas_2018_box_office.csv',encoding='utf8')
# Inspect
xmas_bo_df.head()
###Output
_____no_output_____
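###Markdown
One more optional cleanup (a sketch, assuming the column names we just assigned): the dollar-denominated columns are still strings like `$21,982,419`, so before doing any arithmetic you may want to strip the `$` and commas and convert them to integers.
###Code
# Sketch: convert the currency-formatted string columns to integers
for col in ['Gross', 'Per theater', 'Total gross']:
    xmas_bo_df[col] = (xmas_bo_df[col]
                       .str.replace('$', '', regex=False)
                       .str.replace(',', '', regex=False)
                       .astype(int))
xmas_bo_df.dtypes
###Output
_____no_output_____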
###Markdown
`pandas`'s `read_html`
That was a good amount of work just to get this simple HTML table into Python. But it was important to cover how table elements moved from a string in `requests`, into a soup object from `BeautifulSoup`, into a list of data, and finally into `pandas`. `pandas` also has powerful functionality for reading tables directly from HTML. If we convert the soup of the data table (`xmas_bo_tables[1]`) back into a string, `pandas` can read it directly into a table. There are a few idiosyncrasies here: the result is a list of dataframes—even if there's only a single table/dataframe—so we need to return the first (and only) element of this list. This is why there's a `[0]` at the end; the `.head()` is just to show the first five rows.
###Code
xmas_bo_table_as_string = str(xmas_bo_tables[1])
pd.read_html(xmas_bo_table_as_string)[0].head()
###Output
_____no_output_____
###Markdown
The column names got lumped in as rows, but we can fix this as well with the `read_html` function by passing the row index where the column names live. In this case, it is the first row, so we pass `header=0`.
###Code
pd.read_html(xmas_bo_table_as_string,header=0)[0].head()
###Output
_____no_output_____
###Markdown
Finally, you can point `read_html` at a URL without any `requests` or `BeautifulSoup` and get all the tables on the page as a list of DataFrames. `pandas` is simply doing the `requests` and `BeautifulSoup` on the inside. Interestingly, I'm getting a [HTTP 403](https://en.wikipedia.org/wiki/HTTP_403) error indicating the server (The Numbers) is forbidding the client (us) from accessing their data using this strategy. We will discuss next week whether and how to handle situations where web servers refuse connections from non-human clients. In this case, you cannot use the off-the-shelf `read_html` approach and would need to revert to using the `requests`+`BeautifulSoup` approach above.
###Code
simple_tables = pd.read_html('https://www.the-numbers.com/box-office-chart/daily/2018/12/25')
simple_tables
###Output
_____no_output_____
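###Markdown
A common (but not guaranteed) workaround, and one you should only use where a site's terms allow scraping, is to make the request ourselves with a browser-like `User-Agent` header and then hand the returned HTML string to `read_html`. This is a sketch of the idea; the header value is just an example.
###Code
# Sketch: fetch the page with an explicit User-Agent, then let pandas parse the HTML string
headers = {'User-Agent': 'Mozilla/5.0 (compatible; course-scraper)'}
response = requests.get('https://www.the-numbers.com/box-office-chart/daily/2018/12/25',
                        headers=headers)
if response.status_code == 200:
    workaround_tables = pd.read_html(response.text)
    print(len(workaround_tables))
else:
    print('Still blocked:', response.status_code)
###Output
_____no_output_____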
###Markdown
If we point it at Wikipedia's [2018 in film](https://en.wikipedia.org/wiki/2018_in_film), it will pull all of the tables present on the page.
###Code
simple_tables = pd.read_html('https://en.wikipedia.org/wiki/2018_in_film')
###Output
_____no_output_____
###Markdown
The first three correspond to the "Year in film" navigation box on the side and are poorly-formatted by default.
###Code
simple_tables[0]
###Output
_____no_output_____
###Markdown
The fourth table in the `simple_tables` list we got from parsing the Wikipedia page with `read_html` is the table under the "Highest-grossing films" section.
###Code
simple_tables[3]
###Output
_____no_output_____
###Markdown
You can pass the "header" option in `read_html` to make sure the column names from a particular row (in this case the first row) do not accidentally become rows of data.
###Code
wiki_top_grossing_t = pd.read_html('https://en.wikipedia.org/wiki/2018_in_film',header=0)[3]
wiki_top_grossing_t
###Output
_____no_output_____
###Markdown
Note that there are still a few errors in this table because the "Disney" value in the Wikipedia table spans two rows and `read_html` thus skips the "Distributor" value for Black Panther.
###Code
# Copy the value at index position 1, column position Distributor to Worldwide gross
wiki_top_grossing_t.loc[1,'Worldwide gross'] = wiki_top_grossing_t.loc[1,'Distributor']
# Change the value at 1, Distributor to Disney
wiki_top_grossing_t.loc[1,'Distributor'] = 'Disney'
wiki_top_grossing_t
###Output
_____no_output_____
###Markdown
Writing your own parser
We will return to the historical Oscars data. Even though data as prominent as this is likely to already exist in tabular format somewhere, we will maintain the illusion that we are the first to both scrape it and parse it into a tabular format. Our goal here is to write a parser that will (ideally) work across multiple pages; in this case, each of the award years.
One of the first things we should do before writing any code is come up with a model of what we want our data to look like at the end of this. This is an intuitive and "tidy" format, but you might come up with alternatives based on your analysis and modeling needs.

| *Year* | *Category* | *Nominee* | *Movie* | *Won* |
| --- | --- | --- | --- | --- |
| 2019 | Actor in a leading role | Christian Bale | Vice | NA |
| 2019 | Actor in a leading role | Bradley Cooper | A Star Is Born | NA |
| 2019 | Actor in a leading role | Willem Dafoe | At Eternity's Gate | NA |
| 2019 | Actor in a leading role | Rami Malek | Bohemian Rhapsody | NA |
| 2019 | Actor in a leading role | Viggo Mortensen | Green Book | NA |

We will begin with writing a parser for a (hopefully!) representative year, then scrape the data for all the years, then apply the scraper to each of those years, and finally combine all the years' data together into a large data set. 2019 is actually not a great choice of a representative year because it is missing information about who won and lost since (at the time of my writing this notebook) the winners had not been announced. We will use 2018 instead and make the profoundly naïve assumption it should work the same going back in time.
Start off with using `requests` to get the data and then use `BeautifulSoup` to turn it into soup we can parse through.
###Code
oscars2018_raw = requests.get('https://www.oscars.org/oscars/ceremonies/2018').text
oscars2018_soup = BeautifulSoup(oscars2018_raw)
###Output
_____no_output_____
###Markdown
Using the Inspect tool exercise from Class 1, the `<div class="view-grouping">` seems to be the most promising tag for us to extract. Use `.find_all('div',{'class':'view-grouping'})` to (hopefully!) get all of these award groups. Inspect the first and last ones to make sure they look coherent.
###Code
# Get all the groups that have a <div class="view-grouping"> tag
oscars2018_groups = oscars2018_soup.find_all('div',{'class':'view-grouping'})
# Inspect the first one
oscars2018_groups[0]
###Output
_____no_output_____
###Markdown
The last group is something besides "Writing (Original Screenplay)" and it's not clear to me where this tag's content renders on the page.
###Code
# Inspect the last one
oscars2018_groups[-1]
###Output
_____no_output_____
###Markdown
This puts us into something of a bind going forward: if the `.find_all` returns more groupings than we expected, then it's not sufficiently precise to identify *only* groupings of nominees. However, there do not appear to be any child tags in the `oscars2018_groups[0]` grouping that uniquely differentiate them from the child tags present in the `oscars2018_groups[-1]` grouping. Another alternative is to simply parse the first 24 groupings, but this is a very brittle solution since other years' awards might have more or fewer groupings.
###Code
len(oscars2018_groups)
###Output
_____no_output_____
###Markdown
Navigating the HTML tree to find more specific parent elements
A third alternative is to leverage the tree structure of HTML and get the parent element in the hopes it is more unique than its children. In this case, something like the honorees container `<div>` (the code below matches it with `id="quicktabs-tabpage-honorees-0"`) is a promising lead. Use `find_all` to search for this tag and confirm there is only the one such `<div>` element (with its children) rather than multiple `<div>` elements matching "quicktabs-container-honorees".
###Code
# Get the new tag group
oscars2018_parent_group = oscars2018_soup.find_all('div',{'id':'quicktabs-tabpage-honorees-0'})
# Hopefully there is only one group matching this pattern
len(oscars2018_parent_group)
###Output
_____no_output_____
###Markdown
So far so good, now we can use `find_all` on the soup for this `<div>` to search *within* this specific parent group, and hopefully this time we get only the 24 awards groupings.
###Code
# Note the addition of the [0] since the _parent_group is a list with 1 element in it
# We just extract that single element (which is a soup) and then we can use find_all on it
oscars2018_true_groups = oscars2018_parent_group[0].find_all('div',{'class':'view-grouping'})
len(oscars2018_true_groups)
###Output
_____no_output_____
###Markdown
Hallelujah! The award names for each group live inside a `<div class="view-grouping-header">`, so we can `find_all` for those, loop through each, and print out the name.
###Code
for group in oscars2018_parent_group[0].find_all('div',{'class':'view-grouping-header'}):
print(group.text)
###Output
Actor in a Leading Role
Actor in a Supporting Role
Actress in a Leading Role
Actress in a Supporting Role
Animated Feature Film
Cinematography
Costume Design
Directing
Documentary (Feature)
Documentary (Short Subject)
Film Editing
Foreign Language Film
Makeup and Hairstyling
Music (Original Score)
Music (Original Song)
Best Picture
Production Design
Short Film (Animated)
Short Film (Live Action)
Sound Editing
Sound Mixing
Visual Effects
Writing (Adapted Screenplay)
Writing (Original Screenplay)
###Markdown
It turns out that the Oscars site loads a bunch of extra data that it does not render, living underneath a separate `<div>`, which is where the 81 extra "awards" come from. This appears to be an attempt to organize the page by film, rather than by category.
Navigating the HTML tree from a specific child to find specific generic parents
Now bear with me through some additional and presently unnecessary pain. Above, we were able to isolate the 24 category groupings we wanted by finding an appropriate *parent* tag and then working *down*. But I also want to show how we could identify the same 24 category groups by finding an appropriate *child* tag and working back up. This could be helpful in other situations where the elements are hard to disambiguate.
Let's start by finding the tag for "Actor in a Leading Role" from the soup containing all the tags.
###Code
oscars2018_groups[0]
###Output
_____no_output_____
###Markdown
Rather than finding *all* the `<div class="view-grouping">` tags present in the page, we only want the 23 *siblings* of this specific tag. We can use `find_next_siblings()` to get these 23 siblings. I do not like this method very much because you have to find the "eldest" sibling and then combine it with its siblings later on if you want all the children. In this case, you'd need to keep track of the `<div>` corresponding to Best Actor and then combine it with its 23 siblings, rather than an approach that simply returns all 24 in a single list.
###Code
oscars2018_group0_next_siblings = oscars2018_groups[0].find_next_siblings()
len(oscars2018_group0_next_siblings)
###Output
_____no_output_____
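###Markdown
For completeness, here is a small sketch of the recombination step I just complained about: putting the "eldest" sibling back together with its 23 siblings to get all 24 groupings in one list.
###Code
# Sketch: recombine the first grouping with its 23 siblings into a single list of 24
all_groupings_via_siblings = [oscars2018_groups[0]] + oscars2018_group0_next_siblings
len(all_groupings_via_siblings)
###Output
_____no_output_____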
###Markdown
We could also go up to get the parent and then find all 24 of the `<div class="view-grouping">` tags among the children.
###Code
# From the child we like, get its parent
oscars2018_group0_parent = oscars2018_groups[0].parent
# Now with the parent, find all the relevant children
oscars2018_group0_parent_children = oscars2018_group0_parent.find_all('div',{'class':'view-grouping'})
# Confirm
len(oscars2018_group0_parent_children)
###Output
_____no_output_____
###Markdown
Checking the relevant fields
That seemed like a major digression away from the core task of writing a parser, but it is critical that we write a parser that parses *only* the data we want and nothing else. Now that we have our 24 awards groups in `oscars2018_true_groups`, let's break one open and extract all the yummy data waiting inside.
There are a few helpfully-named `<div>` sub-classes that should make extracting this data a bit easier:
* `view-grouping-header` - name of the category
* a field containing the text "Winner" - marks the winner
* `views-field-field-actor-name` - name of actor
* `views-field-title` - title of movie
###Code
oscars2018_true_groups[0]
###Output
_____no_output_____
###Markdown
"Zoom in" to the `views-field-field-actor-name`.
###Code
oscars2018_true_groups[0].find_all('div',{'class':"views-field views-field-field-actor-name"})
###Output
_____no_output_____
###Markdown
These `<h4>` tags may be more specific and helpful.
###Code
oscars2018_true_groups[0].find_all('h4')
###Output
_____no_output_____
###Markdown
Zoom into the `views-field-title`.
###Code
oscars2018_true_groups[0].find_all('div',{'class':"views-field views-field-title"})
###Output
_____no_output_____
###Markdown
These `<span>` tags may be more specific and helpful, but there are also empty tags here clogging things up.
###Code
oscars2018_true_groups[0].find_all('span')
###Output
_____no_output_____
###Markdown
As a battle-scarred web scraper, let me continue to emphasize the importance of quick-checking your assumptions before committing to writing code. Are these fields still appropriate for other awards categories? Let's check the last category for original screenplay. Are the `<div>`s for "field-actor-name" still people and for "field-title" still movies? Nope. Looking back at the web page, it's now obvious that the movie title and person who gets the award are flipped between actors/actresses and the other awards categories. We're going to have to keep this in mind going forward!
###Code
oscars2018_true_groups[-1].find_all('div',{'class':"views-field views-field-field-actor-name"})
oscars2018_true_groups[-1].find_all('div',{'class':"views-field views-field-title"})
###Output
_____no_output_____
###Markdown
Writing the core parser functionality
How will we map the contents of the HTML to the data model we sketched above?
* **Year**: All the awards are from the same year, also in the URL
* **Category**: the `view-grouping-header` text
* **Nominee**: the `field-actor-name` field for actors, the `title` field for non-actors
* **Movie**: the `title` field for actors, the `field-actor-name` field for non-actors
* **Won**: the nominee marked "Winner" (listed first in each group), 0 for everyone else; alternatively just take the top nominee
###Code
oscars2018_true_groups[0]
category = oscars2018_true_groups[0].find_all('h2')[0].text
print("The name of the category is:",category)
names = []
for _nominee in oscars2018_true_groups[0].find_all('h4'):
nominee_name = _nominee.text
names.append(nominee_name)
print("The name of a nominee is:",nominee_name)
movies = []
for _movie in oscars2018_true_groups[0].find_all('span'):
if len(_movie.text) > 0:
movie_name = _movie.text.strip()
movies.append(movie_name)
print("The name of a movie is:",movie_name)
###Output
The name of a movie is: Winner
The name of a movie is: Darkest Hour
The name of a movie is: Call Me by Your Name
The name of a movie is: Phantom Thread
The name of a movie is: Get Out
The name of a movie is: Roman J. Israel, Esq.
###Markdown
One strategy is to use Python's built-in [`zip`](https://docs.python.org/3.7/library/functions.html#zip) function to combine elements from different lists together. But `zip` is a bit too slick and abstract for my tastes.
###Code
# The elements of each list being combined need to be the same size
# So we make a list of the category name and multiply it by 5 to make it the same size as the others
list(zip([category]*5,names,movies))
###Output
_____no_output_____
###Markdown
Another strategy is to use the `<div class="views-row">`s for each nominee and extract the relevant information from their sub-`<div>`s. This is a bit more intuitive in the sense of reading from top to bottom and also makes it easier to capture the winner and losers based on position.
###Code
actor_nominees = oscars2018_true_groups[0].find_all('div',{'class':'views-row'})
for i,nominee in enumerate(actor_nominees):
# If in the first position, the nominee won
if i == 0:
winner = 'Won'
# Otherwise, the nominee lost
else:
winner = 'Lost'
# Get a list of all the sub-divs
subdivs = nominee.find_all('div')
# The first subdiv (for an actor) is the name
name = subdivs[0].text.strip()
# The second subdiv (for an actor) is the movie name
movie = subdivs[1].text.strip()
print("{0} was nominated for \"{1}\" and {2}.".format(name,movie,winner))
###Output
Gary Oldman was nominated for "Darkest Hour" and Won.
Timothée Chalamet was nominated for "Call Me by Your Name" and Lost.
Daniel Day-Lewis was nominated for "Phantom Thread" and Lost.
Daniel Kaluuya was nominated for "Get Out" and Lost.
Denzel Washington was nominated for "Roman J. Israel, Esq." and Lost.
###Markdown
Check that reversing "movie" and "name" works for another award category like original screenplay (`oscars2018_true_groups[-1]`). There's some weirdness with "Written by" and "Story by" filtering in here rather than simply names that may need to get fixed in the final calculation, but I would want to talk to a domain expert about the differences between these labels.
###Code
original_screenplay_nominees = oscars2018_true_groups[-1].find_all('div',{'class':'views-row'})
for i,nominee in enumerate(original_screenplay_nominees):
if i == 0:
winner = 'Won'
else:
winner = 'Lost'
subdivs = nominee.find_all('div')
# movie and name reversed
movie = subdivs[0].text.strip()
name = subdivs[1].text.strip()
print("{0} was nominated for \"{1}\" and {2}.".format(name,movie,winner))
###Output
Written by Jordan Peele was nominated for "Get Out" and Won.
Written by Emily V. Gordon & Kumail Nanjiani was nominated for "The Big Sick" and Lost.
Written by Greta Gerwig was nominated for "Lady Bird" and Lost.
Screenplay by Guillermo del Toro & Vanessa Taylor; Story by Guillermo del Toro was nominated for "The Shape of Water" and Lost.
Written by Martin McDonagh was nominated for "Three Billboards outside Ebbing, Missouri" and Lost.
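###Markdown
The "Written by" / "Screenplay by" / "Story by" credit language shows up in the name field for the writing categories. If we only wanted the people, one possible cleanup (a sketch; compound credits like "Screenplay by X; Story by Y" would need more work, and a domain expert might prefer to keep the labels) is to strip the leading credit phrase with a regular expression.
###Code
import re
# Sketch: remove a leading credit phrase such as "Written by" or "Screenplay by"
credit_prefix = re.compile(r'^(Written for the screen by|Written by|Screenplay by|Story by)\s+')
for _nominee in original_screenplay_nominees:
    raw_name = _nominee.find_all('div')[1].text.strip()
    print(credit_prefix.sub('', raw_name))
###Output
_____no_output_____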
###Markdown
This was just for Best Actors; now let's add another layer for all the different awards categories. We can see the movie name and awardee switch is important now since most of the categories are reversed. (The stray quotation mark in the output below comes from an unbalanced quote in the format string; it gets cleaned up in the next version.)
###Code
for group in oscars2018_true_groups:
category = group.find_all('h2')[0].text
for i,nominee in enumerate(group.find_all('div',{'class':'views-row'})):
if i == 0:
winner = 'Won'
else:
winner = 'Lost'
subdivs = nominee.find_all('div')
name = subdivs[0].text.strip()
movie = subdivs[1].text.strip()
print("{0} was nominated in {1} for {2}\" and {3}.".format(name,category,movie,winner))
###Output
Gary Oldman was nominated in Actor in a Leading Role for Darkest Hour" and Won.
Timothée Chalamet was nominated in Actor in a Leading Role for Call Me by Your Name" and Lost.
Daniel Day-Lewis was nominated in Actor in a Leading Role for Phantom Thread" and Lost.
Daniel Kaluuya was nominated in Actor in a Leading Role for Get Out" and Lost.
Denzel Washington was nominated in Actor in a Leading Role for Roman J. Israel, Esq." and Lost.
Sam Rockwell was nominated in Actor in a Supporting Role for Three Billboards outside Ebbing, Missouri" and Won.
Willem Dafoe was nominated in Actor in a Supporting Role for The Florida Project" and Lost.
Woody Harrelson was nominated in Actor in a Supporting Role for Three Billboards outside Ebbing, Missouri" and Lost.
Richard Jenkins was nominated in Actor in a Supporting Role for The Shape of Water" and Lost.
Christopher Plummer was nominated in Actor in a Supporting Role for All the Money in the World" and Lost.
Frances McDormand was nominated in Actress in a Leading Role for Three Billboards outside Ebbing, Missouri" and Won.
Sally Hawkins was nominated in Actress in a Leading Role for The Shape of Water" and Lost.
Margot Robbie was nominated in Actress in a Leading Role for I, Tonya" and Lost.
Saoirse Ronan was nominated in Actress in a Leading Role for Lady Bird" and Lost.
Meryl Streep was nominated in Actress in a Leading Role for The Post" and Lost.
Allison Janney was nominated in Actress in a Supporting Role for I, Tonya" and Won.
Mary J. Blige was nominated in Actress in a Supporting Role for Mudbound" and Lost.
Lesley Manville was nominated in Actress in a Supporting Role for Phantom Thread" and Lost.
Laurie Metcalf was nominated in Actress in a Supporting Role for Lady Bird" and Lost.
Octavia Spencer was nominated in Actress in a Supporting Role for The Shape of Water" and Lost.
Coco was nominated in Animated Feature Film for Lee Unkrich and Darla K. Anderson" and Won.
The Boss Baby was nominated in Animated Feature Film for Tom McGrath and Ramsey Naito" and Lost.
The Breadwinner was nominated in Animated Feature Film for Nora Twomey and Anthony Leo" and Lost.
Ferdinand was nominated in Animated Feature Film for Carlos Saldanha and Lori Forte" and Lost.
Loving Vincent was nominated in Animated Feature Film for Dorota Kobiela, Hugh Welchman and Ivan Mactaggart" and Lost.
Blade Runner 2049 was nominated in Cinematography for Roger A. Deakins" and Won.
Darkest Hour was nominated in Cinematography for Bruno Delbonnel" and Lost.
Dunkirk was nominated in Cinematography for Hoyte van Hoytema" and Lost.
Mudbound was nominated in Cinematography for Rachel Morrison" and Lost.
The Shape of Water was nominated in Cinematography for Dan Laustsen" and Lost.
Phantom Thread was nominated in Costume Design for Mark Bridges" and Won.
Beauty and the Beast was nominated in Costume Design for Jacqueline Durran" and Lost.
Darkest Hour was nominated in Costume Design for Jacqueline Durran" and Lost.
The Shape of Water was nominated in Costume Design for Luis Sequeira" and Lost.
Victoria & Abdul was nominated in Costume Design for Consolata Boyle" and Lost.
The Shape of Water was nominated in Directing for Guillermo del Toro" and Won.
Dunkirk was nominated in Directing for Christopher Nolan" and Lost.
Get Out was nominated in Directing for Jordan Peele" and Lost.
Lady Bird was nominated in Directing for Greta Gerwig" and Lost.
Phantom Thread was nominated in Directing for Paul Thomas Anderson" and Lost.
Icarus was nominated in Documentary (Feature) for Bryan Fogel and Dan Cogan" and Won.
Abacus: Small Enough to Jail was nominated in Documentary (Feature) for Steve James, Mark Mitten and Julie Goldman" and Lost.
Faces Places was nominated in Documentary (Feature) for Agnès Varda, JR and Rosalie Varda" and Lost.
Last Men in Aleppo was nominated in Documentary (Feature) for Feras Fayyad, Kareem Abeed and Søren Steen Jespersen" and Lost.
Strong Island was nominated in Documentary (Feature) for Yance Ford and Joslyn Barnes" and Lost.
Heaven Is a Traffic Jam on the 405 was nominated in Documentary (Short Subject) for Frank Stiefel" and Won.
Edith+Eddie was nominated in Documentary (Short Subject) for Laura Checkoway and Thomas Lee Wright" and Lost.
Heroin(e) was nominated in Documentary (Short Subject) for Elaine McMillion Sheldon and Kerrin Sheldon" and Lost.
Knife Skills was nominated in Documentary (Short Subject) for Thomas Lennon" and Lost.
Traffic Stop was nominated in Documentary (Short Subject) for Kate Davis and David Heilbroner" and Lost.
Dunkirk was nominated in Film Editing for Lee Smith" and Won.
Baby Driver was nominated in Film Editing for Paul Machliss and Jonathan Amos" and Lost.
I, Tonya was nominated in Film Editing for Tatiana S. Riegel" and Lost.
The Shape of Water was nominated in Film Editing for Sidney Wolinsky" and Lost.
Three Billboards outside Ebbing, Missouri was nominated in Film Editing for Jon Gregory" and Lost.
A Fantastic Woman was nominated in Foreign Language Film for Chile" and Won.
The Insult was nominated in Foreign Language Film for Lebanon" and Lost.
Loveless was nominated in Foreign Language Film for Russia" and Lost.
On Body and Soul was nominated in Foreign Language Film for Hungary" and Lost.
The Square was nominated in Foreign Language Film for Sweden" and Lost.
Darkest Hour was nominated in Makeup and Hairstyling for Kazuhiro Tsuji, David Malinowski and Lucy Sibbick" and Won.
Victoria & Abdul was nominated in Makeup and Hairstyling for Daniel Phillips and Lou Sheppard" and Lost.
Wonder was nominated in Makeup and Hairstyling for Arjen Tuiten" and Lost.
The Shape of Water was nominated in Music (Original Score) for Alexandre Desplat" and Won.
Dunkirk was nominated in Music (Original Score) for Hans Zimmer" and Lost.
Phantom Thread was nominated in Music (Original Score) for Jonny Greenwood" and Lost.
Star Wars: The Last Jedi was nominated in Music (Original Score) for John Williams" and Lost.
Three Billboards outside Ebbing, Missouri was nominated in Music (Original Score) for Carter Burwell" and Lost.
Remember Me was nominated in Music (Original Song) for from Coco; Music and Lyric by Kristen Anderson-Lopez and Robert Lopez" and Won.
Mighty River was nominated in Music (Original Song) for from Mudbound; Music and Lyric by Mary J. Blige, Raphael Saadiq and Taura Stinson" and Lost.
Mystery Of Love was nominated in Music (Original Song) for from Call Me by Your Name; Music and Lyric by Sufjan Stevens" and Lost.
Stand Up For Something was nominated in Music (Original Song) for from Marshall; Music by Diane Warren; Lyric by Lonnie R. Lynn and Diane Warren" and Lost.
This Is Me was nominated in Music (Original Song) for from The Greatest Showman; Music and Lyric by Benj Pasek and Justin Paul" and Lost.
The Shape of Water was nominated in Best Picture for Guillermo del Toro and J. Miles Dale, Producers" and Won.
Call Me by Your Name was nominated in Best Picture for Peter Spears, Luca Guadagnino, Emilie Georges and Marco Morabito, Producers" and Lost.
Darkest Hour was nominated in Best Picture for Tim Bevan, Eric Fellner, Lisa Bruce, Anthony McCarten and Douglas Urbanski, Producers" and Lost.
Dunkirk was nominated in Best Picture for Emma Thomas and Christopher Nolan, Producers" and Lost.
Get Out was nominated in Best Picture for Sean McKittrick, Jason Blum, Edward H. Hamm Jr. and Jordan Peele, Producers" and Lost.
Lady Bird was nominated in Best Picture for Scott Rudin, Eli Bush and Evelyn O'Neill, Producers" and Lost.
Phantom Thread was nominated in Best Picture for JoAnne Sellar, Paul Thomas Anderson, Megan Ellison and Daniel Lupi, Producers" and Lost.
The Post was nominated in Best Picture for Amy Pascal, Steven Spielberg and Kristie Macosko Krieger, Producers" and Lost.
Three Billboards outside Ebbing, Missouri was nominated in Best Picture for Graham Broadbent, Pete Czernin and Martin McDonagh, Producers" and Lost.
The Shape of Water was nominated in Production Design for Production Design: Paul Denham Austerberry; Set Decoration: Shane Vieau and Jeffrey A. Melvin" and Won.
Beauty and the Beast was nominated in Production Design for Production Design: Sarah Greenwood; Set Decoration: Katie Spencer" and Lost.
Blade Runner 2049 was nominated in Production Design for Production Design: Dennis Gassner; Set Decoration: Alessandra Querzola" and Lost.
Darkest Hour was nominated in Production Design for Production Design: Sarah Greenwood; Set Decoration: Katie Spencer" and Lost.
Dunkirk was nominated in Production Design for Production Design: Nathan Crowley; Set Decoration: Gary Fettis" and Lost.
Dear Basketball was nominated in Short Film (Animated) for Glen Keane and Kobe Bryant" and Won.
Garden Party was nominated in Short Film (Animated) for Victor Caire and Gabriel Grapperon" and Lost.
Lou was nominated in Short Film (Animated) for Dave Mullins and Dana Murray" and Lost.
Negative Space was nominated in Short Film (Animated) for Max Porter and Ru Kuwahata" and Lost.
Revolting Rhymes was nominated in Short Film (Animated) for Jakob Schuh and Jan Lachauer" and Lost.
The Silent Child was nominated in Short Film (Live Action) for Chris Overton and Rachel Shenton" and Won.
DeKalb Elementary was nominated in Short Film (Live Action) for Reed Van Dyk" and Lost.
The Eleven O'Clock was nominated in Short Film (Live Action) for Derin Seale and Josh Lawson" and Lost.
My Nephew Emmett was nominated in Short Film (Live Action) for Kevin Wilson, Jr." and Lost.
Watu Wote/All of Us was nominated in Short Film (Live Action) for Katja Benrath and Tobias Rosen" and Lost.
Dunkirk was nominated in Sound Editing for Richard King and Alex Gibson" and Won.
Baby Driver was nominated in Sound Editing for Julian Slater" and Lost.
Blade Runner 2049 was nominated in Sound Editing for Mark Mangini and Theo Green" and Lost.
The Shape of Water was nominated in Sound Editing for Nathan Robitaille and Nelson Ferreira" and Lost.
Star Wars: The Last Jedi was nominated in Sound Editing for Matthew Wood and Ren Klyce" and Lost.
Dunkirk was nominated in Sound Mixing for Gregg Landaker, Gary A. Rizzo and Mark Weingarten" and Won.
Baby Driver was nominated in Sound Mixing for Julian Slater, Tim Cavagin and Mary H. Ellis" and Lost.
Blade Runner 2049 was nominated in Sound Mixing for Ron Bartlett, Doug Hemphill and Mac Ruth" and Lost.
The Shape of Water was nominated in Sound Mixing for Christian Cooke, Brad Zoern and Glen Gauthier" and Lost.
Star Wars: The Last Jedi was nominated in Sound Mixing for David Parker, Michael Semanick, Ren Klyce and Stuart Wilson" and Lost.
Blade Runner 2049 was nominated in Visual Effects for John Nelson, Gerd Nefzer, Paul Lambert and Richard R. Hoover" and Won.
Guardians of the Galaxy Vol. 2 was nominated in Visual Effects for Christopher Townsend, Guy Williams, Jonathan Fawkner and Dan Sudick" and Lost.
Kong: Skull Island was nominated in Visual Effects for Stephen Rosenbaum, Jeff White, Scott Benza and Mike Meinardus" and Lost.
Star Wars: The Last Jedi was nominated in Visual Effects for Ben Morris, Mike Mulholland, Neal Scanlan and Chris Corbould" and Lost.
War for the Planet of the Apes was nominated in Visual Effects for Joe Letteri, Daniel Barrett, Dan Lemmon and Joel Whist" and Lost.
Call Me by Your Name was nominated in Writing (Adapted Screenplay) for Screenplay by James Ivory" and Won.
The Disaster Artist was nominated in Writing (Adapted Screenplay) for Screenplay by Scott Neustadter & Michael H. Weber" and Lost.
Logan was nominated in Writing (Adapted Screenplay) for Screenplay by Scott Frank & James Mangold and Michael Green; Story by James Mangold" and Lost.
Molly's Game was nominated in Writing (Adapted Screenplay) for Written for the screen by Aaron Sorkin" and Lost.
Mudbound was nominated in Writing (Adapted Screenplay) for Screenplay by Virgil Williams and Dee Rees" and Lost.
Get Out was nominated in Writing (Original Screenplay) for Written by Jordan Peele" and Won.
The Big Sick was nominated in Writing (Original Screenplay) for Written by Emily V. Gordon & Kumail Nanjiani" and Lost.
Lady Bird was nominated in Writing (Original Screenplay) for Written by Greta Gerwig" and Lost.
The Shape of Water was nominated in Writing (Original Screenplay) for Screenplay by Guillermo del Toro & Vanessa Taylor; Story by Guillermo del Toro" and Lost.
Three Billboards outside Ebbing, Missouri was nominated in Writing (Original Screenplay) for Written by Martin McDonagh" and Lost.
###Markdown
Include some flow control: if "Actor" or "Actress" appears in the category title, then do nominee name first and movie name second; otherwise do movie name first and nominee name second.
###Code
for group in oscars2018_true_groups:
category = group.find_all('h2')[0].text
if 'Actor' in category or 'Actress' in category:
for i,nominee in enumerate(group.find_all('div',{'class':'views-row'})):
if i == 0:
winner = 'Won'
else:
winner = 'Lost'
subdivs = nominee.find_all('div')
name = subdivs[0].text.strip()
movie = subdivs[1].text.strip()
print("{0} was nominated in {1} for \"{2}\" and {3}.".format(name,category,movie,winner))
else:
for i,nominee in enumerate(group.find_all('div',{'class':'views-row'})):
if i == 0:
winner = 'Won'
else:
winner = 'Lost'
subdivs = nominee.find_all('div')
movie = subdivs[0].text.strip()
name = subdivs[1].text.strip()
print("\"{0}\" was nominated in {1} for {2} and {3}.".format(name,category,movie,winner))
###Output
Gary Oldman was nominated in Actor in a Leading Role for "Darkest Hour" and Won.
Timothée Chalamet was nominated in Actor in a Leading Role for "Call Me by Your Name" and Lost.
Daniel Day-Lewis was nominated in Actor in a Leading Role for "Phantom Thread" and Lost.
Daniel Kaluuya was nominated in Actor in a Leading Role for "Get Out" and Lost.
Denzel Washington was nominated in Actor in a Leading Role for "Roman J. Israel, Esq." and Lost.
Sam Rockwell was nominated in Actor in a Supporting Role for "Three Billboards outside Ebbing, Missouri" and Won.
Willem Dafoe was nominated in Actor in a Supporting Role for "The Florida Project" and Lost.
Woody Harrelson was nominated in Actor in a Supporting Role for "Three Billboards outside Ebbing, Missouri" and Lost.
Richard Jenkins was nominated in Actor in a Supporting Role for "The Shape of Water" and Lost.
Christopher Plummer was nominated in Actor in a Supporting Role for "All the Money in the World" and Lost.
Frances McDormand was nominated in Actress in a Leading Role for "Three Billboards outside Ebbing, Missouri" and Won.
Sally Hawkins was nominated in Actress in a Leading Role for "The Shape of Water" and Lost.
Margot Robbie was nominated in Actress in a Leading Role for "I, Tonya" and Lost.
Saoirse Ronan was nominated in Actress in a Leading Role for "Lady Bird" and Lost.
Meryl Streep was nominated in Actress in a Leading Role for "The Post" and Lost.
Allison Janney was nominated in Actress in a Supporting Role for "I, Tonya" and Won.
Mary J. Blige was nominated in Actress in a Supporting Role for "Mudbound" and Lost.
Lesley Manville was nominated in Actress in a Supporting Role for "Phantom Thread" and Lost.
Laurie Metcalf was nominated in Actress in a Supporting Role for "Lady Bird" and Lost.
Octavia Spencer was nominated in Actress in a Supporting Role for "The Shape of Water" and Lost.
"Lee Unkrich and Darla K. Anderson" was nominated in Animated Feature Film for Coco and Won.
"Tom McGrath and Ramsey Naito" was nominated in Animated Feature Film for The Boss Baby and Lost.
"Nora Twomey and Anthony Leo" was nominated in Animated Feature Film for The Breadwinner and Lost.
"Carlos Saldanha and Lori Forte" was nominated in Animated Feature Film for Ferdinand and Lost.
"Dorota Kobiela, Hugh Welchman and Ivan Mactaggart" was nominated in Animated Feature Film for Loving Vincent and Lost.
"Roger A. Deakins" was nominated in Cinematography for Blade Runner 2049 and Won.
"Bruno Delbonnel" was nominated in Cinematography for Darkest Hour and Lost.
"Hoyte van Hoytema" was nominated in Cinematography for Dunkirk and Lost.
"Rachel Morrison" was nominated in Cinematography for Mudbound and Lost.
"Dan Laustsen" was nominated in Cinematography for The Shape of Water and Lost.
"Mark Bridges" was nominated in Costume Design for Phantom Thread and Won.
"Jacqueline Durran" was nominated in Costume Design for Beauty and the Beast and Lost.
"Jacqueline Durran" was nominated in Costume Design for Darkest Hour and Lost.
"Luis Sequeira" was nominated in Costume Design for The Shape of Water and Lost.
"Consolata Boyle" was nominated in Costume Design for Victoria & Abdul and Lost.
"Guillermo del Toro" was nominated in Directing for The Shape of Water and Won.
"Christopher Nolan" was nominated in Directing for Dunkirk and Lost.
"Jordan Peele" was nominated in Directing for Get Out and Lost.
"Greta Gerwig" was nominated in Directing for Lady Bird and Lost.
"Paul Thomas Anderson" was nominated in Directing for Phantom Thread and Lost.
"Bryan Fogel and Dan Cogan" was nominated in Documentary (Feature) for Icarus and Won.
"Steve James, Mark Mitten and Julie Goldman" was nominated in Documentary (Feature) for Abacus: Small Enough to Jail and Lost.
"Agnès Varda, JR and Rosalie Varda" was nominated in Documentary (Feature) for Faces Places and Lost.
"Feras Fayyad, Kareem Abeed and Søren Steen Jespersen" was nominated in Documentary (Feature) for Last Men in Aleppo and Lost.
"Yance Ford and Joslyn Barnes" was nominated in Documentary (Feature) for Strong Island and Lost.
"Frank Stiefel" was nominated in Documentary (Short Subject) for Heaven Is a Traffic Jam on the 405 and Won.
"Laura Checkoway and Thomas Lee Wright" was nominated in Documentary (Short Subject) for Edith+Eddie and Lost.
"Elaine McMillion Sheldon and Kerrin Sheldon" was nominated in Documentary (Short Subject) for Heroin(e) and Lost.
"Thomas Lennon" was nominated in Documentary (Short Subject) for Knife Skills and Lost.
"Kate Davis and David Heilbroner" was nominated in Documentary (Short Subject) for Traffic Stop and Lost.
"Lee Smith" was nominated in Film Editing for Dunkirk and Won.
"Paul Machliss and Jonathan Amos" was nominated in Film Editing for Baby Driver and Lost.
"Tatiana S. Riegel" was nominated in Film Editing for I, Tonya and Lost.
"Sidney Wolinsky" was nominated in Film Editing for The Shape of Water and Lost.
"Jon Gregory" was nominated in Film Editing for Three Billboards outside Ebbing, Missouri and Lost.
"Chile" was nominated in Foreign Language Film for A Fantastic Woman and Won.
"Lebanon" was nominated in Foreign Language Film for The Insult and Lost.
"Russia" was nominated in Foreign Language Film for Loveless and Lost.
"Hungary" was nominated in Foreign Language Film for On Body and Soul and Lost.
"Sweden" was nominated in Foreign Language Film for The Square and Lost.
"Kazuhiro Tsuji, David Malinowski and Lucy Sibbick" was nominated in Makeup and Hairstyling for Darkest Hour and Won.
"Daniel Phillips and Lou Sheppard" was nominated in Makeup and Hairstyling for Victoria & Abdul and Lost.
"Arjen Tuiten" was nominated in Makeup and Hairstyling for Wonder and Lost.
"Alexandre Desplat" was nominated in Music (Original Score) for The Shape of Water and Won.
"Hans Zimmer" was nominated in Music (Original Score) for Dunkirk and Lost.
"Jonny Greenwood" was nominated in Music (Original Score) for Phantom Thread and Lost.
"John Williams" was nominated in Music (Original Score) for Star Wars: The Last Jedi and Lost.
"Carter Burwell" was nominated in Music (Original Score) for Three Billboards outside Ebbing, Missouri and Lost.
"from Coco; Music and Lyric by Kristen Anderson-Lopez and Robert Lopez" was nominated in Music (Original Song) for Remember Me and Won.
"from Mudbound; Music and Lyric by Mary J. Blige, Raphael Saadiq and Taura Stinson" was nominated in Music (Original Song) for Mighty River and Lost.
"from Call Me by Your Name; Music and Lyric by Sufjan Stevens" was nominated in Music (Original Song) for Mystery Of Love and Lost.
"from Marshall; Music by Diane Warren; Lyric by Lonnie R. Lynn and Diane Warren" was nominated in Music (Original Song) for Stand Up For Something and Lost.
"from The Greatest Showman; Music and Lyric by Benj Pasek and Justin Paul" was nominated in Music (Original Song) for This Is Me and Lost.
"Guillermo del Toro and J. Miles Dale, Producers" was nominated in Best Picture for The Shape of Water and Won.
"Peter Spears, Luca Guadagnino, Emilie Georges and Marco Morabito, Producers" was nominated in Best Picture for Call Me by Your Name and Lost.
"Tim Bevan, Eric Fellner, Lisa Bruce, Anthony McCarten and Douglas Urbanski, Producers" was nominated in Best Picture for Darkest Hour and Lost.
"Emma Thomas and Christopher Nolan, Producers" was nominated in Best Picture for Dunkirk and Lost.
"Sean McKittrick, Jason Blum, Edward H. Hamm Jr. and Jordan Peele, Producers" was nominated in Best Picture for Get Out and Lost.
"Scott Rudin, Eli Bush and Evelyn O'Neill, Producers" was nominated in Best Picture for Lady Bird and Lost.
"JoAnne Sellar, Paul Thomas Anderson, Megan Ellison and Daniel Lupi, Producers" was nominated in Best Picture for Phantom Thread and Lost.
"Amy Pascal, Steven Spielberg and Kristie Macosko Krieger, Producers" was nominated in Best Picture for The Post and Lost.
"Graham Broadbent, Pete Czernin and Martin McDonagh, Producers" was nominated in Best Picture for Three Billboards outside Ebbing, Missouri and Lost.
"Production Design: Paul Denham Austerberry; Set Decoration: Shane Vieau and Jeffrey A. Melvin" was nominated in Production Design for The Shape of Water and Won.
"Production Design: Sarah Greenwood; Set Decoration: Katie Spencer" was nominated in Production Design for Beauty and the Beast and Lost.
"Production Design: Dennis Gassner; Set Decoration: Alessandra Querzola" was nominated in Production Design for Blade Runner 2049 and Lost.
"Production Design: Sarah Greenwood; Set Decoration: Katie Spencer" was nominated in Production Design for Darkest Hour and Lost.
"Production Design: Nathan Crowley; Set Decoration: Gary Fettis" was nominated in Production Design for Dunkirk and Lost.
"Glen Keane and Kobe Bryant" was nominated in Short Film (Animated) for Dear Basketball and Won.
"Victor Caire and Gabriel Grapperon" was nominated in Short Film (Animated) for Garden Party and Lost.
"Dave Mullins and Dana Murray" was nominated in Short Film (Animated) for Lou and Lost.
"Max Porter and Ru Kuwahata" was nominated in Short Film (Animated) for Negative Space and Lost.
"Jakob Schuh and Jan Lachauer" was nominated in Short Film (Animated) for Revolting Rhymes and Lost.
"Chris Overton and Rachel Shenton" was nominated in Short Film (Live Action) for The Silent Child and Won.
"Reed Van Dyk" was nominated in Short Film (Live Action) for DeKalb Elementary and Lost.
"Derin Seale and Josh Lawson" was nominated in Short Film (Live Action) for The Eleven O'Clock and Lost.
"Kevin Wilson, Jr." was nominated in Short Film (Live Action) for My Nephew Emmett and Lost.
"Katja Benrath and Tobias Rosen" was nominated in Short Film (Live Action) for Watu Wote/All of Us and Lost.
"Richard King and Alex Gibson" was nominated in Sound Editing for Dunkirk and Won.
"Julian Slater" was nominated in Sound Editing for Baby Driver and Lost.
"Mark Mangini and Theo Green" was nominated in Sound Editing for Blade Runner 2049 and Lost.
"Nathan Robitaille and Nelson Ferreira" was nominated in Sound Editing for The Shape of Water and Lost.
"Matthew Wood and Ren Klyce" was nominated in Sound Editing for Star Wars: The Last Jedi and Lost.
"Gregg Landaker, Gary A. Rizzo and Mark Weingarten" was nominated in Sound Mixing for Dunkirk and Won.
"Julian Slater, Tim Cavagin and Mary H. Ellis" was nominated in Sound Mixing for Baby Driver and Lost.
"Ron Bartlett, Doug Hemphill and Mac Ruth" was nominated in Sound Mixing for Blade Runner 2049 and Lost.
"Christian Cooke, Brad Zoern and Glen Gauthier" was nominated in Sound Mixing for The Shape of Water and Lost.
"David Parker, Michael Semanick, Ren Klyce and Stuart Wilson" was nominated in Sound Mixing for Star Wars: The Last Jedi and Lost.
"John Nelson, Gerd Nefzer, Paul Lambert and Richard R. Hoover" was nominated in Visual Effects for Blade Runner 2049 and Won.
"Christopher Townsend, Guy Williams, Jonathan Fawkner and Dan Sudick" was nominated in Visual Effects for Guardians of the Galaxy Vol. 2 and Lost.
"Stephen Rosenbaum, Jeff White, Scott Benza and Mike Meinardus" was nominated in Visual Effects for Kong: Skull Island and Lost.
"Ben Morris, Mike Mulholland, Neal Scanlan and Chris Corbould" was nominated in Visual Effects for Star Wars: The Last Jedi and Lost.
"Joe Letteri, Daniel Barrett, Dan Lemmon and Joel Whist" was nominated in Visual Effects for War for the Planet of the Apes and Lost.
"Screenplay by James Ivory" was nominated in Writing (Adapted Screenplay) for Call Me by Your Name and Won.
"Screenplay by Scott Neustadter & Michael H. Weber" was nominated in Writing (Adapted Screenplay) for The Disaster Artist and Lost.
"Screenplay by Scott Frank & James Mangold and Michael Green; Story by James Mangold" was nominated in Writing (Adapted Screenplay) for Logan and Lost.
"Written for the screen by Aaron Sorkin" was nominated in Writing (Adapted Screenplay) for Molly's Game and Lost.
"Screenplay by Virgil Williams and Dee Rees" was nominated in Writing (Adapted Screenplay) for Mudbound and Lost.
"Written by Jordan Peele" was nominated in Writing (Original Screenplay) for Get Out and Won.
"Written by Emily V. Gordon & Kumail Nanjiani" was nominated in Writing (Original Screenplay) for The Big Sick and Lost.
"Written by Greta Gerwig" was nominated in Writing (Original Screenplay) for Lady Bird and Lost.
"Screenplay by Guillermo del Toro & Vanessa Taylor; Story by Guillermo del Toro" was nominated in Writing (Original Screenplay) for The Shape of Water and Lost.
"Written by Martin McDonagh" was nominated in Writing (Original Screenplay) for Three Billboards outside Ebbing, Missouri and Lost.
###Markdown
Rather than printing out the information, store it in `nominees_2018` so that we can turn it into a DataFrame.
###Code
nominees_2018 = []
for group in oscars2018_true_groups:
category = group.find_all('h2')[0].text
if 'Actor' in category or 'Actress' in category:
for i,nominee in enumerate(group.find_all('div',{'class':'views-row'})):
if i == 0:
winner = 'Won'
else:
winner = 'Lost'
subdivs = nominee.find_all('div')
name = subdivs[0].text.strip()
movie = subdivs[1].text.strip()
# Swap out the print
# Make a payload for each nominee
nominee_payload = {'Category':category,
'Name':name,
'Movie':movie,
'Year':2018, # We're only looking at 2018 right now
'Winner':winner}
# Add the payload to the list of nominees at top
nominees_2018.append(nominee_payload)
else:
for i,nominee in enumerate(group.find_all('div',{'class':'views-row'})):
if i == 0:
winner = 'Won'
else:
winner = 'Lost'
subdivs = nominee.find_all('div')
movie = subdivs[0].text.strip()
name = subdivs[1].text.strip()
# Swap out the print
nominee_payload = {'Category':category,
'Name':name,
'Movie':movie,
'Year':2018,
'Winner':winner}
nominees_2018.append(nominee_payload)
###Output
_____no_output_____
###Markdown
Moment of truth!
###Code
nominees_df = pd.DataFrame(nominees_2018)
nominees_df
###Output
_____no_output_____
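###Markdown
With the nominees in a DataFrame, quick sanity checks are cheap. For example (a sketch using the columns we just created), we can count winners versus losers and confirm that each category contributes exactly one "Won" row.
###Code
# How many winners vs. losers did we capture?
print(nominees_df['Winner'].value_counts())
# Each category should have exactly one 'Won' row
nominees_df[nominees_df['Winner'] == 'Won'].groupby('Category').size()
###Output
_____no_output_____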
###Markdown
Now let's turn this hulking beast of a parser into a function so we can apply it to other years' nominees in the next step.
###Code
def parse_nominees(true_groups,year):
nominees_list = []
for group in true_groups:
category = group.find_all('h2')[0].text
if 'Actor' in category or 'Actress' in category:
for i,nominee in enumerate(group.find_all('div',{'class':'views-row'})):
if i == 0:
winner = 'Won'
else:
winner = 'Lost'
subdivs = nominee.find_all('div')
name = subdivs[0].text.strip()
movie = subdivs[1].text.strip()
nominee_payload = {'Category':category,
'Name':name,
'Movie':movie,
'Year':year, # We may look at other years
'Winner':winner}
nominees_list.append(nominee_payload)
else:
for i,nominee in enumerate(group.find_all('div',{'class':'views-row'})):
if i == 0:
winner = 'Won'
else:
winner = 'Lost'
subdivs = nominee.find_all('div')
movie = subdivs[0].text.strip()
name = subdivs[1].text.strip()
nominee_payload = {'Category':category,
'Name':name,
'Movie':movie,
'Year':year,
'Winner':winner}
nominees_list.append(nominee_payload)
return nominees_list
###Output
_____no_output_____
###Markdown
Iterating vs. parsing to retrieve data
Often the data you are interested in is spread across multiple web pages. In an ideal world, the naming conventions would let you retrieve the data from these pages systematically. In the case of the Oscars, the URLs appear to be consistently formatted: `https://www.oscars.org/oscars/ceremonies/2019` suggests that we could change the 2019 to any other year going back to the start of the Oscars and get that year as well: `https://www.oscars.org/oscars/ceremonies/2018` should get us the page for 2018, and so on. Let's demonstrate each of these strategies with the Oscars data: iterating from 2019 back to 1929 in the URL versus parsing the list of links from the header.
Iterating strategies for retrieving data
The fundamental assumption with this strategy is that the data are stored at URLs in a consistent way that we can access sequentially. In the case of the Oscars, we *should* be able to simply pass each year to the URL in `requests`. Here we want to practice responsible data scraping by including a sleep between each request so that we do not overwhelm the Oscars server. We can use the `sleep` function within `time`.
###Code
from time import sleep
###Output
_____no_output_____
###Markdown
The `sleep(3)` below prevents any more code from progressing for 3 seconds.
###Code
print("The start of something.")
sleep(3)
print("The end of something.")
###Output
The start of something.
The end of something.
###Markdown
The core part of the iterating strategy is simply using Python's [`range`](https://docs.python.org/3.7/library/functions.html#func-range) function to generate a sequence of values. Here, we can use `range` to print out a sequence of URLs that should correspond to awards pages from 2010 through 2019. We can also incorporate the `sleep` functionality and wait a second between each `print` statement; it should now take 10 seconds for this code to finish printing. This simulates how we can use `sleep` to slow down and spread out requests so that we do not overwhelm the servers whose data we are trying to scrape.
###Code
for year in range(2010,2020):
sleep(1)
print('https://www.oscars.org/oscars/ceremonies/{0}'.format(year))
###Output
https://www.oscars.org/oscars/ceremonies/2010
https://www.oscars.org/oscars/ceremonies/2011
https://www.oscars.org/oscars/ceremonies/2012
https://www.oscars.org/oscars/ceremonies/2013
https://www.oscars.org/oscars/ceremonies/2014
https://www.oscars.org/oscars/ceremonies/2015
https://www.oscars.org/oscars/ceremonies/2016
https://www.oscars.org/oscars/ceremonies/2017
https://www.oscars.org/oscars/ceremonies/2018
https://www.oscars.org/oscars/ceremonies/2019
###Markdown
We defined a function `parse_nominees` above that takes the "true groups" of nominees. Let's try to tie these pieces together for all the nominees in the 2010s.
###Code
# Create an empty dictionary to store the data we get
all_years_nominees = dict()
# For each year starting in 2010 until 2019
for year in range(2010,2020):
# Pause for a second between each request
sleep(1)
# Get the raw HTML
year_raw_html = requests.get('https://www.oscars.org/oscars/ceremonies/{0}'.format(year)).text
# Soup-ify
year_souped_html = BeautifulSoup(year_raw_html)
# Get the parent group
year_parent_group = year_souped_html.find_all('div',{'id':'quicktabs-tabpage-honorees-0'})
# Get the true groups under the parent group
year_true_groups = year_parent_group[0].find_all('div',{'class':'view-grouping'})
# Use our parsing function, passing the year from above
year_nominees = parse_nominees(year_true_groups,year)
# Convert the year_nominees to a DataFrame and add them to all_years_nominees
all_years_nominees[year] = pd.DataFrame(year_nominees)
###Output
_____no_output_____
###Markdown
Combine each of the DataFrames in `all_years_nominees` into a giant DataFrame of all the nominees from 2010-2019.
###Code
all_years_nominees_df = pd.concat(all_years_nominees)
all_years_nominees_df.reset_index(drop=True).head(10)
###Output
_____no_output_____
###Markdown
Parsing strategy for retrieving data
Frustratingly, this iterating strategy may not always hold: maybe some years are skipped or the naming convention changes at some point. We will cover some basics of [error-handling in Python](https://realpython.com/python-exceptions/) that could let us work around errors as they pop up, but this may result in an incomplete collection if the naming conventions are not consistent. What we would want to do is identify all the links ahead of time by parsing them from the page itself and then work through that list to get the complete data collection.
What this means in the context of our Oscars example is assuming that we cannot trust that the sequential numbering of the years is a reliable guide to get all the data. Instead, we should get a list of the URLs for each of the awards pages from the "ceremonies-decade-scroller" (from Inspect) at the top. This scroller *should* be consistent across all the pages, but start with the nominees for 2019 just to be safe:
###Code
oscars2019_raw = requests.get('https://www.oscars.org/oscars/ceremonies/2019').text
oscars2019_soup = BeautifulSoup(oscars2019_raw)
###Output
_____no_output_____
###Markdown
Using the Inspect tool, there is a `<div class="years">` that contains the links to each of the years. Run a `.find_all` to get all of these href locations.
###Code
# Get the <div class="years"> as a parent tag first, just in case there are <a class="year"> tags elsewhere
oscars2019_years_div = oscars2019_soup.find_all('div',{'class':'years'})[0]
# Now get the <a class="years"> underneath only the oscars2019_years_div
oscars2019_years_a = oscars2019_years_div.find_all('a',{'class':'year'})
# Inspect the first 10
oscars2019_years_a[:10]
###Output
_____no_output_____
###Markdown
Each of these `<a>` tags contains an "href", or the URL where the page lives, and a text element for what's displayed.
###Code
oscars2019_years_a[0]['href']
oscars2019_years_a[0].text
###Output
_____no_output_____
###Markdown
Now we can write a loop to print out the URL locations for all the other award years based on the "official" links in the "ceremonies-decade-scroller" navigation rather than assuming the years are sequential—I promise this will pay dividends in the future when inconsistent design wreaks havoc on your sequential data strategies!
###Code
for a in oscars2019_years_a[-10:]:
href = a['href']
print('https://www.oscars.org' + href)
###Output
https://www.oscars.org/oscars/ceremonies/2010
https://www.oscars.org/oscars/ceremonies/2011
https://www.oscars.org/oscars/ceremonies/2012
https://www.oscars.org/oscars/ceremonies/2013
https://www.oscars.org/oscars/ceremonies/2014
https://www.oscars.org/oscars/ceremonies/2015
https://www.oscars.org/oscars/ceremonies/2016
https://www.oscars.org/oscars/ceremonies/2017
https://www.oscars.org/oscars/ceremonies/2018
https://www.oscars.org/oscars/ceremonies/2019
###Markdown
We can now use the `parse_nominees` function for these pages as well.
###Code
# Create an empty dictionary to store the data we get
all_years_nominees = dict()
# For the 10 most recent years
for a in oscars2019_years_a[-10:]:
# Pause for a second between each request
sleep(1)
# Get the href
href = a['href']
# Get the year
year = a.text
# Get the raw HTML
url = 'https://www.oscars.org' + href
year_raw_html = requests.get(url).text
# Soup-ify
year_souped_html = BeautifulSoup(year_raw_html)
# Get the parent group
year_parent_group = year_souped_html.find_all('div',{'id':'quicktabs-tabpage-honorees-0'})
# Get the true groups under the parent group
year_true_groups = year_parent_group[0].find_all('div',{'class':'view-grouping'})
# Use our parsing function, passing the year from above
year_nominees = parse_nominees(year_true_groups,year)
# Convert the year_nominees to a DataFrame and add them to all_years_nominees
all_years_nominees[year] = pd.DataFrame(year_nominees)
###Output
_____no_output_____
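###Markdown
If one of these pages were missing or a request failed partway through, the loop above would crash and we would lose everything collected so far. A minimal sketch of the error handling mentioned earlier, using `requests`' built-in exceptions, would skip the bad year and keep going.
###Code
# Sketch: the same loop with basic error handling so one bad page does not stop the crawl
all_years_nominees = dict()
for a in oscars2019_years_a[-10:]:
    sleep(1)
    url = 'https://www.oscars.org' + a['href']
    year = a.text
    try:
        response = requests.get(url)
        response.raise_for_status()  # raises an HTTPError for 4xx/5xx responses
    except requests.exceptions.RequestException as e:
        print('Skipping {0}: {1}'.format(url, e))
        continue
    year_souped_html = BeautifulSoup(response.text)
    year_parent_group = year_souped_html.find_all('div',{'id':'quicktabs-tabpage-honorees-0'})
    year_true_groups = year_parent_group[0].find_all('div',{'class':'view-grouping'})
    all_years_nominees[year] = pd.DataFrame(parse_nominees(year_true_groups,year))
###Output
_____no_output_____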
###Markdown
Combine each of the DataFrames in `all_years_nominees` into a giant DataFrame of all the nominees from 2010-2019.
###Code
all_years_nominees_df = pd.concat(all_years_nominees)
all_years_nominees_df.reset_index(drop=True).head(10)
###Output
_____no_output_____ |
nbs/dl1/Accumulating_Batchnorm (PETS)-v4.ipynb | ###Markdown
No Acc
###Code
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=64
).normalize(imagenet_stats)
learn = create_cnn(data, models.vgg16_bn, metrics=accuracy)
learn.fit(1)
data.batch_size
###Output
_____no_output_____
###Markdown
Naive Acc
###Code
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
).normalize(imagenet_stats)
learn = create_cnn(data, models.vgg16_bn, metrics=accuracy,
callback_fns=[partial(AccumulateStepper, n_step=32)])
learn.loss_func = CrossEntropyFlat(reduction='sum')
learn.fit(1)
###Output
_____no_output_____
###Markdown
Acc + BnFreeze
###Code
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
).normalize(imagenet_stats)
learn = create_cnn(data, models.vgg16_bn, metrics=accuracy,
callback_fns=[partial(AccumulateStepper, n_step=32), BnFreeze])
learn.loss_func = CrossEntropyFlat(reduction='sum')
learn.fit(1)
###Output
_____no_output_____
###Markdown
Increase Momentum
###Code
def set_bn_mom(m:nn.Module, mom=0.9):
"Set bn layers in eval mode for all recursive children of `m`."
for l in m.children():
if isinstance(l, bn_types):
l.momentum = mom
set_bn_mom(l, mom)
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
).normalize(imagenet_stats)
learn = create_cnn(data, models.vgg16_bn, metrics=accuracy,
callback_fns=[partial(AccumulateStepper, n_step=32)])
learn.loss_func = CrossEntropyFlat(reduction='sum')
set_bn_mom(learn.model, mom=0.9)
learn.fit(1)
###Output
_____no_output_____
###Markdown
Decrease Momentum
###Code
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
).normalize(imagenet_stats)
learn = create_cnn(data, models.vgg16_bn, metrics=accuracy,
callback_fns=[partial(AccumulateStepper, n_step=32)])
learn.loss_func = CrossEntropyFlat(reduction='sum')
set_bn_mom(learn.model, mom=0.01)
learn.fit(1)
###Output
_____no_output_____
###Markdown
InstanceNorm
###Code
def bn2instance(bn):
if isinstance(bn, nn.BatchNorm1d): inst = nn.InstanceNorm1d(bn.num_features, affine=True)
elif isinstance(bn, nn.BatchNorm2d): inst = nn.InstanceNorm2d(bn.num_features, affine=True)
elif isinstance(bn, nn.BatchNorm3d): inst = nn.InstanceNorm3d(bn.num_features, affine=True)
inst.weight = bn.weight
inst.bias = bn.bias
inst.running_mean = nn.Parameter(bn.running_mean, requires_grad=False)
inst.running_var = nn.Parameter(bn.running_var, requires_grad=False)
inst.momentum = bn.momentum
inst.eps = bn.eps
inst.track_running_stats = bn.track_running_stats
return (inst).to(bn.weight.device)
def convert_bn(list_mods, func=bn2instance):
for i in range(len(list_mods)):
if isinstance(list_mods[i], bn_types):
list_mods[i] = func(list_mods[i])
elif list_mods[i].__class__.__name__ in ("Sequential", "BasicBlock"):
list_mods[i] = nn.Sequential(*convert_bn(list(list_mods[i].children()), func))
return list_mods
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
).normalize(imagenet_stats)
learn = create_cnn(data, models.vgg16_bn, metrics=accuracy,
callback_fns=[partial(AccumulateStepper, n_step=32)])
learn.loss_func = CrossEntropyFlat(reduction='sum')
learn.model = nn.Sequential(*convert_bn(list(learn.model.children()), bn2instance))
learn.freeze()
learn.fit(1)
###Output
_____no_output_____
###Markdown
GroupNorm
###Code
groups = 64
def bn2group(bn):
groupnorm = nn.GroupNorm(groups, bn.num_features, affine=True)
groupnorm.weight = bn.weight
groupnorm.bias = torch.nn.Parameter(bn.bias/2)
groupnorm.eps = bn.eps
return (groupnorm).to(bn.weight.device)
def convert_bn(list_mods, func=bn2group):
for i in range(len(list_mods)):
if isinstance(list_mods[i], bn_types):
list_mods[i] = func(list_mods[i])
elif list_mods[i].__class__.__name__ in ("Sequential", "BasicBlock"):
list_mods[i] = nn.Sequential(*convert_bn(list(list_mods[i].children()), func))
return list_mods
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
).normalize(imagenet_stats)
learn = create_cnn(data, models.vgg16_bn, metrics=accuracy,
callback_fns=[partial(AccumulateStepper, n_step=32)])
learn.loss_func = CrossEntropyFlat(reduction='sum')
learn.model = nn.Sequential(*convert_bn(list(learn.model.children()), bn2group))
learn.freeze()
learn.fit(1)
###Output
_____no_output_____
###Markdown
Resnet + GroupNorm
###Code
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
).normalize(imagenet_stats)
learn = create_cnn(data, models.resnet18, metrics=accuracy,
callback_fns=[partial(AccumulateStepper, n_step=32)])
learn.loss_func = CrossEntropyFlat(reduction='sum')
def change_all_BN(module):
for i in range(5):
atr = 'bn'+str(i)
if hasattr(module, atr):
setattr(module, atr, bn2group(getattr(module,atr)))
def wrap_BN(model):
for i in range(len(model)):
for j in range(len(model[i])):
if isinstance(model[i][j], bn_types):
model[i][j] = bn2group(model[i][j])
elif model[i][j].__class__.__name__ == "Sequential":
for k in range(len(model[i][j])):
if isinstance(model[i][j][k], bn_types):
model[i][j][k] = bn2group(model[i][j][k])
elif model[i][j][k].__class__.__name__ == "BasicBlock":
change_all_BN(model[i][j][k])
if hasattr(model[i][j][k],'downsample'):
if model[i][j][k].downsample is not None:
for l in range(len(model[i][j][k].downsample)):
if isinstance(model[i][j][k].downsample[l], bn_types):
model[i][j][k].downsample[l] = bn2group(model[i][j][k].downsample[l])
wrap_BN(learn.model)
learn.freeze()
learn.fit(1)
###Output
_____no_output_____
###Markdown
Resnet + GroupNorm (No Acc)
###Code
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
).normalize(imagenet_stats)
learn = create_cnn(data, models.resnet18, metrics=accuracy)
wrap_BN(learn.model)
learn.freeze()
learn.fit(1)
###Output
_____no_output_____
###Markdown
Resnet + GroupNorm (No Acc) bs = 1
###Code
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=1
).normalize(imagenet_stats)
learn = create_cnn(data, models.resnet18, metrics=accuracy)
wrap_BN(learn.model)
learn.freeze()
learn.fit(1)
###Output
_____no_output_____ |
bond_angle.ipynb | ###Markdown
Classical: LAMMPS Output
###Code
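# NOTE (added sketch): this excerpt uses np, pd, plt, sns and an Nint() helper that are not
# defined in the cells shown here; the lines below are an assumption that makes the cell
# self-contained. Nint is taken to be nearest-integer rounding, as used by the
# minimum-image convention applied further down.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

def Nint(x):
    # Round to the nearest integer for the periodic-image shift
    return np.rint(x)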
# Number of vertices
V_class = np.int (pd.read_csv('NVE-J.atom', header=None).iloc[3][0])
xlo = np.float(pd.read_csv('NVE-J.atom', header=None).iloc[5][0].split()[0])
xhi = np.float(pd.read_csv('NVE-J.atom', header=None).iloc[5][0].split()[1])
ylo = np.float(pd.read_csv('NVE-J.atom', header=None).iloc[6][0].split()[0])
yhi = np.float(pd.read_csv('NVE-J.atom', header=None).iloc[6][0].split()[1])
zlo = np.float(pd.read_csv('NVE-J.atom', header=None).iloc[7][0].split()[0])
zhi = np.float(pd.read_csv('NVE-J.atom', header=None).iloc[7][0].split()[1])
lx_class=xhi-xlo
ly_class=yhi-ylo
lz_class=zhi-zlo
vol_class=lx_class*ly_class*lz_class
pos_class = pd.read_csv('NVE-J.atom', skiprows=9, header=None, delimiter=" ", error_bad_lines=False).iloc[0:].iloc[:V_class]
x_class=np.array(pos_class[2], dtype=float)*lx_class
y_class=np.array(pos_class[3], dtype=float)*ly_class
z_class=np.array(pos_class[4], dtype=float)*lz_class
# Distance calculation
dist_class=np.full((V_class,V_class),0.0)
disp_class=np.full((V_class,V_class,3),0.0)
for m in range(V_class):
for l in range(V_class):
dmy_rx=x_class[m]-x_class[l]
dmy_ry=y_class[m]-y_class[l]
dmy_rz=z_class[m]-z_class[l]
        dmy_rx-=Nint(dmy_rx/lx_class)*lx_class
        dmy_ry-=Nint(dmy_ry/ly_class)*ly_class
        dmy_rz-=Nint(dmy_rz/lz_class)*lz_class
dist_class[m][l]=np.sqrt(dmy_rx**2+dmy_ry**2+dmy_rz**2)
disp_class[m][l]=np.array([dmy_rx, dmy_ry, dmy_rz])
###Output
_____no_output_____
###Markdown
AIMD: VASP Output
###Code
# Number of vertices
V_AIMD = np.int(pd.read_csv('ab-initio.txt', header=None).iloc[6][0])
lx_AIMD = np.float(pd.read_csv('ab-initio.txt', header=None).iloc[2][0].split()[0])
ly_AIMD = lx_AIMD
lz_AIMD = lx_AIMD
pos_AIMD = pd.read_csv('ab-initio.txt', skiprows=8, header=None, delimiter=" ").drop(columns=[0,1,3,5])
x_AIMD=np.array(pos_AIMD[2], dtype=float)*lx_AIMD
y_AIMD=np.array(pos_AIMD[4], dtype=float)*ly_AIMD
z_AIMD=np.array(pos_AIMD[6], dtype=float)*lz_AIMD
# Distance calculation
dist_AIMD=np.full((V_AIMD,V_AIMD),0.0)
disp_AIMD=np.full((V_AIMD,V_AIMD,3),0.0)
for m in range(V_AIMD):
for l in range(V_AIMD):
dmy_rx=x_AIMD[m]-x_AIMD[l]
dmy_ry=y_AIMD[m]-y_AIMD[l]
dmy_rz=z_AIMD[m]-z_AIMD[l]
        dmy_rx-=Nint(dmy_rx/lx_AIMD)*lx_AIMD
        dmy_ry-=Nint(dmy_ry/ly_AIMD)*ly_AIMD
        dmy_rz-=Nint(dmy_rz/lz_AIMD)*lz_AIMD
dist_AIMD[m][l]=np.sqrt(dmy_rx**2+dmy_ry**2+dmy_rz**2)
disp_AIMD[m][l]=np.array([dmy_rx, dmy_ry, dmy_rz])
# Adjacency matrix to estimate the bonds and neighbors
# AIMD
bond_dist_AIMD=2.725
adj_AIMD=dist_AIMD < bond_dist_AIMD
adj_AIMD=adj_AIMD.astype(int)
#Classical
bond_dist_class=2.925
adj_class=dist_class < bond_dist_class
adj_class=adj_class.astype(int)
# Set diagonal to zeros
np.fill_diagonal(adj_AIMD, 0)
np.fill_diagonal(adj_class, 0)
#Function to find the angle between two vectors in 3-D
def Theta(A,B):
return (np.arccos(np.dot(A,B)/(np.linalg.norm(A)*np.linalg.norm(B))))*180/np.pi
#---------------------------------------------------------------------------------------------------------------------
bonds_pack_AIMD=[]
for i in range(V_AIMD):
neighbors_AIMD=np.where(adj_AIMD[i] == 1)[0]
bonds_AIMD=[]
for j in neighbors_AIMD:
bonds_AIMD.append(disp_AIMD[i,j])
bonds_pack_AIMD.append(bonds_AIMD)
Theta_pack_AIMD=[]
for k in range(V_AIMD):
for i in range(len(bonds_pack_AIMD[k])):
for j in range(len(bonds_pack_AIMD[k])):
if i!=j:
Theta_pack_AIMD.append(Theta(bonds_pack_AIMD[k][i], bonds_pack_AIMD[k][j]))
Theta_pack_AIMD=np.array(Theta_pack_AIMD)
#---------------------------------------------------------------------------------------------------------------------
bonds_pack_class=[]
for i in range(V_class):
neighbors_class=np.where(adj_class[i] == 1)[0]
bonds_class=[]
for j in neighbors_class:
bonds_class.append(disp_class[i,j])
bonds_pack_class.append(bonds_class)
Theta_pack_class=[]
for k in range(V_class):
for i in range(len(bonds_pack_class[k])):
for j in range(len(bonds_pack_class[k])):
if i!=j:
Theta_pack_class.append(Theta(bonds_pack_class[k][i], bonds_pack_class[k][j]))
Theta_pack_class=np.array(Theta_pack_class)
sns.set_color_codes()
sns.distplot(Theta_pack_class,
kde_kws={"color": "k", "lw": 3},
hist_kws={"histtype": "step", "linewidth": 3,
"alpha": 1, "color": "g"}); #, rug=True, rug_kws={"color": "g"}
plt.xlabel('Angle (Degrees)');
plt.xlim(40,190)
plt.ylabel('Distribution');
plt.grid(True)
plt.title('Bond Angle Distribution (Classical)');
plt.savefig('bond_angle_dist_classical.png')
sns.set_color_codes()
sns.distplot(Theta_pack_AIMD,
kde_kws={"color": "k", "lw": 3},
hist_kws={"histtype": "step", "linewidth": 3,
"alpha": 1, "color": "g"}); #, rug=True, rug_kws={"color": "g"}
plt.xlabel('Angle (Degrees)');
plt.xlim(40,190)
plt.ylabel('Distribution');
plt.grid(True)
plt.title('Bond Angle Distribution (AIMD)');
plt.savefig('bond_angle_dist_AIMD.png')
np.savetxt('bond_angles_AIMD.csv', Theta_pack_AIMD, delimiter=',')
np.savetxt('bond_angles_classical.csv', Theta_pack_class, delimiter=',')
###Output
_____no_output_____ |
STAT641Handout8.ipynb | ###Markdown
Handout 8
###Code
#Location-Scale Reference Distribution Plots
#Example 1 F is a memeber of N(u,sigma^2) with mu and sigma both unknown
#Example 2 F is an exponential cdf with parameter Beta
#Example 3 Uniform CDF
#Example 4 Weibull CDF
# Normal Probability Plots
# Chisq PDF vs Normal QQ
# Normal vs. Student-t distribution
# Normal vs. Uniform Data
# Replicate of refdist.R as refdist.py
#Weibull vs Weibull for Varying Location and Scale
#Plots with Mixture Distributions
#Boxplots
#Replicate ozonecompare.R as ozonecompare.py
#Replicate ozonecompare,time.R
#Epilepsy_plots.R as Epilepsy_plots.py
###Output
_____no_output_____ |
aiml40/absa-instructions.ipynb | ###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Intel NLP-Architect ABSA on AzureML INSTRUCTOR VERSION> **This instructor version of the notebook gives additional instructions as to which cells should be run in demo mode, and which should not. It assumes that before the demo you will execute the complete notebook, and then during the demo certain cells would be re-run to demonstrate working process.**This notebook contains an end-to-end walkthrough of using Azure Machine Learning Service to train, finetune and test [Aspect Based Sentiment Analysis Models using Intel's NLP Architect](http://nlp_architect.nervanasys.com/absa.html) Prerequisites* Understand the architecture and terms introduced by Azure Machine Learning (AML)* Have working Jupyter Notebook Environment. You can: - Install Python environment locally, as described below in **Local Installation** - Use [Azure Notebooks](https://docs.microsoft.com/ru-ru/azure/notebooks/azure-notebooks-overview/?wt.mc_id=absa-notebook-abornst). In this case you should upload the `absa.ipynb` file to a new Azure Notebooks project, or just clone the [GitHub Repo](https://github.com/microsoft/ignite-learning-paths-training-aiml/tree/master/aiml40).* Azure Machine Learning Workspace in your Azure Subscription Local InstallationInstall the Python SDK: make sure to install notebook, and contrib:```shellconda create -n azureml -y Python=3.6source activate azuremlpip install --upgrade azureml-sdk[notebooks,contrib] conda install ipywidgetsjupyter nbextension install --py --user azureml.widgetsjupyter nbextension enable azureml.widgets --user --py```You will need to restart jupyter after this Detailed instructions are [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-create-workspace-with-python/?WT.mc_id=absa-notebook-abornst)If you need a free trial account to get started you can get one [here](https://azure.microsoft.com/en-us/offers/ms-azr-0044p/?WT.mc_id=absa-notebook-abornst) Creating Azure ML WorkspaceAzure ML Workspace can be created by using one of the following ways:* Manually through [Azure Portal](http://portal.azure.com/?WT.mc_id=absa-notebook-abornst) - [here is the complete walkthrough](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-workspace/?wt.mc_id=absa-notebook-abornst)* Using [Azure CLI](https://docs.microsoft.com/ru-ru/cli/azure/?view=azure-cli-latest&wt.mc_id=absa-notebook-abornst), using the following commands:```shellaz extension add -n azure-cli-mlaz group create -n absa -l westus2az ml workspace create -w absa_space -g absa``` Initialize workspaceTo access an Azure ML Workspace, you will need to import the AML library and the following information:* A name for your workspace (in our example - `absa_space`)* Your subscription id (can be obtained by running `az account list`)* The resource group name (in our case `absa`)Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architectureworkspace/?WT.mc_id=absa-notebook-abornst) object from the existing workspace you created in the Prerequisites step or create a new one. > **This cell can be run without problem, because it will just create a connection object for the workspace. Make sure to insert the correct `subscription_id` value before use, or have `config.json` file ready.**
###Code
from azureml.core import Workspace
#subscription_id = ''
#resource_group = 'absa'
#workspace_name = 'absa_space'
#ws = Workspace(subscription_id = subscription_id, resource_group = resource_group, workspace_name = workspace_name)
#ws.write_config()
try:
ws = Workspace.from_config()
print(ws.name, ws.location, ws.resource_group, ws.location, sep='\t')
print('Library configuration succeeded')
except:
print('Workspace not found')
###Output
WARNING - Warning: Falling back to use azure cli login credentials.
If you run your code in unattended mode, i.e., where you can't give a user input, then we recommend to use ServicePrincipalAuthentication or MsiAuthentication.
Please refer to aka.ms/aml-notebook-auth for different authentication mechanisms in azureml-sdk.
###Markdown
Compute There are two compute options, run-once (preview) and persistent compute; for this demo we will use persistent compute. To learn more about run-once compute, check out the [docs](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targetsamlcompute?WT.mc_id=absa-notebook-abornst). > **This cell can be run because it will not re-create the cluster, although it does not make much sense to run it.**
###Code
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# Choose a name for your CPU cluster
cluster_name = "gandalf"
# Verify that cluster does not exist already
try:
cluster = ComputeTarget(workspace=ws, name=cluster_name)
print('Found existing cluster, use it.')
except ComputeTargetException:
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D3_V2',
vm_priority='lowpriority',
min_nodes=1,
max_nodes=4)
cluster = ComputeTarget.create(ws, cluster_name, compute_config)
cluster.wait_for_completion(show_output=True)
###Output
Found existing cluster, use it.
Succeeded
AmlCompute wait for completion finished
Minimum number of nodes requested have been provisioned
###Markdown
Upload Data The dataset we are using comes from the [womens ecommerce clothing reviews dataset](https://www.kaggle.com/nicapotato/womens-ecommerce-clothing-reviews/) and is in the open domain; it can be replaced with any csv file with rows of text, since the ABSA model is unsupervised. The documentation for uploading data can be found [here](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.azure_storage_datastore.azureblobdatastore/?WT.mc_id=absa-notebook-abornst); for now we will use the ds.upload command.
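As a hedged sketch of that upload step (it assumes the files sit in a local ./dataset folder and targets the workspace's default datastore under clothing_data/), the call would look like this:
###Code
# Sketch only: push the local ./dataset folder to the default datastore
ds = ws.get_default_datastore()
ds.upload(src_dir='./dataset', target_path='clothing_data', overwrite=True, show_progress=True)
###Output
_____no_output_____
###Markdown
In this walkthrough the data has already been uploaded, so the next cell fetches the raw files locally if needed and then points at the datastore registered as 'absa':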
###Code
# if using as a separate notebook - fetch files from github repo
if not os.path.isdir('dataset'):
!mkdir dataset
!wget -O 'dataset/clothing_absa_train_small.csv' 'https://raw.githubusercontent.com/microsoft/ignite-learning-paths-training-aiml/master/aiml40/dataset/clothing_absa_train_small.csv'
!wget -O 'dataset/clothing_absa_train.csv' 'https://raw.githubusercontent.com/microsoft/ignite-learning-paths-training-aiml/master/aiml40/dataset/clothing_absa_train.csv'
!wget -O 'dataset/clothing-absa-validation.json' 'https://raw.githubusercontent.com/microsoft/ignite-learning-paths-training-aiml/master/aiml40/dataset/clothing-absa-validation.json'
!wget -O 'dataset/glove.840B.300d.zip' 'http://nlp.stanford.edu/data/glove.840B.300d.zip'
#import os
#lib_root = os.path.dirname(os.path.abspath("__file__"))
#ds = ws.get_default_datastore()
#ds.upload('./dataset', target_path='clothing_data', overwrite=True, show_progress=True)
from azureml.core import Datastore
ds = Datastore.get(ws, 'absa')
###Output
_____no_output_____
###Markdown
Now that the glove file is uploaded to our datastore, we can remove it from our local directory.
###Code
#!rm 'dataset/glove.840B.300d.zip'
###Output
_____no_output_____
###Markdown
Create An Experiment Create an [Experiment](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architectureexperiment/?WT.mc_id=absa-notebook-abornst) to track all the runs in your workspace for this tutorial. > **In most cases, you want to skip the following 3 cells during the demo, in order not to run the experiment again. However, you may also start another experiment if time permits, in which case you can run them**
###Code
from azureml.core import Experiment
experiment_name = 'absa'
exp = Experiment(workspace=ws, name=experiment_name)
from azureml.train.estimator import Estimator
script_params = {
'--data_folder': ds,
}
nlp_est = Estimator(source_directory='.',
script_params=script_params,
compute_target=cluster,
environment_variables = {'NLP_ARCHITECT_BE':'CPU'},
entry_script='train.py',
pip_packages=['git+https://github.com/NervanaSystems/nlp-architect.git@absa',
'spacy==2.1.8']
)
run = exp.submit(nlp_est)
run_id = run.id
print(run_id)
###Output
_____no_output_____
###Markdown
Note: If you accidentally run the submission cell more than once, you can cancel a run with the run.cancel() command.
###Code
# run.cancel()
###Output
_____no_output_____
###Markdown
> **To retrieve the run, we use run id here. It can either be hard-coded from the previous pre-demo run, or you can rely on the jupyter kernel not restarting, in which case it will be saved in the `run_id` variable. So, if the jupyter engine has not been restarted, you may run cell 2, otherwise run cell 1**
###Code
run = [r for r in exp.get_runs() if r.id == 'absa_1568985331_df076c3c'][0]
run = [r for r in exp.get_runs() if r.id == run_id][0]
###Output
_____no_output_____
###Markdown
> **Run this to show the result of the run, either in progress or completed**
###Code
from azureml.widgets import RunDetails
RunDetails(run).show()
###Output
_____no_output_____
###Markdown
Fine-Tuning NLP Architect with AzureML HyperDrive Although ABSA is an unsupervised method, its hyperparameters, such as the aspect and opinion word thresholds, can be fine-tuned if provided with a small sample of labeled data.
###Code
from azureml.train.hyperdrive import *
import math
param_sampling = RandomParameterSampling({
'--asp_thresh': choice(range(2,5)),
'--op_thresh': choice(range(2,5)),
'--max_iter': choice(range(2,5))
})
###Output
_____no_output_____
###Markdown
Early Termination Policy First we will define an early termination policy. [Median stopping](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.hyperdrive.medianstoppingpolicy?WT.mc_id=absa-notebook-abornst) is an early termination policy based on running averages of primary metrics reported by the runs. This policy computes running averages across all training runs and terminates runs whose performance is worse than the median of the running averages. This policy takes the following configuration parameters: - evaluation_interval: the frequency for applying the policy (optional parameter). - delay_evaluation: delays the first policy evaluation for a specified number of intervals (optional parameter).
###Code
early_termination_policy = MedianStoppingPolicy(evaluation_interval=1, delay_evaluation=0)
###Output
_____no_output_____
###Markdown
Refer [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparametersspecify-early-termination-policy?WT.mc_id=absa-notebook-abornst) for more information on the Median stopping policy and other policies available. Now that we've defined our early termination policy, we can define our HyperDrive configuration to maximize our model's weighted F1 score. HyperDrive can optimize any metric as long as it is logged by the training script.
###Code
hd_config = HyperDriveConfig(estimator=nlp_est,
hyperparameter_sampling=param_sampling,
policy=early_termination_policy,
primary_metric_name='f1_weighted',
primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,
max_total_runs=16,
max_concurrent_runs=4)
###Output
_____no_output_____
###Markdown
Finally, launch the hyperparameter tuning job.
###Code
experiment = Experiment(workspace=ws, name='absa_hyperdrive')
hyperdrive_run = experiment.submit(hd_config)
hyperdrive_run.id
hyperdrive_run = [r for r in experiment.get_runs() if r.id == 'absa_hyperdrive_1578973612991526'][0]
###Output
_____no_output_____
###Markdown
Monitor HyperDrive runs We can monitor the progress of the runs with the following Jupyter widget.
###Code
from azureml.widgets import RunDetails
RunDetails(hyperdrive_run).show()
hyperdrive_run.cancel()
###Output
_____no_output_____
###Markdown
Find and register the best model Once all the runs complete, we can find the run that produced the model with the highest weighted F1 score.
###Code
best_run = hyperdrive_run.get_best_run_by_primary_metric()
best_run_metrics = best_run.get_metrics()
print(best_run)
print('Best Run is:\n F1: {0:.5f}'.format(
best_run_metrics['f1_weighted']
))
best_run.get_file_names()
best_run.download_files()
import os
from shutil import copyfile, rmtree
if os.path.exists('model'):
rmtree('model')
os.makedirs('model')
aspect_lex = copyfile('outputs/generated_aspect_lex.csv', 'model/generated_aspect_lex.csv')
opinion_lex = copyfile('outputs/generated_opinion_lex_reranked.csv', 'model/generated_opinion_lex_reranked.csv')
best_run.upload_folder(name="model", path="model")
###Output
_____no_output_____
###Markdown
Register Model Outputs
###Code
model = best_run.register_model(model_name='absa', model_path='model')
from azureml.core import Model
model = Model.register(workspace=ws, model_name='absa', model_path='model',
description='Aspect Based Sentiment Analysis - Intel',
tags={'area': 'NLP', 'type': 'unsupervised', 'model_author': "INTEL"})
###Output
Registering model absa
###Markdown
Test Locally Install Local PIP Dependencies
###Code
!pip install git+https://github.com/NervanaSystems/nlp-architect.git@absa
!pip install spacy==2.0.18
###Output
_____no_output_____
###Markdown
Load Model From AzureML
###Code
from azureml.core.model import Model
from nlp_architect.models.absa.inference.inference import SentimentInference
c_aspect_lex = 'outputs/generated_aspect_lex.csv'
c_opinion_lex = 'outputs/generated_opinion_lex_reranked.csv'
inference = SentimentInference(c_aspect_lex, c_opinion_lex)
###Output
_____no_output_____
###Markdown
Run Model On Sample Data
###Code
docs = ["Loved the sweater but hated the pants",
"Really great outfit, but the shirt is the wrong size",
"I absolutely love this jacket! i wear it almost everyday. works as a cardigan or a jacket. my favorite retailer purchase so far"]
sentiment_docs = []
for doc_raw in docs:
sentiment_doc = inference.run(doc=doc_raw)
sentiment_docs.append(sentiment_doc)
###Output
Processing batch 0
Batch 0 Done
Processing batch 0
Batch 0 Done
Processing batch 0
Batch 0 Done
###Markdown
Visualize Model Results
###Code
import spacy
from spacy import displacy
from nlp_architect.models.absa.inference.data_types import TermType
ents = []
for doc in sentiment_docs:
if doc:
doc_viz = {'text':doc._doc_text, 'ents':[]}
for s in doc._sentences:
for ev in s._events:
for e in ev:
if e._type == TermType.ASPECT:
ent = {'start': e._start, 'end': e._start + e._len,
'label':str(e._polarity.value),
'text':str(e._text)}
if all(kown_e['start'] != ent['start'] for kown_e in ents):
ents.append(ent)
doc_viz['ents'].append(ent)
doc_viz['ents'].sort(key=lambda m: m["start"])
displacy.render(doc_viz, style="ent", options={'colors':{'POS':'#7CFC00', 'NEG':'#FF0000'}},
manual=True, jupyter=True)
###Output
_____no_output_____
###Markdown
Create configuration files Create Environment File Create an environment file, called myenv.yml, that specifies all of the script's package dependencies. This file is used to ensure that all of those dependencies are installed in the Docker image. This model needs nlp-architect and the azureml-sdk.
###Code
from azureml.core.conda_dependencies import CondaDependencies
pip = ["azureml-defaults", "azureml-monitoring",
"git+https://github.com/NervanaSystems/nlp-architect.git@absa",
"spacy==2.0.18",
""]
myenv = CondaDependencies.create(pip_packages=pip)
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string())
###Output
_____no_output_____
###Markdown
Create Environment Config Create an environment configuration and specify the environment variables required for the application.
###Code
from azureml.core import Environment
deploy_env = Environment.from_conda_specification('absa_env', "myenv.yml")
deploy_env.environment_variables={'NLP_ARCHITECT_BE': 'CPU'}
###Output
_____no_output_____
###Markdown
Inference and Deployment Config Create an inference configuration that receives the deployment environment and the entry script, as well as a deployment configuration used to run inferences.
###Code
from azureml.core.model import InferenceConfig
from azureml.core.webservice import AciWebservice
inference_config = InferenceConfig(environment=deploy_env,
entry_script="score.py")
deploy_config = AciWebservice.deploy_configuration(cpu_cores=1,
memory_gb=1,
description='Aspect-Based Sentiment Analysis - Intel')
###Output
_____no_output_____
###Markdown
Quick Deploy! Create a deployment of the model using the scoring file.
###Code
deployment = Model.deploy(ws, 'absa',
models=[model],
inference_config=inference_config,
deployment_config=deploy_config,
overwrite=True)
###Output
_____no_output_____
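###Markdown
Once `Model.deploy` returns we can exercise the service end to end. The next cell is a hedged sketch rather than part of the original demo: it assumes the ACI deployment comes up healthy, and the exact payload format depends on how `score.py` interprets its `raw_data` argument, so it may need adjusting.
###Code
# Sketch: block until the ACI deployment finishes, then send one review for scoring
deployment.wait_for_deployment(show_output=True)
print(deployment.scoring_uri)
print(deployment.run("Loved the sweater but hated the pants"))
###Output
_____no_output_____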
###Markdown
Now that the glove file is uploaded to our datastore, we can remove it from our local directory.
###Code
!rm 'dataset/glove.840B.300d.zip'
###Output
_____no_output_____
###Markdown
Train File > **It does not matter if you execute this cell or not, because it will just overwrite the file. You may execute it, just to make the demo more live**
###Code
%%writefile train.py
import argparse
import json
import os
from pathlib import Path
from nltk import flatten
from azureml.core import Run
from sklearn.metrics import f1_score
from azureml.core.model import Model
# Load NLP Architect
from nlp_architect.models.absa.train.train import TrainSentiment
from nlp_architect.models.absa.inference.inference import SentimentInference
# Inputs
parser = argparse.ArgumentParser(description='ABSA Train')
parser.add_argument('--data_folder', type=str, dest='data_folder', help='data folder mounting point')
parser.add_argument('--asp_thresh', type=int, default=3)
parser.add_argument('--op_thresh', type=int, default=2)
parser.add_argument('--max_iter', type=int, default=3)
args = parser.parse_args()
# Download ABSA dependencies including spacy parser and glove embeddings
from spacy.cli.download import download as spacy_download
from nlp_architect.utils.io import uncompress_file
from nlp_architect.models.absa import TRAIN_OUT
spacy_download('en')
GLOVE_ZIP = os.path.join(args.data_folder,
'clothing_data/glove.840B.300d.zip')
EMBEDDING_PATH = TRAIN_OUT / 'word_emb_unzipped' / 'glove.840B.300d.txt'
uncompress_file(GLOVE_ZIP, Path(EMBEDDING_PATH).parent)
clothing_train = os.path.join(args.data_folder,
'clothing_data/clothing_absa_train_small.csv')
os.makedirs('outputs', exist_ok=True)
train = TrainSentiment(asp_thresh=args.asp_thresh,
op_thresh=args.op_thresh,
max_iter=args.max_iter)
opinion_lex, aspect_lex = train.run(data=clothing_train,
out_dir = './outputs')
# Evaluation
# Although ABSA is an unsupervised method, it can be evaluated with a small sample of labeled data
def doc2IO(doc):
"""
Converts ABSA doc to IO span format for evaluation
"""
index = 0
aspect_indexes = []
doc_json = json.loads(doc.json())
tokens = doc_json["_doc_text"].split()
io = [[t,'O'] for t in tokens]
for t_index, token in enumerate(tokens):
for s in doc_json["_sentences"]:
for ev in s["_events"]:
for e in ev:
if e["_type"] == "ASPECT":
if e["_start"] == index and all(aspect[0] != t_index for aspect in aspect_indexes):
io[t_index][1] = "{}-{}".format(e["_text"], e["_polarity"])
index += len(token) + 1
return io
inference = SentimentInference('./outputs/train_out/generated_aspect_lex.csv',
'./outputs/train_out/generated_opinion_lex_reranked.csv')
clothing_val = os.path.join(args.data_folder,
'clothing_data/clothing-absa-validation.json')
with open(clothing_val) as json_file:
val = json.load(json_file)
predictions = []
for doc in val["data"]:
doc_raw = " ".join([token[0] for token in doc])
sentiment_doc = inference.run(doc=doc_raw)
predictions.append(doc2IO(sentiment_doc))
y_pred = flatten(predictions)[1::2]
y_true = flatten(val['data'])[1::2]
from sklearn.metrics import f1_score
# Log metrics
run = Run.get_context()
run.log('Aspect Lexicon Size', len(aspect_lex))
run.log('Opinion Lexicon Size', len(opinion_lex))
run.log('f1_weighted', float(f1_score(y_true, y_pred, average='weighted')))
###Output
Overwriting train.py
###Markdown
Create An Experiment Create an [Experiment](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architectureexperiment/?WT.mc_id=absa-notebook-abornst) to track all the runs in your workspace for this tutorial. > **In most cases, you want to skip the following 3 cells during the demo, in order not to run the experiment again. However, you may also start another experiment if time permits, in which case you can run them**
###Code
from azureml.core import Experiment
experiment_name = 'absa'
exp = Experiment(workspace=ws, name=experiment_name)
from azureml.train.estimator import Estimator
script_params = {
'--data_folder': ds,
}
nlp_est = Estimator(source_directory='.',
script_params=script_params,
compute_target=cluster,
environment_variables = {'NLP_ARCHITECT_BE':'CPU'},
entry_script='train.py',
pip_packages=['git+https://github.com/NervanaSystems/nlp-architect.git@absa',
'spacy==2.1.8']
)
run = exp.submit(nlp_est)
run_id = run.id
print(run_id)
###Output
_____no_output_____
###Markdown
> **To retrieve the run, we use run id here. It can either be hard-coded from the previous pre-demo run, or you can rely on the jupyter kernel not restarting, in which case it will be saved in the `run_id` variable. So, if the jupyter engine has not been restarted, you may run cell 2, otherwise run cell 1**
###Code
run = [r for r in exp.get_runs() if r.id == 'absa_1568985331_df076c3c'][0]
run = [r for r in exp.get_runs() if r.id == run_id][0]
###Output
_____no_output_____
###Markdown
> **Run this to show the result of the run, either in progress or completed**
###Code
from azureml.widgets import RunDetails
RunDetails(run).show()
###Output
_____no_output_____
###Markdown
Fine-Tuning NLP Architect with AzureML HyperDrive Although ABSA is an unsupervised method, its hyperparameters, such as the aspect and opinion word thresholds, can be fine-tuned if provided with a small sample of labeled data.
###Code
from azureml.train.hyperdrive import *
import math
param_sampling = RandomParameterSampling({
'--asp_thresh': choice(range(2,5)),
'--op_thresh': choice(range(2,5)),
'--max_iter': choice(range(2,5))
})
###Output
_____no_output_____
###Markdown
Early Termination Policy First we will define an early termination policy. [Median stopping](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.hyperdrive.medianstoppingpolicy?WT.mc_id=absa-notebook-abornst) is an early termination policy based on running averages of primary metrics reported by the runs. This policy computes running averages across all training runs and terminates runs whose performance is worse than the median of the running averages. This policy takes the following configuration parameters: - evaluation_interval: the frequency for applying the policy (optional parameter). - delay_evaluation: delays the first policy evaluation for a specified number of intervals (optional parameter).
###Code
early_termination_policy = MedianStoppingPolicy(evaluation_interval=1, delay_evaluation=0)
###Output
_____no_output_____
###Markdown
Refer [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparametersspecify-early-termination-policy?WT.mc_id=absa-notebook-abornst) for more information on the Median stopping policy and other policies available. Now that we've defined our early termination policy, we can define our HyperDrive configuration to maximize our model's weighted F1 score. HyperDrive can optimize any metric as long as it is logged by the training script.
###Code
hd_config = HyperDriveConfig(estimator=nlp_est,
hyperparameter_sampling=param_sampling,
policy=early_termination_policy,
primary_metric_name='f1_weighted',
primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,
max_total_runs=16,
max_concurrent_runs=4)
###Output
_____no_output_____
###Markdown
Finally, launch the hyperparameter tuning job.
###Code
experiment = Experiment(workspace=ws, name='absa_hyperdrive')
hyperdrive_run = experiment.submit(hd_config)
hyperdrive_run.id
hyperdrive_run = [r for r in experiment.get_runs() if r.id == 'absa_hyperdrive_1571092544235933'][0]
###Output
_____no_output_____
###Markdown
Monitor HyperDrive runs We can monitor the progress of the runs with the following Jupyter widget.
###Code
from azureml.widgets import RunDetails
RunDetails(hyperdrive_run).show()
hyperdrive_run.cancel()
###Output
_____no_output_____
###Markdown
Find and register the best model Once all the runs complete, we can find the run that produced the model with the highest weighted F1 score.
###Code
best_run = hyperdrive_run.get_best_run_by_primary_metric()
best_run_metrics = best_run.get_metrics()
print(best_run)
print('Best Run is:\n F1: {0:.5f}'.format(
best_run_metrics['f1_weighted']
))
###Output
_____no_output_____
###Markdown
Register Model Outputs
###Code
aspect_lex = run.register_model(model_name='aspect_lex', model_path='outputs/train_out/generated_aspect_lex.csv')
opinion_lex = run.register_model(model_name='opinion_lex', model_path='outputs/train_out/generated_opinion_lex_reranked.csv')
###Output
_____no_output_____
###Markdown
Test Locally Install Local PIP Dependencies
###Code
!pip install git+https://github.com/NervanaSystems/nlp-architect.git@absa
!pip install spacy==2.0.18
###Output
_____no_output_____
###Markdown
Load Model From AzureML
###Code
from azureml.core.model import Model
from nlp_architect.models.absa.inference.inference import SentimentInference
c_aspect_lex = Model._get_model_path_remote('c_aspect_lex', 1, ws)
c_opinion_lex = Model._get_model_path_remote('c_opinion_lex', 1, ws)
inference = SentimentInference(c_aspect_lex, c_opinion_lex)
###Output
Using pre-trained BIST model.
###Markdown
Run Model On Sample Data
###Code
docs = ["Loved the sweater but hated the pants",
"Really great outfit, but the shirt is the wrong size",
"I absolutely love this jacket! i wear it almost everyday. works as a cardigan or a jacket. my favorite retailer purchase so far"]
sentiment_docs = []
for doc_raw in docs:
sentiment_doc = inference.run(doc=doc_raw)
sentiment_docs.append(sentiment_doc)
###Output
_____no_output_____
###Markdown
Visualize Model Results
###Code
import spacy
from spacy import displacy
from nlp_architect.models.absa.inference.data_types import TermType
ents = []
for doc in sentiment_docs:
if doc:
doc_viz = {'text':doc._doc_text, 'ents':[]}
for s in doc._sentences:
for ev in s._events:
for e in ev:
if e._type == TermType.ASPECT:
ent = {'start': e._start, 'end': e._start + e._len,
'label':str(e._polarity.value),
'text':str(e._text)}
if all(kown_e['start'] != ent['start'] for kown_e in ents):
ents.append(ent)
doc_viz['ents'].append(ent)
doc_viz['ents'].sort(key=lambda m: m["start"])
displacy.render(doc_viz, style="ent", options={'colors':{'POS':'#7CFC00', 'NEG':'#FF0000'}}, manual=True)
###Output
_____no_output_____
###Markdown
Package Model For Deployment Create scoring script Create the scoring script, called score.py, used by the web service call to show how to use the model. You must include two required functions in the scoring script: the init() function, which typically loads the model into a global object (it runs only once, when the Docker container is started), and the run(input_data) function, which uses the model to predict a value based on the input data. Inputs and outputs to run typically use JSON for serialization and de-serialization, but other formats are supported.
###Code
%%writefile score.py
from azureml.core.model import Model
from nlp_architect.models.absa.inference.inference import SentimentInference
from spacy.cli.download import download as spacy_download
def init():
"""
Set up the ABSA model for Inference
"""
global SentInference
spacy_download('en')
aspect_lex = Model.get_model_path('c_aspect_lex')
opinion_lex = Model.get_model_path('c_opinion_lex')
SentInference = SentimentInference(aspect_lex, opinion_lex)
def run(raw_data):
"""
Evaluate the model and return JSON string
"""
sentiment_doc = SentInference.run(doc=raw_data)
return sentiment_doc.json()
###Output
_____no_output_____
###Markdown
Create configuration files Create Environment File Create an environment file, called myenv.yml, that specifies all of the script's package dependencies. This file is used to ensure that all of those dependencies are installed in the Docker image. This model needs nlp-architect and the azureml-sdk.
###Code
from azureml.core.conda_dependencies import CondaDependencies
pip = ["azureml-defaults", "azureml-monitoring",
"git+https://github.com/NervanaSystems/nlp-architect.git@absa",
"spacy==2.0.18",
""]
myenv = CondaDependencies.create(pip_packages=pip)
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string())
###Output
_____no_output_____
###Markdown
Create Environment Config Create an environment configuration and specify the environment variables required for the application.
###Code
from azureml.core import Environment
deploy_env = Environment.from_conda_specification('absa_env', "myenv.yml")
deploy_env.environment_variables={'NLP_ARCHITECT_BE': 'CPU'}
###Output
_____no_output_____
###Markdown
Inference Config Create an inference configuration that receives the deployment environment and the entry script.
###Code
from azureml.core.model import InferenceConfig
inference_config = InferenceConfig(environment=deploy_env,
entry_script="score.py")
###Output
_____no_output_____
###Markdown
Package Model and Pull Package the registered lexicons together with the inference configuration into a Docker image and pull that image locally.
###Code
package = Model.package(ws, [aspect_lex, opinion_lex], inference_config)
package.wait_for_creation(show_output=True)
package.pull()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Intel NLP-Architect ABSA on AzureML INSTRUCTOR VERSION> **This instructor version of the notebook gives additional instructions as to which cells should be run in demo mode, and which should not. It assumes that before the demo you will execute the complete notebook, and then during the demo certain cells would be re-run to demonstrate working process.**This notebook contains an end-to-end walkthrough of using Azure Machine Learning Service to train, finetune and test [Aspect Based Sentiment Analysis Models using Intel's NLP Architect](http://nlp_architect.nervanasys.com/absa.html) Prerequisites* Understand the architecture and terms introduced by Azure Machine Learning (AML)* Have working Jupyter Notebook Environment. You can: - Install Python environment locally, as described below in **Local Installation** - Use [Azure Notebooks](https://docs.microsoft.com/ru-ru/azure/notebooks/azure-notebooks-overview/?wt.mc_id=absa-notebook-abornst). In this case you should upload the `absa.ipynb` file to a new Azure Notebooks project, or just clone the [GitHub Repo](https://github.com/microsoft/ignite-learning-paths/tree/master/aiml/aiml40).* Azure Machine Learning Workspace in your Azure Subscription Local InstallationInstall the Python SDK: make sure to install notebook, and contrib:```shellconda create -n azureml -y Python=3.6source activate azuremlpip install --upgrade azureml-sdk[notebooks,contrib] conda install ipywidgetsjupyter nbextension install --py --user azureml.widgetsjupyter nbextension enable azureml.widgets --user --py```You will need to restart jupyter after this Detailed instructions are [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-create-workspace-with-python/?WT.mc_id=absa-notebook-abornst)If you need a free trial account to get started you can get one [here](https://azure.microsoft.com/en-us/offers/ms-azr-0044p/?WT.mc_id=absa-notebook-abornst) Creating Azure ML WorkspaceAzure ML Workspace can be created by using one of the following ways:* Manually through [Azure Portal](http://portal.azure.com/?WT.mc_id=absa-notebook-abornst) - [here is the complete walkthrough](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-workspace/?wt.mc_id=absa-notebook-abornst)* Using [Azure CLI](https://docs.microsoft.com/ru-ru/cli/azure/?view=azure-cli-latest&wt.mc_id=absa-notebook-abornst), using the following commands:```shellaz extension add -n azure-cli-mlaz group create -n absa -l westus2az ml workspace create -w absa_space -g absa``` Initialize workspaceTo access an Azure ML Workspace, you will need to import the AML library and the following information:* A name for your workspace (in our example - `absa_space`)* Your subscription id (can be obtained by running `az account list`)* The resource group name (in our case `absa`)Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architectureworkspace/?WT.mc_id=absa-notebook-abornst) object from the existing workspace you created in the Prerequisites step or create a new one. > **This cell can be run without problem, because it will just create a connection object for the workspace. Make sure to insert the correct `subscription_id` value before use, or have `config.json` file ready.**
###Code
from azureml.core import Workspace
#subscription_id = ''
#resource_group = 'absa'
#workspace_name = 'absa_space'
#ws = Workspace(subscription_id = subscription_id, resource_group = resource_group, workspace_name = workspace_name)
#ws.write_config()
try:
ws = Workspace.from_config()
print(ws.name, ws.location, ws.resource_group, ws.location, sep='\t')
print('Library configuration succeeded')
except:
print('Workspace not found')
###Output
abla_space westeurope abla westeurope
Library configuration succeeded
###Markdown
Compute There are two compute options, run-once (preview) and persistent compute; for this demo we will use persistent compute. To learn more about run-once compute, check out the [docs](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targetsamlcompute?WT.mc_id=absa-notebook-abornst). > **This cell can be run because it will not re-create the cluster, although it does not make much sense to run it.**
###Code
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# Choose a name for your CPU cluster
cluster_name = "absa-cluster"
# Verify that cluster does not exist already
try:
cluster = ComputeTarget(workspace=ws, name=cluster_name)
print('Found existing cluster, use it.')
except ComputeTargetException:
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D3_V2',
vm_priority='lowpriority',
min_nodes=1,
max_nodes=4)
cluster = ComputeTarget.create(ws, cluster_name, compute_config)
cluster.wait_for_completion(show_output=True)
###Output
Found existing cluster, use it.
Succeeded
AmlCompute wait for completion finished
Minimum number of nodes requested have been provisioned
###Markdown
Upload Data The dataset we are using comes from the [womens ecommerce clothing reviews dataset](https://www.kaggle.com/nicapotato/womens-ecommerce-clothing-reviews/) and is in the open domain; it can be replaced with any csv file with rows of text, since the ABSA model is unsupervised. The documentation for uploading data can be found [here](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.azure_storage_datastore.azureblobdatastore/?WT.mc_id=absa-notebook-abornst); for now we will use the ds.upload command.
###Code
# if using as a separate notebook - fetch files from github repo
if not os.path.isdir('dataset'):
!mkdir dataset
!wget -O 'dataset/clothing_absa_train_small.csv' 'https://raw.githubusercontent.com/microsoft/ignite-learning-paths-training-aiml/master/aiml40/dataset/clothing_absa_train_small.csv'
!wget -O 'dataset/clothing_absa_train.csv' 'https://raw.githubusercontent.com/microsoft/ignite-learning-paths-training-aiml/master/aiml40/dataset/clothing_absa_train.csv'
!wget -O 'dataset/clothing-absa-validation.json' 'https://raw.githubusercontent.com/microsoft/ignite-learning-paths-training-aiml/master/aiml40/dataset/clothing-absa-validation.json'
!wget -O 'dataset/glove.840B.300d.zip' 'http://nlp.stanford.edu/data/glove.840B.300d.zip'
import os
lib_root = os.path.dirname(os.path.abspath("__file__"))
ds = ws.get_default_datastore()
ds.upload('./dataset', target_path='clothing_data', overwrite=True, show_progress=True)
###Output
_____no_output_____
###Markdown
Now that the glove file is uploaded to our datastore, we can remove it from our local directory.
###Code
!rm 'dataset/glove.840B.300d.zip'
###Output
_____no_output_____
###Markdown
Train File > **It does not matter if you execute this cell or not, because it will just overwrite the file. You may execute it, just to make the demo more live**
###Code
%%writefile train.py
import argparse
import json
import os
import shutil
from pathlib import Path
from nltk import flatten
from azureml.core import Run
from sklearn.metrics import f1_score
from azureml.core.model import Model
# Load NLP Architect
from nlp_architect.models.absa.train.train import TrainSentiment
from nlp_architect.models.absa.inference.inference import SentimentInference
# Inputs
parser = argparse.ArgumentParser(description='ABSA Train')
parser.add_argument('--data_folder', type=str, dest='data_folder', help='data folder mounting point')
parser.add_argument('--asp_thresh', type=int, default=3)
parser.add_argument('--op_thresh', type=int, default=2)
parser.add_argument('--max_iter', type=int, default=3)
args = parser.parse_args()
# Download ABSA dependencies including spacy parser and glove embeddings
from spacy.cli.download import download as spacy_download
from nlp_architect.utils.io import uncompress_file
from nlp_architect.models.absa import TRAIN_OUT, LEXICONS_OUT
spacy_download('en')
GLOVE_ZIP = os.path.join(args.data_folder,
'clothing_data/glove.840B.300d.zip')
EMBEDDING_PATH = TRAIN_OUT / 'word_emb_unzipped' / 'glove.840B.300d.txt'
uncompress_file(GLOVE_ZIP, Path(EMBEDDING_PATH).parent)
clothing_train = os.path.join(args.data_folder,
'clothing_data/clothing_absa_train_small.csv')
os.makedirs('outputs', exist_ok=True)
train = TrainSentiment(asp_thresh=args.asp_thresh,
op_thresh=args.op_thresh,
max_iter=args.max_iter)
opinion_lex, aspect_lex = train.run(data=clothing_train,
out_dir = './outputs')
#Copy lexicons to outputs folder
asp_lex = shutil.copy(LEXICONS_OUT / 'generated_aspect_lex.csv', './outputs')
op_lex = shutil.copy(LEXICONS_OUT / 'generated_opinion_lex_reranked.csv', './outputs')
# Evaluation
# Although ABSA is an unsupervised method, it can be evaluated with a small sample of labeled data
def doc2IO(doc):
"""
Converts ABSA doc to IO span format for evaluation
"""
index = 0
aspect_indexes = []
doc_json = json.loads(doc.json())
tokens = doc_json["_doc_text"].split()
io = [[t,'O'] for t in tokens]
for t_index, token in enumerate(tokens):
for s in doc_json["_sentences"]:
for ev in s["_events"]:
for e in ev:
if e["_type"] == "ASPECT":
if e["_start"] == index and all(aspect[0] != t_index for aspect in aspect_indexes):
io[t_index][1] = "{}-{}".format(e["_text"], e["_polarity"])
index += len(token) + 1
return io
inference = SentimentInference(LEXICONS_OUT / 'generated_aspect_lex.csv',
LEXICONS_OUT / 'generated_opinion_lex_reranked.csv')
clothing_val = os.path.join(args.data_folder,
'clothing_data/clothing-absa-validation.json')
with open(clothing_val) as json_file:
val = json.load(json_file)
predictions = []
vals = []
for doc in val["data"]:
doc_raw = " ".join([token[0] for token in doc])
sentiment_doc = inference.run(doc=doc_raw)
if sentiment_doc is not None:
predictions.append(doc2IO(sentiment_doc))
vals.append(doc)
y_pred = flatten(predictions)[1::2]
y_true = flatten(vals)[1::2]
from sklearn.metrics import f1_score
# Log metrics
run = Run.get_context()
run.log('Aspect Lexicon Size', len(aspect_lex))
run.log('Opinion Lexicon Size', len(opinion_lex))
run.log('f1_weighted', float(f1_score(y_true, y_pred, average='weighted')))
###Output
Overwriting train.py
###Markdown
Create An Experiment Create an [Experiment](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architectureexperiment/?WT.mc_id=absa-notebook-abornst) to track all the runs in your workspace for this tutorial. > **In most cases, you want to skip the following 3 cells during the demo, in order not to run the experiment again. However, you may also start another experiment if time permits, in which case you can run them**
###Code
from azureml.core import Experiment
experiment_name = 'absa'
exp = Experiment(workspace=ws, name=experiment_name)
from azureml.train.estimator import Estimator
script_params = {
'--data_folder': ds,
}
nlp_est = Estimator(source_directory='.',
script_params=script_params,
compute_target=cluster,
environment_variables = {'NLP_ARCHITECT_BE':'CPU'},
entry_script='train.py',
pip_packages=['git+https://github.com/NervanaSystems/nlp-architect.git@absa',
'spacy==2.1.8']
)
run = exp.submit(nlp_est)
run_id = run.id
print(run_id)
###Output
_____no_output_____
###Markdown
Note: If you accidentally run the submission cell more than once, you can cancel a run with the run.cancel() command.
###Code
# run.cancel()
###Output
_____no_output_____
###Markdown
> **To retrieve the run, we use run id here. It can either be hard-coded from the previous pre-demo run, or you can rely on the jupyter kernel not restarting, in which case it will be saved in the `run_id` variable. So, if the jupyter engine has not been restarted, you may run cell 2, otherwise run cell 1**
###Code
run = [r for r in exp.get_runs() if r.id == 'absa_1568985331_df076c3c'][0]
run = [r for r in exp.get_runs() if r.id == run_id][0]
###Output
_____no_output_____
###Markdown
> **Run this to show the result of the run, either in progress or completed**
###Code
from azureml.widgets import RunDetails
RunDetails(run).show()
###Output
_____no_output_____
###Markdown
Fine-Tuning NLP Architect with AzureML HyperDrive Although ABSA is an unsupervised method, its hyperparameters, such as the aspect and opinion word thresholds, can be fine-tuned if provided with a small sample of labeled data.
###Code
from azureml.train.hyperdrive import *
import math
param_sampling = RandomParameterSampling({
'--asp_thresh': choice(range(2,5)),
'--op_thresh': choice(range(2,5)),
'--max_iter': choice(range(2,5))
})
###Output
_____no_output_____
###Markdown
Early Termination Policy First we will define an early termination policy. [Median stopping](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.hyperdrive.medianstoppingpolicy?WT.mc_id=absa-notebook-abornst) is an early termination policy based on running averages of primary metrics reported by the runs. This policy computes running averages across all training runs and terminates runs whose performance is worse than the median of the running averages. This policy takes the following configuration parameters: - evaluation_interval: the frequency for applying the policy (optional parameter). - delay_evaluation: delays the first policy evaluation for a specified number of intervals (optional parameter).
###Code
early_termination_policy = MedianStoppingPolicy(evaluation_interval=1, delay_evaluation=0)
###Output
_____no_output_____
###Markdown
Refer [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparametersspecify-early-termination-policy?WT.mc_id=absa-notebook-abornst) for more information on the Median stopping policy and other policies available. Now that we've defined our early termination policy, we can define our HyperDrive configuration to maximize our model's weighted F1 score. HyperDrive can optimize any metric as long as it is logged by the training script.
###Code
hd_config = HyperDriveConfig(estimator=nlp_est,
hyperparameter_sampling=param_sampling,
policy=early_termination_policy,
primary_metric_name='f1_weighted',
primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,
max_total_runs=16,
max_concurrent_runs=4)
###Output
_____no_output_____
###Markdown
Finally, launch the hyperparameter tuning job.
###Code
experiment = Experiment(workspace=ws, name='absa_hyperdrive')
hyperdrive_run = experiment.submit(hd_config)
hyperdrive_run.id
hyperdrive_run = [r for r in experiment.get_runs() if r.id == 'absa_hyperdrive_1571092544235933'][0]
###Output
_____no_output_____
###Markdown
Monitor HyperDrive runs We can monitor the progress of the runs with the following Jupyter widget.
###Code
from azureml.widgets import RunDetails
RunDetails(hyperdrive_run).show()
hyperdrive_run.cancel()
###Output
_____no_output_____
###Markdown
Find and register the best modelOnce all the runs complete, we can find the run that produced the model with the highest evaluation (METRIC TBD).
###Code
best_run = hyperdrive_run.get_best_run_by_primary_metric()
best_run_metrics = best_run.get_metrics()
print(best_run)
print('Best Run is:\n F1: {0:.5f}'.format(
best_run_metrics['f1_weighted']
))
###Output
_____no_output_____
###Markdown
Register Model Outputs
###Code
aspect_lex = run.register_model(model_name='aspect_lex', model_path='outputs/generated_aspect_lex.csv')
opinion_lex = run.register_model(model_name='opinion_lex', model_path='outputs/generated_opinion_lex_reranked.csv')
###Output
_____no_output_____
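###Markdown
The cell above registers the lexicons produced by the single training run. If you prefer the artifacts of the best HyperDrive child run instead, a sketch (assuming that run wrote the same files under `outputs/`):
###Code
# register the lexicons from the tuned run rather than the original run
best_aspect_lex = best_run.register_model(model_name='aspect_lex',
                                          model_path='outputs/generated_aspect_lex.csv')
best_opinion_lex = best_run.register_model(model_name='opinion_lex',
                                           model_path='outputs/generated_opinion_lex_reranked.csv')
###Output
_____no_output_____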
###Markdown
Test Locally Install Local PIP Dependencies
###Code
!pip install git+https://github.com/NervanaSystems/nlp-architect.git@absa
!pip install spacy==2.0.18
###Output
_____no_output_____
###Markdown
Load Model From AzureML
###Code
from azureml.core.model import Model
from nlp_architect.models.absa.inference.inference import SentimentInference
c_aspect_lex = Model._get_model_path_remote('c_aspect_lex', 1, ws)
c_opinion_lex = Model._get_model_path_remote('c_opinion_lex', 1, ws)
inference = SentimentInference(c_aspect_lex, c_opinion_lex)
###Output
Using pre-trained BIST model.
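###Markdown
`Model._get_model_path_remote` is a private helper; a sketch of the public way to fetch the same registered files, assuming models named `c_aspect_lex` and `c_opinion_lex` exist in the workspace:
###Code
# download the registered lexicons into the current directory
aspect_path = Model(ws, 'c_aspect_lex').download(target_dir='.', exist_ok=True)
opinion_path = Model(ws, 'c_opinion_lex').download(target_dir='.', exist_ok=True)
print(aspect_path, opinion_path)
###Output
_____no_output_____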
###Markdown
Run Model On Sample Data
###Code
docs = ["Loved the sweater but hated the pants",
"Really great outfit, but the shirt is the wrong size",
"I absolutely love this jacket! i wear it almost everyday. works as a cardigan or a jacket. my favorite retailer purchase so far"]
sentiment_docs = []
for doc_raw in docs:
sentiment_doc = inference.run(doc=doc_raw)
sentiment_docs.append(sentiment_doc)
###Output
_____no_output_____
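###Markdown
Each returned document can be serialized with its `json()` method (the same call the scoring script uses later), which makes quick inspection easy. A small sketch:
###Code
import json
# look at the parsed aspects/opinions of the first document, if inference returned one
if sentiment_docs and sentiment_docs[0] is not None:
    print(json.dumps(json.loads(sentiment_docs[0].json()), indent=2)[:500])
###Output
_____no_output_____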
###Markdown
Visualize Model Results
###Code
import spacy
from spacy import displacy
from nlp_architect.models.absa.inference.data_types import TermType
ents = []
for doc in sentiment_docs:
if doc:
doc_viz = {'text':doc._doc_text, 'ents':[]}
for s in doc._sentences:
for ev in s._events:
for e in ev:
if e._type == TermType.ASPECT:
ent = {'start': e._start, 'end': e._start + e._len,
'label':str(e._polarity.value),
'text':str(e._text)}
if all(kown_e['start'] != ent['start'] for kown_e in ents):
ents.append(ent)
doc_viz['ents'].append(ent)
doc_viz['ents'].sort(key=lambda m: m["start"])
displacy.render(doc_viz, style="ent", options={'colors':{'POS':'#7CFC00', 'NEG':'#FF0000'}},
manual=True, jupyter=True)
###Output
_____no_output_____
###Markdown
Package Model For Deployment Create scoring scriptCreate the scoring script, called score.py, used by the web service call to show how to use the model.You must include two required functions into the scoring script:The init() function, which typically loads the model into a global object. This function is run only once when the Docker container is started.The run(input_data) function uses the model to predict a value based on the input data. Inputs and outputs to the run typically use JSON for serialization and de-serialization, but other formats are supported.
###Code
%%writefile score.py
from azureml.core.model import Model
from nlp_architect.models.absa.inference.inference import SentimentInference
from spacy.cli.download import download as spacy_download
def init():
"""
Set up the ABSA model for Inference
"""
global SentInference
spacy_download('en')
aspect_lex = Model.get_model_path('c_aspect_lex')
opinion_lex = Model.get_model_path('c_opinion_lex')
SentInference = SentimentInference(aspect_lex, opinion_lex)
def run(raw_data):
"""
Evaluate the model and return JSON string
"""
sentiment_doc = SentInference.run(doc=raw_data)
return sentiment_doc.json()
###Output
_____no_output_____
###Markdown
Create configuration files Create Environment FileCreate an environment file, called myenv.yml, that specifies all of the script's package dependencies. This file is used to ensure that all of those dependencies are installed in the Docker image. This model needs nlp-architect and the azureml-sdk.
###Code
from azureml.core.conda_dependencies import CondaDependencies
pip = ["azureml-defaults", "azureml-monitoring",
"git+https://github.com/NervanaSystems/nlp-architect.git@absa",
"spacy==2.0.18",
""]
myenv = CondaDependencies.create(pip_packages=pip)
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string())
###Output
_____no_output_____
###Markdown
Create Environment ConfigCreate an Environment configuration file and specify the environment and environment variables required for the application
###Code
from azureml.core import Environment
deploy_env = Environment.from_conda_specification('absa_env', "myenv.yml")
deploy_env.environment_variables={'NLP_ARCHITECT_BE': 'CPU'}
###Output
_____no_output_____
###Markdown
Inference Config Create an inference configuration that receives the deployment environment and the entry script
###Code
from azureml.core.model import InferenceConfig
inference_config = InferenceConfig(environment=deploy_env,
entry_script="score.py")
###Output
_____no_output_____
###Markdown
Package Model and Pull Package the registered models with the inference configuration and pull the resulting container image locally
###Code
package = Model.package(ws, [aspect_lex, opinion_lex], inference_config)
package.wait_for_creation(show_output=True)
package.pull()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Intel NLP-Architect ABSA on AzureML INSTRUCTOR VERSION> **This instructor version of the notebook gives additional instructions as to which cells should be run in demo mode, and which should not. It assumes that before the demo you will execute the complete notebook, and then during the demo certain cells would be re-run to demonstrate working process.**This notebook contains an end-to-end walkthrough of using Azure Machine Learning Service to train, finetune and test [Aspect Based Sentiment Analysis Models using Intel's NLP Architect](http://nlp_architect.nervanasys.com/absa.html) Prerequisites* Understand the architecture and terms introduced by Azure Machine Learning (AML)* Have working Jupyter Notebook Environment. You can: - Install Python environment locally, as described below in **Local Installation** - Use [Azure Notebooks](https://docs.microsoft.com/ru-ru/azure/notebooks/azure-notebooks-overview/?wt.mc_id=absa-notebook-abornst). In this case you should upload the `absa.ipynb` file to a new Azure Notebooks project, or just clone the [GitHub Repo](https://github.com/microsoft/ignite-learning-paths/tree/master/aiml/aiml40).* Azure Machine Learning Workspace in your Azure Subscription Local InstallationInstall the Python SDK: make sure to install notebook, and contrib:```shellconda create -n azureml -y Python=3.6source activate azuremlpip install --upgrade azureml-sdk[notebooks,contrib] conda install ipywidgetsjupyter nbextension install --py --user azureml.widgetsjupyter nbextension enable azureml.widgets --user --py```You will need to restart jupyter after this Detailed instructions are [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-create-workspace-with-python/?WT.mc_id=absa-notebook-abornst)If you need a free trial account to get started you can get one [here](https://azure.microsoft.com/en-us/offers/ms-azr-0044p/?WT.mc_id=absa-notebook-abornst) Creating Azure ML WorkspaceAzure ML Workspace can be created by using one of the following ways:* Manually through [Azure Portal](http://portal.azure.com/?WT.mc_id=absa-notebook-abornst) - [here is the complete walkthrough](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-workspace/?wt.mc_id=absa-notebook-abornst)* Using [Azure CLI](https://docs.microsoft.com/ru-ru/cli/azure/?view=azure-cli-latest&wt.mc_id=absa-notebook-abornst), using the following commands:```shellaz extension add -n azure-cli-mlaz group create -n absa -l westus2az ml workspace create -w absa_space -g absa``` Initialize workspaceTo access an Azure ML Workspace, you will need to import the AML library and the following information:* A name for your workspace (in our example - `absa_space`)* Your subscription id (can be obtained by running `az account list`)* The resource group name (in our case `absa`)Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architectureworkspace/?WT.mc_id=absa-notebook-abornst) object from the existing workspace you created in the Prerequisites step or create a new one. > **This cell can be run without problem, because it will just create a connection object for the workspace. Make sure to insert the correct `subscription_id` value before use, or have `config.json` file ready.**
###Code
from azureml.core import Workspace
#subscription_id = ''
#resource_group = 'absa'
#workspace_name = 'absa_space'
#ws = Workspace(subscription_id = subscription_id, resource_group = resource_group, workspace_name = workspace_name)
#ws.write_config()
try:
ws = Workspace.from_config()
print(ws.name, ws.location, ws.resource_group, ws.location, sep='\t')
print('Library configuration succeeded')
except:
print('Workspace not found')
###Output
abla_space westeurope abla westeurope
Library configuration succeeded
###Markdown
Compute There are two compute options, run-once (preview) and persistent compute; for this demo we will use persistent compute. To learn more about run-once compute, check out the [docs](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targetsamlcompute?WT.mc_id=absa-notebook-abornst). > **This cell can be run because it will not re-create a cluster. Although it does not make much sense to run it**
###Code
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# Choose a name for your CPU cluster
cluster_name = "absa-cluster"
# Verify that cluster does not exist already
try:
cluster = ComputeTarget(workspace=ws, name=cluster_name)
print('Found existing cluster, use it.')
except ComputeTargetException:
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D3_V2',
vm_priority='lowpriority',
min_nodes=1,
max_nodes=4)
cluster = ComputeTarget.create(ws, cluster_name, compute_config)
cluster.wait_for_completion(show_output=True)
###Output
Found existing cluster, use it.
Succeeded
AmlCompute wait for completion finished
Minimum number of nodes requested have been provisioned
###Markdown
Upload DataThe dataset we are using comes from the [womens ecommerce clothing reviews dataset](https://www.kaggle.com/nicapotato/womens-ecommerce-clothing-reviews/) and is in the open domain; it can be replaced with any CSV file with rows of text, as the ABSA model is unsupervised. The documentation for uploading data can be found [here](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.azure_storage_datastore.azureblobdatastore/?WT.mc_id=absa-notebook-abornst); for now we will use the ds.upload command.
###Code
!wget -O 'dataset/glove.840B.300d.zip' 'http://nlp.stanford.edu/data/glove.840B.300d.zip'
import os
lib_root = os.path.dirname(os.path.abspath("__file__"))
ds = ws.get_default_datastore()
ds.upload('./dataset', target_path='clothing_data', overwrite=True, show_progress=True)
###Output
_____no_output_____
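###Markdown
The estimator later passes the datastore object itself as `--data_folder`. If you only want the uploaded `clothing_data` folder mounted on the compute target, a sketch of an explicit data reference (an optional refinement, not what the original cell does):
###Code
# mount just the uploaded folder instead of the whole default datastore
clothing_ref = ds.path('clothing_data').as_mount()
print(clothing_ref)
###Output
_____no_output_____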
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Intel NLP-Architect ABSA on AzureML INSTRUCTOR VERSION> **This instructor version of the notebook gives additional instructions as to which cells should be run in demo mode, and which should not. It assumes that before the demo you will execute the complete notebook, and then during the demo certain cells would be re-run to demonstrate working process.**This notebook contains an end-to-end walkthrough of using Azure Machine Learning Service to train, finetune and test [Aspect Based Sentiment Analysis Models using Intel's NLP Architect](http://nlp_architect.nervanasys.com/absa.html) Prerequisites* Understand the architecture and terms introduced by Azure Machine Learning (AML)* Have working Jupyter Notebook Environment. You can: - Install Python environment locally, as described below in **Local Installation** - Use [Azure Notebooks](https://docs.microsoft.com/ru-ru/azure/notebooks/azure-notebooks-overview/?wt.mc_id=absa-notebook-abornst). In this case you should upload the `absa.ipynb` file to a new Azure Notebooks project, or just clone the [GitHub Repo](https://github.com/microsoft/ignite-learning-paths/tree/master/aiml/aiml40).* Azure Machine Learning Workspace in your Azure Subscription Local InstallationInstall the Python SDK: make sure to install notebook, and contrib:```shellconda create -n azureml -y Python=3.6source activate azuremlpip install --upgrade azureml-sdk[notebooks,contrib] conda install ipywidgetsjupyter nbextension install --py --user azureml.widgetsjupyter nbextension enable azureml.widgets --user --py```You will need to restart jupyter after this Detailed instructions are [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-create-workspace-with-python/?WT.mc_id=absa-notebook-abornst)If you need a free trial account to get started you can get one [here](https://azure.microsoft.com/en-us/offers/ms-azr-0044p/?WT.mc_id=absa-notebook-abornst) Creating Azure ML WorkspaceAzure ML Workspace can be created by using one of the following ways:* Manually through [Azure Portal](http://portal.azure.com/?WT.mc_id=absa-notebook-abornst) - [here is the complete walkthrough](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-workspace/?wt.mc_id=absa-notebook-abornst)* Using [Azure CLI](https://docs.microsoft.com/ru-ru/cli/azure/?view=azure-cli-latest&wt.mc_id=absa-notebook-abornst), using the following commands:```shellaz extension add -n azure-cli-mlaz group create -n absa -l westus2az ml workspace create -w absa_space -g absa``` Initialize workspaceTo access an Azure ML Workspace, you will need to import the AML library and the following information:* A name for your workspace (in our example - `absa_space`)* Your subscription id (can be obtained by running `az account list`)* The resource group name (in our case `absa`)Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architectureworkspace/?WT.mc_id=absa-notebook-abornst) object from the existing workspace you created in the Prerequisites step or create a new one. > **This cell can be run without problem, because it will just create a connection object for the workspace. Make sure to insert the correct `subscription_id` value before use, or have `config.json` file ready.**
###Code
from azureml.core import Workspace
#subscription_id = ''
#resource_group = 'absa'
#workspace_name = 'absa_space'
#ws = Workspace(subscription_id = subscription_id, resource_group = resource_group, workspace_name = workspace_name)
#ws.write_config()
try:
ws = Workspace.from_config()
print(ws.name, ws.location, ws.resource_group, ws.location, sep='\t')
print('Library configuration succeeded')
except:
print('Workspace not found')
###Output
abla_space westeurope abla westeurope
Library configuration succeeded
###Markdown
Compute There are two compute options, run-once (preview) and persistent compute; for this demo we will use persistent compute. To learn more about run-once compute, check out the [docs](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targetsamlcompute?WT.mc_id=absa-notebook-abornst). > **This cell can be run because it will not re-create a cluster. Although it does not make much sense to run it**
###Code
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# Choose a name for your CPU cluster
cluster_name = "absa-cluster"
# Verify that cluster does not exist already
try:
cluster = ComputeTarget(workspace=ws, name=cluster_name)
print('Found existing cluster, use it.')
except ComputeTargetException:
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D3_V2',
vm_priority='lowpriority',
min_nodes=1,
max_nodes=4)
cluster = ComputeTarget.create(ws, cluster_name, compute_config)
cluster.wait_for_completion(show_output=True)
###Output
Found existing cluster, use it.
Succeeded
AmlCompute wait for completion finished
Minimum number of nodes requested have been provisioned
###Markdown
Upload DataThe dataset we are using comes from the [womens ecommerce clothing reviews dataset](https://www.kaggle.com/nicapotato/womens-ecommerce-clothing-reviews/) and is in the open domain; it can be replaced with any CSV file with rows of text, as the ABSA model is unsupervised. The documentation for uploading data can be found [here](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.azure_storage_datastore.azureblobdatastore/?WT.mc_id=absa-notebook-abornst); for now we will use the ds.upload command.
###Code
!wget -O 'dataset/glove.840B.300d.zip' 'http://nlp.stanford.edu/data/glove.840B.300d.zip'
import os
lib_root = os.path.dirname(os.path.abspath("__file__"))
ds = ws.get_default_datastore()
ds.upload('./dataset', target_path='clothing_data', overwrite=True, show_progress=True)
###Output
_____no_output_____
###Markdown
Now that the GloVe file is uploaded to our datastore, we can remove it from our local directory.
###Code
!rm 'dataset/glove.840B.300d.zip'
###Output
_____no_output_____
###Markdown
Train File > **It does not matter if you execute this cell or not, because it will just overwrite the file. You may execute it, just to make the demo more live**
###Code
%%writefile train.py
import argparse
import json
import os
from pathlib import Path
from nltk import flatten
from azureml.core import Run
from sklearn.metrics import f1_score
from azureml.core.model import Model
# Load NLP Architect
from nlp_architect.models.absa.train.train import TrainSentiment
from nlp_architect.models.absa.inference.inference import SentimentInference
# Inputs
parser = argparse.ArgumentParser(description='ABSA Train')
parser.add_argument('--data_folder', type=str, dest='data_folder', help='data folder mounting point')
parser.add_argument('--asp_thresh', type=int, default=3)
parser.add_argument('--op_thresh', type=int, default=2)
parser.add_argument('--max_iter', type=int, default=3)
args = parser.parse_args()
# Download ABSA dependencies including spacy parser and glove embeddings
from spacy.cli.download import download as spacy_download
from nlp_architect.utils.io import uncompress_file
from nlp_architect.models.absa import TRAIN_OUT
spacy_download('en')
GLOVE_ZIP = os.path.join(args.data_folder,
'clothing_data/glove.840B.300d.zip')
EMBEDDING_PATH = TRAIN_OUT / 'word_emb_unzipped' / 'glove.840B.300d.txt'
uncompress_file(GLOVE_ZIP, Path(EMBEDDING_PATH).parent)
clothing_train = os.path.join(args.data_folder,
'clothing_data/clothing_absa_train_small.csv')
os.makedirs('outputs', exist_ok=True)
train = TrainSentiment(asp_thresh=args.asp_thresh,
op_thresh=args.op_thresh,
max_iter=args.max_iter)
opinion_lex, aspect_lex = train.run(data=clothing_train,
out_dir = './outputs')
# Evaluation
# Although ABSA is an unsupervised method it can be metriced with a small sample of labeled data
def doc2IO(doc):
"""
Converts ABSA doc to IO span format for evaluation
"""
index = 0
aspect_indexes = []
doc_json = json.loads(doc.json())
tokens = doc_json["_doc_text"].split()
io = [[t,'O'] for t in tokens]
for t_index, token in enumerate(tokens):
for s in doc_json["_sentences"]:
for ev in s["_events"]:
for e in ev:
if e["_type"] == "ASPECT":
if e["_start"] == index and all(aspect[0] != t_index for aspect in aspect_indexes):
io[t_index][1] = "{}-{}".format(e["_text"], e["_polarity"])
index += len(token) + 1
return io
inference = SentimentInference('./outputs/train_out/generated_aspect_lex.csv',
'./outputs/train_out/generated_opinion_lex_reranked.csv')
clothing_val = os.path.join(args.data_folder,
'clothing_data/clothing-absa-validation.json')
with open(clothing_val) as json_file:
val = json.load(json_file)
predictions = []
for doc in val["data"]:
doc_raw = " ".join([token[0] for token in doc])
sentiment_doc = inference.run(doc=doc_raw)
predictions.append(doc2IO(sentiment_doc))
y_pred = flatten(predictions)[1::2]
y_true = flatten(val['data'])[1::2]
from sklearn.metrics import f1_score
# Log metrics
run = Run.get_context()
run.log('Aspect Lexicon Size', len(aspect_lex))
run.log('Opinion Lexicon Size', len(opinion_lex))
run.log('f1_weighted', float(f1_score(y_true, y_pred, average='weighted')))
###Output
Overwriting train.py
###Markdown
Create An ExperimentCreate an [Experiment](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architectureexperiment/?WT.mc_id=absa-notebook-abornst) to track all the runs in your workspace for this tutorial. > **In most cases, you want to skip the following 3 cells during the demo, in order not to run the experiment again. However, you may also start another experiment if time permits, in which case you can run them**
###Code
from azureml.core import Experiment
experiment_name = 'absa'
exp = Experiment(workspace=ws, name=experiment_name)
from azureml.train.estimator import Estimator
script_params = {
'--data_folder': ds,
}
nlp_est = Estimator(source_directory='.',
script_params=script_params,
compute_target=cluster,
environment_variables = {'NLP_ARCHITECT_BE':'CPU'},
entry_script='train.py',
pip_packages=['git+https://github.com/NervanaSystems/nlp-architect.git@absa',
'spacy==2.1.8']
)
run = exp.submit(nlp_est)
run_id = run.id
print(run_id)
###Output
_____no_output_____
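###Markdown
Outside the demo flow you can simply block on the submitted run instead of polling the widget; a minimal sketch:
###Code
# stream the run's logs until it finishes, then check its final status
run.wait_for_completion(show_output=True)
print(run.get_status())
###Output
_____no_output_____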
###Markdown
> **To retrieve the run, we use run id here. It can either be hard-coded from the previous pre-demo run, or you can rely on the jupyter kernel not restarting, in which case it will be saved in the `run_id` variable. So, if the jupyter engine has not been restarted, you may run cell 2, otherwise run cell 1**
###Code
run = [r for r in exp.get_runs() if r.id == 'absa_1568985331_df076c3c'][0]
run = [r for r in exp.get_runs() if r.id == run_id][0]
###Output
_____no_output_____
###Markdown
> **Run this to show the result of the run, either in progress or completed**
###Code
from azureml.widgets import RunDetails
RunDetails(run).show()
###Output
_____no_output_____
###Markdown
Fine-Tuning NLP Architect with AzureML HyperDriveAlthough ABSA is an unsupervised method, its hyperparameters, such as the aspect and opinion word thresholds, can be fine-tuned if provided with a small sample of labeled data
###Code
from azureml.train.hyperdrive import *
import math
param_sampling = RandomParameterSampling({
'--asp_thresh': choice(range(2,5)),
'--op_thresh': choice(range(2,5)),
'--max_iter': choice(range(2,5))
})
###Output
_____no_output_____
###Markdown
Early Termination PolicyFirst we will define an early termination policy. [Median stopping](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.hyperdrive.medianstoppingpolicy?WT.mc_id=absa-notebook-abornst) is an early termination policy based on running averages of primary metrics reported by the runs. This policy computes running averages across all training runs and terminates runs whose performance is worse than the median of the running averages. This policy takes the following configuration parameters:- evaluation_interval: the frequency for applying the policy (optional parameter).- delay_evaluation: delays the first policy evaluation for a specified number of intervals (optional parameter).
###Code
early_termination_policy = MedianStoppingPolicy(evaluation_interval=1, delay_evaluation=0)
###Output
_____no_output_____
###Markdown
Refer [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparametersspecify-early-termination-policy?WT.mc_id=absa-notebook-abornst) for more information on the Median stopping policy and other policies available.Now that we've defined our early termination policy we can define our HyperDrive configuration to maximize our model's weighted F1 score. HyperDrive can optimize any metric, as long as it is logged by the training script.
###Code
hd_config = HyperDriveConfig(estimator=nlp_est,
hyperparameter_sampling=param_sampling,
policy=early_termination_policy,
primary_metric_name='f1_weighted',
primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,
max_total_runs=16,
max_concurrent_runs=4)
###Output
_____no_output_____
###Markdown
Finally, launch the hyperparameter tuning job.
###Code
experiment = Experiment(workspace=ws, name='absa_hyperdrive')
hyperdrive_run = experiment.submit(hd_config)
hyperdrive_run.id
hyperdrive_run = [r for r in experiment.get_runs() if r.id == 'absa_hyperdrive_1571092544235933'][0]
###Output
_____no_output_____
###Markdown
Monitor HyperDrive runsWe can monitor the progress of the runs with the following Jupyter widget.
###Code
from azureml.widgets import RunDetails
RunDetails(hyperdrive_run).show()
hyperdrive_run.cancel()
###Output
_____no_output_____
###Markdown
Find and register the best modelOnce all the runs complete, we can find the run that produced the model with the highest evaluation (METRIC TBD).
###Code
best_run = hyperdrive_run.get_best_run_by_primary_metric()
best_run_metrics = best_run.get_metrics()
print(best_run)
print('Best Run is:\n F1: {0:.5f}'.format(
best_run_metrics['f1_weighted']
))
###Output
_____no_output_____
###Markdown
Register Model Outputs
###Code
aspect_lex = run.register_model(model_name='aspect_lex', model_path='outputs/train_out/generated_aspect_lex.csv')
opinion_lex = run.register_model(model_name='opinion_lex', model_path='outputs/train_out/generated_opinion_lex_reranked.csv')
###Output
_____no_output_____
###Markdown
Test Locally Install Local PIP Dependencies
###Code
!pip install git+https://github.com/NervanaSystems/nlp-architect.git@absa
!pip install spacy==2.0.18
###Output
_____no_output_____
###Markdown
Load Model From AzureML
###Code
from azureml.core.model import Model
from nlp_architect.models.absa.inference.inference import SentimentInference
c_aspect_lex = Model._get_model_path_remote('c_aspect_lex', 1, ws)
c_opinion_lex = Model._get_model_path_remote('c_opinion_lex', 1, ws)
inference = SentimentInference(c_aspect_lex, c_opinion_lex)
###Output
Using pre-trained BIST model.
###Markdown
Run Model On Sample Data
###Code
docs = ["Loved the sweater but hated the pants",
"Really great outfit, but the shirt is the wrong size",
"I absolutely love this jacket! i wear it almost everyday. works as a cardigan or a jacket. my favorite retailer purchase so far"]
sentiment_docs = []
for doc_raw in docs:
sentiment_doc = inference.run(doc=doc_raw)
sentiment_docs.append(sentiment_doc)
###Output
_____no_output_____
###Markdown
Visualize Model Results
###Code
import spacy
from spacy import displacy
from nlp_architect.models.absa.inference.data_types import TermType
ents = []
for doc in sentiment_docs:
if doc:
doc_viz = {'text':doc._doc_text, 'ents':[]}
for s in doc._sentences:
for ev in s._events:
for e in ev:
if e._type == TermType.ASPECT:
ent = {'start': e._start, 'end': e._start + e._len,
'label':str(e._polarity.value),
'text':str(e._text)}
if all(kown_e['start'] != ent['start'] for kown_e in ents):
ents.append(ent)
doc_viz['ents'].append(ent)
doc_viz['ents'].sort(key=lambda m: m["start"])
displacy.render(doc_viz, style="ent", options={'colors':{'POS':'#7CFC00', 'NEG':'#FF0000'}}, manual=True)
###Output
_____no_output_____
###Markdown
Package Model For Deployment Create scoring scriptCreate the scoring script, called score.py, used by the web service call to show how to use the model.You must include two required functions into the scoring script:The init() function, which typically loads the model into a global object. This function is run only once when the Docker container is started.The run(input_data) function uses the model to predict a value based on the input data. Inputs and outputs to the run typically use JSON for serialization and de-serialization, but other formats are supported.
###Code
%%writefile score.py
from azureml.core.model import Model
from nlp_architect.models.absa.inference.inference import SentimentInference
from spacy.cli.download import download as spacy_download
def init():
"""
Set up the ABSA model for Inference
"""
global SentInference
spacy_download('en')
aspect_lex = Model.get_model_path('c_aspect_lex')
opinion_lex = Model.get_model_path('c_opinion_lex')
SentInference = SentimentInference(aspect_lex, opinion_lex)
def run(raw_data):
"""
Evaluate the model and return JSON string
"""
sentiment_doc = SentInference.run(doc=raw_data)
return sentiment_doc.json()
###Output
_____no_output_____
###Markdown
Create configuration files Create Environment FileCreate an environment file, called myenv.yml, that specifies all of the script's package dependencies. This file is used to ensure that all of those dependencies are installed in the Docker image. This model needs nlp-architect and the azureml-sdk.
###Code
from azureml.core.conda_dependencies import CondaDependencies
pip = ["azureml-defaults", "azureml-monitoring",
"git+https://github.com/NervanaSystems/nlp-architect.git@absa",
"spacy==2.0.18"]
myenv = CondaDependencies.create(pip_packages=pip)
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string())
###Output
_____no_output_____
###Markdown
Create Environment ConfigCreate an Environment configuration file and specify the environment and environment variables required for the application
###Code
from azureml.core import Environment
deploy_env = Environment.from_conda_specification('absa_env', "myenv.yml")
deploy_env.environment_variables={'NLP_ARCHITECT_BE': 'CPU'}
###Output
_____no_output_____
###Markdown
Inference Config Create an inference configuration that receives the deployment environment and the entry script
###Code
from azureml.core.model import InferenceConfig
inference_config = InferenceConfig(environment=deploy_env,
entry_script="score.py")
###Output
_____no_output_____
###Markdown
Package Model and Pull Package the registered models with the inference configuration and pull the resulting container image locally
###Code
package = Model.package(ws, [aspect_lex, opinion_lex], inference_config)
package.wait_for_creation(show_output=True)
package.pull()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Intel NLP-Architect ABSA on AzureML INSTRUCTOR VERSION> **This instructor version of the notebook gives additional instructions as to which cells should be run in demo mode, and which should not. It assumes that before the demo you will execute the complete notebook, and then during the demo certain cells would be re-run to demonstrate working process.**This notebook contains an end-to-end walkthrough of using Azure Machine Learning Service to train, finetune and test [Aspect Based Sentiment Analysis Models using Intel's NLP Architect](http://nlp_architect.nervanasys.com/absa.html) Prerequisites* Understand the architecture and terms introduced by Azure Machine Learning (AML)* Have working Jupyter Notebook Environment. You can: - Install Python environment locally, as described below in **Local Installation** - Use [Azure Notebooks](https://docs.microsoft.com/ru-ru/azure/notebooks/azure-notebooks-overview/?wt.mc_id=absa-notebook-abornst). In this case you should upload the `absa.ipynb` file to a new Azure Notebooks project, or just clone the [GitHub Repo](https://github.com/microsoft/ignite-learning-paths-training-aiml/tree/master/aiml40).* Azure Machine Learning Workspace in your Azure Subscription Local InstallationInstall the Python SDK: make sure to install notebook, and contrib:```shellconda create -n azureml -y Python=3.6source activate azuremlpip install --upgrade azureml-sdk[notebooks,contrib] conda install ipywidgetsjupyter nbextension install --py --user azureml.widgetsjupyter nbextension enable azureml.widgets --user --py```You will need to restart jupyter after this Detailed instructions are [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-create-workspace-with-python/?WT.mc_id=absa-notebook-abornst)If you need a free trial account to get started you can get one [here](https://azure.microsoft.com/en-us/offers/ms-azr-0044p/?WT.mc_id=absa-notebook-abornst) Creating Azure ML WorkspaceAzure ML Workspace can be created by using one of the following ways:* Manually through [Azure Portal](http://portal.azure.com/?WT.mc_id=absa-notebook-abornst) - [here is the complete walkthrough](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-workspace/?wt.mc_id=absa-notebook-abornst)* Using [Azure CLI](https://docs.microsoft.com/ru-ru/cli/azure/?view=azure-cli-latest&wt.mc_id=absa-notebook-abornst), using the following commands:```shellaz extension add -n azure-cli-mlaz group create -n absa -l westus2az ml workspace create -w absa_space -g absa``` Initialize workspaceTo access an Azure ML Workspace, you will need to import the AML library and the following information:* A name for your workspace (in our example - `absa_space`)* Your subscription id (can be obtained by running `az account list`)* The resource group name (in our case `absa`)Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architectureworkspace/?WT.mc_id=absa-notebook-abornst) object from the existing workspace you created in the Prerequisites step or create a new one. > **This cell can be run without problem, because it will just create a connection object for the workspace. Make sure to insert the correct `subscription_id` value before use, or have `config.json` file ready.**
###Code
from azureml.core import Workspace
#subscription_id = ''
#resource_group = 'absa'
#workspace_name = 'absa_space'
#ws = Workspace(subscription_id = subscription_id, resource_group = resource_group, workspace_name = workspace_name)
#ws.write_config()
try:
ws = Workspace.from_config()
print(ws.name, ws.location, ws.resource_group, ws.location, sep='\t')
print('Library configuration succeeded')
except:
print('Workspace not found')
###Output
abla_space westeurope abla westeurope
Library configuration succeeded
###Markdown
Compute There are two compute options, run-once (preview) and persistent compute; for this demo we will use persistent compute. To learn more about run-once compute, check out the [docs](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targetsamlcompute?WT.mc_id=absa-notebook-abornst). > **This cell can be run because it will not re-create a cluster. Although it does not make much sense to run it**
###Code
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# Choose a name for your CPU cluster
cluster_name = "absa-cluster"
# Verify that cluster does not exist already
try:
cluster = ComputeTarget(workspace=ws, name=cluster_name)
print('Found existing cluster, use it.')
except ComputeTargetException:
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D3_V2',
vm_priority='lowpriority',
min_nodes=1,
max_nodes=4)
cluster = ComputeTarget.create(ws, cluster_name, compute_config)
cluster.wait_for_completion(show_output=True)
###Output
Found existing cluster, use it.
Succeeded
AmlCompute wait for completion finished
Minimum number of nodes requested have been provisioned
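###Markdown
To double-check what already exists in the workspace before (re)creating anything, a small sketch:
###Code
# list the compute targets attached to the workspace and the current state of our cluster
print({name: ct.type for name, ct in ws.compute_targets.items()})
print(cluster.get_status())
###Output
_____no_output_____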
###Markdown
Upload DataThe dataset we are using comes from the [womens ecommerce clothing reviews dataset](https://www.kaggle.com/nicapotato/womens-ecommerce-clothing-reviews/) and is in the open domain; it can be replaced with any CSV file with rows of text, as the ABSA model is unsupervised. The documentation for uploading data can be found [here](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.azure_storage_datastore.azureblobdatastore/?WT.mc_id=absa-notebook-abornst); for now we will use the ds.upload command.
###Code
# if using as a separate notebook - fetch files from github repo
if not os.path.isdir('dataset'):
!mkdir dataset
!wget -O 'dataset/clothing_absa_train_small.csv' 'https://raw.githubusercontent.com/microsoft/ignite-learning-paths-training-aiml/master/aiml40/dataset/clothing_absa_train_small.csv'
!wget -O 'dataset/clothing_absa_train.csv' 'https://raw.githubusercontent.com/microsoft/ignite-learning-paths-training-aiml/master/aiml40/dataset/clothing_absa_train.csv'
!wget -O 'dataset/clothing-absa-validation.json' 'https://raw.githubusercontent.com/microsoft/ignite-learning-paths-training-aiml/master/aiml40/dataset/clothing-absa-validation.json'
!wget -O 'dataset/glove.840B.300d.zip' 'http://nlp.stanford.edu/data/glove.840B.300d.zip'
import os
lib_root = os.path.dirname(os.path.abspath("__file__"))
ds = ws.get_default_datastore()
ds.upload('./dataset', target_path='clothing_data', overwrite=True, show_progress=True)
###Output
_____no_output_____
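###Markdown
Optionally, the uploaded folder can also be wrapped in a registered dataset so later runs can refer to it by name; a sketch using the Dataset API (the dataset name below is an assumption):
###Code
from azureml.core import Dataset
# create and register a file dataset pointing at the uploaded folder
clothing_ds = Dataset.File.from_files(path=(ds, 'clothing_data'))
clothing_ds = clothing_ds.register(workspace=ws, name='clothing_reviews', create_new_version=True)
###Output
_____no_output_____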
###Markdown
Now that the GloVe file is uploaded to our datastore, we can remove it from our local directory.
###Code
!rm 'dataset/glove.840B.300d.zip'
###Output
_____no_output_____
###Markdown
Train File > **It does not matter if you execute this cell or not, because it will just overwrite the file. You may execute it, just to make the demo more live**
###Code
%%writefile train.py
import argparse
import json
import os
import shutil
from pathlib import Path
from nltk import flatten
from azureml.core import Run
from sklearn.metrics import f1_score
from azureml.core.model import Model
# Load NLP Architect
from nlp_architect.models.absa.train.train import TrainSentiment
from nlp_architect.models.absa.inference.inference import SentimentInference
# Inputs
parser = argparse.ArgumentParser(description='ABSA Train')
parser.add_argument('--data_folder', type=str, dest='data_folder', help='data folder mounting point')
parser.add_argument('--asp_thresh', type=int, default=3)
parser.add_argument('--op_thresh', type=int, default=2)
parser.add_argument('--max_iter', type=int, default=3)
args = parser.parse_args()
# Download ABSA dependencies including spacy parser and glove embeddings
from spacy.cli.download import download as spacy_download
from nlp_architect.utils.io import uncompress_file
from nlp_architect.models.absa import TRAIN_OUT, LEXICONS_OUT
spacy_download('en')
GLOVE_ZIP = os.path.join(args.data_folder,
'clothing_data/glove.840B.300d.zip')
EMBEDDING_PATH = TRAIN_OUT / 'word_emb_unzipped' / 'glove.840B.300d.txt'
uncompress_file(GLOVE_ZIP, Path(EMBEDDING_PATH).parent)
clothing_train = os.path.join(args.data_folder,
'clothing_data/clothing_absa_train_small.csv')
os.makedirs('outputs', exist_ok=True)
train = TrainSentiment(asp_thresh=args.asp_thresh,
op_thresh=args.op_thresh,
max_iter=args.max_iter)
opinion_lex, aspect_lex = train.run(data=clothing_train,
out_dir = './outputs')
#Copy lexicons to outputs folder
asp_lex = shutil.copy(LEXICONS_OUT / 'generated_aspect_lex.csv', './outputs')
op_lex = shutil.copy(LEXICONS_OUT / 'generated_opinion_lex_reranked.csv', './outputs')
# Evaluation
# Although ABSA is an unsupervised method it can be metriced with a small sample of labeled data
def doc2IO(doc):
"""
Converts ABSA doc to IO span format for evaluation
"""
index = 0
aspect_indexes = []
doc_json = json.loads(doc.json())
tokens = doc_json["_doc_text"].split()
io = [[t,'O'] for t in tokens]
for t_index, token in enumerate(tokens):
for s in doc_json["_sentences"]:
for ev in s["_events"]:
for e in ev:
if e["_type"] == "ASPECT":
if e["_start"] == index and all(aspect[0] != t_index for aspect in aspect_indexes):
io[t_index][1] = "{}-{}".format(e["_text"], e["_polarity"])
index += len(token) + 1
return io
inference = SentimentInference(LEXICONS_OUT / 'generated_aspect_lex.csv',
LEXICONS_OUT / 'generated_opinion_lex_reranked.csv')
clothing_val = os.path.join(args.data_folder,
'clothing_data/clothing-absa-validation.json')
with open(clothing_val) as json_file:
val = json.load(json_file)
predictions = []
vals = []
for doc in val["data"]:
doc_raw = " ".join([token[0] for token in doc])
sentiment_doc = inference.run(doc=doc_raw)
if sentiment_doc is not None:
predictions.append(doc2IO(sentiment_doc))
vals.append(doc)
y_pred = flatten(predictions)[1::2]
y_true = flatten(vals)[1::2]
from sklearn.metrics import f1_score
# Log metrics
run = Run.get_context()
run.log('Aspect Lexicon Size', len(aspect_lex))
run.log('Opinion Lexicon Size', len(opinion_lex))
run.log('f1_weighted', float(f1_score(y_true, y_pred, average='weighted')))
###Output
Overwriting train.py
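###Markdown
The evaluation block in train.py relies on `flatten(...)[1::2]` to turn the `[token, tag]` pairs into a flat list of tags before computing the F1 score. A tiny illustration of that slicing, using made-up tokens and tags:
###Code
from nltk import flatten
example = [[['loved', 'O'], ['the', 'O'], ['sweater', 'sweater-POS']]]
print(flatten(example))        # ['loved', 'O', 'the', 'O', 'sweater', 'sweater-POS']
print(flatten(example)[1::2])  # every second element -> just the tags
###Output
_____no_output_____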
###Markdown
Create An ExperimentCreate an [Experiment](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architectureexperiment/?WT.mc_id=absa-notebook-abornst) to track all the runs in your workspace for this tutorial. > **In most cases, you want to skip the following 3 cells during the demo, in order not to run the experiment again. However, you may also start another experiment if time permits, in which case you can run them**
###Code
from azureml.core import Experiment
experiment_name = 'absa'
exp = Experiment(workspace=ws, name=experiment_name)
from azureml.train.estimator import Estimator
script_params = {
'--data_folder': ds,
}
nlp_est = Estimator(source_directory='.',
script_params=script_params,
compute_target=cluster,
environment_variables = {'NLP_ARCHITECT_BE':'CPU'},
entry_script='train.py',
pip_packages=['git+https://github.com/NervanaSystems/nlp-architect.git@absa',
'spacy==2.1.8']
)
run = exp.submit(nlp_est)
run_id = run.id
print(run_id)
###Output
_____no_output_____
###Markdown
Note: If you accidentally run the following cell more than once you can cancel a run with the run.cancel() command.
###Code
# run.cancel()
###Output
_____no_output_____
###Markdown
> **To retrieve the run, we use run id here. It can either be hard-coded from the previous pre-demo run, or you can rely on the jupyter kernel not restarting, in which case it will be saved in the `run_id` variable. So, if the jupyter engine has not been restarted, you may run cell 2, otherwise run cell 1**
###Code
run = [r for r in exp.get_runs() if r.id == 'absa_1568985331_df076c3c'][0]
run = [r for r in exp.get_runs() if r.id == run_id][0]
###Output
_____no_output_____
###Markdown
> **Run this to show the result of the run, either in progress or completed**
###Code
from azureml.widgets import RunDetails
RunDetails(run).show()
###Output
_____no_output_____
###Markdown
Fine-Tuning NLP Architect with AzureML HyperDriveAlthough ABSA is an unsupervised method, its hyperparameters, such as the aspect and opinion word thresholds, can be fine-tuned if provided with a small sample of labeled data
###Code
from azureml.train.hyperdrive import *
import math
param_sampling = RandomParameterSampling({
'--asp_thresh': choice(range(2,5)),
'--op_thresh': choice(range(2,5)),
'--max_iter': choice(range(2,5))
})
###Output
_____no_output_____
###Markdown
Early Termination PolicyFirst we will define an early termination policy. [Median stopping](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.hyperdrive.medianstoppingpolicy?WT.mc_id=absa-notebook-abornst) is an early termination policy based on running averages of primary metrics reported by the runs. This policy computes running averages across all training runs and terminates runs whose performance is worse than the median of the running averages. This policy takes the following configuration parameters:- evaluation_interval: the frequency for applying the policy (optional parameter).- delay_evaluation: delays the first policy evaluation for a specified number of intervals (optional parameter).
###Code
early_termination_policy = MedianStoppingPolicy(evaluation_interval=1, delay_evaluation=0)
###Output
_____no_output_____
###Markdown
Refer [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparametersspecify-early-termination-policy?WT.mc_id=absa-notebook-abornst) for more information on the Median stopping policy and other policies available.Now that we've defined our early termination policy we can define our HyperDrive configuration to maximize our model's weighted F1 score. HyperDrive can optimize any metric, as long as it is logged by the training script.
###Code
hd_config = HyperDriveConfig(estimator=nlp_est,
hyperparameter_sampling=param_sampling,
policy=early_termination_policy,
primary_metric_name='f1_weighted',
primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,
max_total_runs=16,
max_concurrent_runs=4)
###Output
_____no_output_____
###Markdown
Finally, launch the hyperparameter tuning job.
###Code
experiment = Experiment(workspace=ws, name='absa_hyperdrive')
hyperdrive_run = experiment.submit(hd_config)
hyperdrive_run.id
hyperdrive_run = [r for r in experiment.get_runs() if r.id == 'absa_hyperdrive_1571092544235933'][0]
###Output
_____no_output_____
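###Markdown
Besides the widget, the individual HyperDrive child runs can be inspected directly; a minimal sketch:
###Code
# print each child run's id, status and logged metrics
for child in hyperdrive_run.get_children():
    print(child.id, child.get_status(), child.get_metrics())
###Output
_____no_output_____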
###Markdown
Monitor HyperDrive runsWe can monitor the progress of the runs with the following Jupyter widget.
###Code
from azureml.widgets import RunDetails
RunDetails(hyperdrive_run).show()
hyperdrive_run.cancel()
###Output
_____no_output_____
###Markdown
Find and register the best modelOnce all the runs complete, we can find the run that produced the model with the highest evaluation (METRIC TBD).
###Code
best_run = hyperdrive_run.get_best_run_by_primary_metric()
best_run_metrics = best_run.get_metrics()
print(best_run)
print('Best Run is:\n F1: {0:.5f}'.format(
best_run_metrics['f1_weighted']
))
###Output
_____no_output_____
###Markdown
Register Model Outputs
###Code
aspect_lex = run.register_model(model_name='aspect_lex', model_path='outputs/generated_aspect_lex.csv')
opinion_lex = run.register_model(model_name='opinion_lex', model_path='outputs/generated_opinion_lex_reranked.csv')
###Output
_____no_output_____
###Markdown
Test Locally Install Local PIP Dependencies
###Code
!pip install git+https://github.com/NervanaSystems/nlp-architect.git@absa
!pip install spacy==2.0.18
###Output
_____no_output_____
###Markdown
Load Model From AzureML
###Code
from azureml.core.model import Model
from nlp_architect.models.absa.inference.inference import SentimentInference
c_aspect_lex = Model._get_model_path_remote('c_aspect_lex', 1, ws)
c_opinion_lex = Model._get_model_path_remote('c_opinion_lex', 1, ws)
inference = SentimentInference(c_aspect_lex, c_opinion_lex)
###Output
Using pre-trained BIST model.
###Markdown
Run Model On Sample Data
###Code
docs = ["Loved the sweater but hated the pants",
"Really great outfit, but the shirt is the wrong size",
"I absolutely love this jacket! i wear it almost everyday. works as a cardigan or a jacket. my favorite retailer purchase so far"]
sentiment_docs = []
for doc_raw in docs:
sentiment_doc = inference.run(doc=doc_raw)
sentiment_docs.append(sentiment_doc)
###Output
_____no_output_____
###Markdown
Visualize Model Results
###Code
import spacy
from spacy import displacy
from nlp_architect.models.absa.inference.data_types import TermType
ents = []
for doc in sentiment_docs:
if doc:
doc_viz = {'text':doc._doc_text, 'ents':[]}
for s in doc._sentences:
for ev in s._events:
for e in ev:
if e._type == TermType.ASPECT:
ent = {'start': e._start, 'end': e._start + e._len,
'label':str(e._polarity.value),
'text':str(e._text)}
if all(kown_e['start'] != ent['start'] for kown_e in ents):
ents.append(ent)
doc_viz['ents'].append(ent)
doc_viz['ents'].sort(key=lambda m: m["start"])
displacy.render(doc_viz, style="ent", options={'colors':{'POS':'#7CFC00', 'NEG':'#FF0000'}},
manual=True, jupyter=True)
###Output
_____no_output_____
###Markdown
Package Model For Deployment Create scoring scriptCreate the scoring script, called score.py, used by the web service call to show how to use the model.You must include two required functions into the scoring script:The init() function, which typically loads the model into a global object. This function is run only once when the Docker container is started.The run(input_data) function uses the model to predict a value based on the input data. Inputs and outputs to the run typically use JSON for serialization and de-serialization, but other formats are supported.
###Code
%%writefile score.py
from azureml.core.model import Model
from nlp_architect.models.absa.inference.inference import SentimentInference
from spacy.cli.download import download as spacy_download
def init():
"""
Set up the ABSA model for Inference
"""
global SentInference
spacy_download('en')
aspect_lex = Model.get_model_path('c_aspect_lex')
opinion_lex = Model.get_model_path('c_opinion_lex')
SentInference = SentimentInference(aspect_lex, opinion_lex)
def run(raw_data):
"""
Evaluate the model and return JSON string
"""
sentiment_doc = SentInference.run(doc=raw_data)
return sentiment_doc.json()
###Output
_____no_output_____
###Markdown
Create configuration files Create Environment FileCreate an environment file, called myenv.yml, that specifies all of the script's package dependencies. This file is used to ensure that all of those dependencies are installed in the Docker image. This model needs nlp-architect and the azureml-sdk.
###Code
from azureml.core.conda_dependencies import CondaDependencies
pip = ["azureml-defaults", "azureml-monitoring",
"git+https://github.com/NervanaSystems/nlp-architect.git@absa",
"spacy==2.0.18",
""]
myenv = CondaDependencies.create(pip_packages=pip)
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string())
###Output
_____no_output_____
###Markdown
Create Environment ConfigCreate an Environment configuration file and specify the environment and environment variables required for the application
###Code
from azureml.core import Environment
deploy_env = Environment.from_conda_specification('absa_env', "myenv.yml")
deploy_env.environment_variables={'NLP_ARCHITECT_BE': 'CPU'}
###Output
_____no_output_____
###Markdown
Inference Config Create an inference configuration that receives the deployment environment and the entry script
###Code
from azureml.core.model import InferenceConfig
inference_config = InferenceConfig(environment=deploy_env,
entry_script="score.py")
###Output
_____no_output_____
###Markdown
Package Model and Pull Package the registered models with the inference configuration and pull the resulting container image locally
###Code
package = Model.package(ws, [aspect_lex, opinion_lex], inference_config)
package.wait_for_creation(show_output=True)
package.pull()
###Output
_____no_output_____ |
nbs/temp.ipynb | ###Markdown
Load Workbooks
###Code
planilhas = f.load_workbooks_from_drive()
planilhas
turmas = {k:v for k,v in planilhas.items() if '2019-1S' in k}
turmas = f.load_turmas()
turmas
for title, sh in turmas.items():
print(title)
wb = sh.worksheet("Lista de Presença")
cells_1, cells_2 = wb.range("AP4:AP28"), wb.range("AQ4:AQ28")
cells = wb.range("AP4:AQ28")
for i, (c1, c2) in enumerate(zip(cells_1, cells_2), 4):
c1.value = fr'=if(AO{i}="Absent", "Undone", "")'
c2.value = fr'=if(AO{i}="Absent", 0, "")'
#wb.update_cell(3,2, r"='Parâmetros'!$A$14")
#wb.update_cell(6,2, r"=Class_Plan!$F$13")
wb.update_cells(cells_1, "USER_ENTERED")
wb.update_cells(cells_2, "USER_ENTERED")
#while i < 421:
# wb.update_cell(i, 1, fr"=image(Info_Students!$K${j})")
# i += 18
# j += 1
config = planilhas['Configuração_Planilhas'].worksheet("Class_Plan")
i = 6
wb = sh.worksheet(f'Aula {i}')
aula = wb.get_all_values()
Theme = aula[5][1]
config.update_cell(i, 1, i)
config.update_cell(i, 6 , Theme)
i += 1
aula[-3][0].split("\n")[1][23:54]
###Output
_____no_output_____
###Markdown
'Parâmetros' Tab
###Code
param = {}
for title, wb in turmas.items():
param[title] = f.load_sheet_from_workbook(wb, "Voluntarios_Geral", skiprows=1)
###Output
_____no_output_____
###Markdown
Export Semester Settings
###Code
config = f.load_sheet_from_workbook(planilhas['Configuração_Semestre'], 'Parâmetros', skiprows=1)
txt = r'=importrange("https://docs.google.com/spreadsheets/d/1zg8roK0-EFySIZivBaHkLeNcVsNJ1t41YrpMzlli6vQ", "voluntarios_2018_2S")'
for title, (sh, _) in param.items():
print(title)
sh.clear()
sh.update_cell(1, 1, txt)
###Output
_____no_output_____
###Markdown
Load Student Sheets
###Code
students = {}
for title, wb in turmas.items():
students[title] = f.load_sheet_from_workbook(wb, 'Students')
###Output
_____no_output_____
###Markdown
Load Attendance Lists (Listas de Presença)
###Code
listas = f.carrega_listas()
s = r'"Aula "'
for sheet, _ in listas.values():
#sheet.update_acell("R1", fr"=CONCATENATE({s},'Parâmetros'!$B7 ," ",'Parâmetros'!$C7)")
for i in range(4, 29):
from time import sleep
sheet = students["A1"][0]
c = 2
for index in range(5, sheet.row_count+1, 18):
print(index)
i = index
#cell_list = sheet.range(f"E{i}:F{i+3}")
sheet.update_cell(i, 1, fr"=Info_Alunos!$B${c}")
#sheet.cell(i, 1).value = fr"=Info_Alunos!$B${c}"
i += 1
sleep(2)
sheet.update_cell(i, 6, fr"=Info_Alunos!$B${c}")
#sheet.cell(i, 6).value = fr"=Info_Alunos!$B${c}"
i += 1
sleep(2)
sheet.update_cell(i, 6, fr"=Info_Alunos!$D${c}")
#sheet.cell(i, 6).value = fr"=Info_Alunos!$D${c}"
i += 1
sleep(2)
sheet.update_cell(i, 6, fr"=Info_Alunos!$F${c}")
#sheet.cell(i, 6).value = fr"=Info_Alunos!$F${c}"
i += 1
sleep(2)
sheet.update_cell(i, 6, fr"=Info_Alunos!$G${c}")
#sheet.cell(i, 6).value = fr"=Info_Alunos!$G${c}"
i += 1
sleep(2)
sheet.update_cell(i, 6, fr"=Info_Alunos!$A${c}")
#sheet.cell(i, 6).value = fr"=Info_Alunos!$A${c}"
i += 5
sleep(2)
sheet.update_cell(i, 5, 'Attendance')
sleep(2)
#sheet.cell(i, 5).value = 'Attendance'
sheet.update_cell(i, 6, fr"='Notas dos Alunos'!$B${c}")
sleep(2)
#sheet.cell(i, 6).value = fr"='Notas dos Alunos'!$B${c}"
i +=1
sheet.update_cell(i, 5, 'Homework')
sleep(2)
#sheet.cell(i, 5).value = 'Homework'
sheet.update_cell(i, 6, fr"='Notas dos Alunos'!$C${c}")
sleep(2)
#sheet.cell(i, 6).value = fr"='Notas dos Alunos'!$C${c}"
i +=1
sheet.update_cell(i, 5, 'English Usage')
sleep(2)
#sheet.cell(i, 5).value = 'English Usage'
sheet.update_cell(i, 6, fr"='Notas dos Alunos'!$D${c}")
sleep(2)
#sheet.cell(i, 6).value = fr"='Notas dos Alunos'!$D${c}"
i +=1
sheet.update_cell(i, 5, 'Speaking')
sleep(2)
#sheet.cell(i, 5).value = 'Speaking'
sheet.update_cell(i, 6, fr"='Notas dos Alunos'!$E${c}")
sleep(2)
#sheet.cell(i, 6).value = fr"='Notas dos Alunos'!$E${c}"
i +=1
sheet.update_cell(i, 5, 'Midterm')
sleep(2)
#sheet.cell(i, 5).value = 'Midterm'
sheet.update_cell(i, 6, fr"='Notas dos Alunos'!$F${c}")
sleep(2)
#sheet.cell(i, 6).value = fr"='Notas dos Alunos'!$F${c}"
i +=1
sheet.update_cell(i, 5, 'Final Exam')
sleep(2)
#sheet.cell(i, 5).value = 'Final Exam'
sheet.update_cell(i, 6, fr"='Notas dos Alunos'!$G${c}")
sleep(2)
#sheet.cell(i, 6).value = fr"='Notas dos Alunos'!$G${c}"
i +=1
sheet.update_cell(i, 5, "Final Grade")
sleep(2)
#sheet.cell(i, 5).value = "Final Grade"
sheet.update_cell(i, 6, fr"='Notas dos Alunos'!$I${c}")
sleep(2)
#sheet.cell(i, 6).value = fr"='Notas dos Alunos'!$I${c}"
#sheet.update_cells(cell_list)
c += 1
#sleep(100)
#sheet.update_cells(sheet.range(f"A1:F{sheet.row_count}"))
###Output
_____no_output_____
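###Markdown
The `sleep(2)` calls above are presumably there to stay under the Sheets API write quota. A sketch of a more robust pattern: retry a single write with exponential backoff when the API rejects it (the cell reference below is only illustrative):
###Code
import time
import gspread
for attempt in range(5):
    try:
        sheet.update_cell(4, 1, "=Info_Alunos!$B$2")
        break
    except gspread.exceptions.APIError:
        time.sleep(2 ** attempt)
###Output
_____no_output_____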
###Markdown
Fix null cells in the attendance spreadsheets
###Code
p = ["P" + str(i) for i in range(1, 13)]
h = [c for c in df.columns if "HW" in c and "_" not in c]
cp = [c for c in df.columns if "CP" in c]
p, h, cp
df.columns
name = "A1"
df = listas[name][1]
cols = range(7,42)
df.iloc[:, cols].fillna("")
df.iloc[:, cols]
df[p]
#df[["P7", "P8", "P9", "P12"]] = df[["P7", "P8", "P9", "P12"]].replace("nan", "Ausente")
df[presença] = df[presença].replace("", "Ausente")
df[presença]
df[h]
#df.HW9 = df.HW9.replace("Não Fez", "Não Houve")
#df[homework] = df[homework].replace('', "Não Fez")
df[h]
df[cp]
df[["SPK1", 'SPK2']]
for presença, participação in zip([i for i in p if i not in ["P1", "P6", "P7", "P12"]], cp):
# use .loc instead of chained indexing so the assignments actually modify df
df.loc[df[presença] == "Ausente", participação] = ""
df.loc[df[presença] == "Presente", participação] = 'Good'
df.loc[df['P6'] == "Ausente", "SPK1"] = 0.00
#df.loc[df['P12'] == "Ausente", "SPK2"] = 0.00
df.loc[df['P7'] == "Ausente", 'Nota_Mid'] = 0.00
df[["Nome", "Nota_Final"]][df.Nota_Final != 'nan']
f.salva_aba_no_drive(df.iloc[:, 7:40], 'J23_2018-2S_Feedback_'+name, aba_drive="Lista de Presença", row=4, col=8)
for title, wb in wbs.items():
if "2018-2S_Feedback" in title:
sheet = wb.worksheet('Students')
#print(dir(sheet))
for i in range(15,sheet.row_count+1, 18):
print(sheet.cell(i, 5).value)
break
values[13]
templates = [wb for wb in wbs if "2018-2S" in wb.title]
templates
ws = templates[0]
delete = ["LEIA-ME", "Teachers", "Alocação", "Infos Matricula", "Students", "Lista_de_Presença"]
for ws in templates:
for sheet in ws.worksheets():
if sheet.title in delete:
try:
ws.del_worksheet(sheet)
except Exception as e:
print(repr(e))
###Output
_____no_output_____ |
data_analysis/Figure_4_PROseq.ipynb | ###Markdown
Figure 4B
###Code
my_colours = ['#43006A', '#81176D', '#B02363']
plt_proseq_ss_windows = (ggplot
(data=proseq_windows_long, mapping=aes( x='window', y = 'value', fill = 'window')) +
facet_wrap(facets = 'splice_site') +
geom_violin(width = 0.8) +
geom_boxplot(width = 0.3, fill = 'white', alpha = 0.4) +
theme_linedraw(base_size = 12) +
theme(axis_text_x=element_text(rotation=45, hjust=1)) +
theme(figure_size = (6,4)) +
# xlab('CoSE (Long-Read Seq)') +
ylab('PROseq Read Density') +
ylim(0,50) +
scale_fill_manual(values = my_colours)
)
plt_proseq_ss_windows
# Statistical tests for PROseq read densities (easiest to do on the input file in wide format)
p_upstream5_junction5 = scipy.stats.ttest_rel(proseq_windows['upstream5'], proseq_windows['junction5'])[1]
p_upstream5_downstream5 = scipy.stats.ttest_rel(proseq_windows['upstream5'], proseq_windows['downstream5'])[1]
p_upstream3_junction3 = scipy.stats.ttest_rel(proseq_windows['upstream3'], proseq_windows['junction3'])[1]
p_upstream3_downstream3 = scipy.stats.ttest_rel(proseq_windows['upstream3'], proseq_windows['downstream3'])[1]
print("p-value 5'SS upstream vs junction = " + str(p_upstream5_junction5))
print("p-value 5'SS upstream vs downstream = " + str(p_upstream5_downstream5))
print("p-value 3'SS upstream vs junction = " + str(p_upstream3_junction3))
print("p-value 3'SS upstream vs downstream = " + str(p_upstream3_downstream3))
###Output
p-value 5'SS upstream vs junction = 0.01931662973030425
p-value 5'SS upstream vs downstream = 2.2354580496686518e-41
p-value 3'SS upstream vs junction = 1.372328012662401e-21
p-value 3'SS upstream vs downstream = 0.12384670201349503
###Markdown
Save Output Figures
###Code
plt_proseq_ss_windows.save('PROseq_signals_around_splice_sites.pdf') # Fig 4B
###Output
_____no_output_____ |
week2/1.pandas_basics.ipynb | ###Markdown
Pandas means Panel data analysis library 1. It is good for tabular data, like an Excel spreadsheet, but much more powerful 2. Today's tutorial is more like a sketch; I will point out resources for more in-depth tutorials 3. I will cover the key functions of pandas so you can finish Assignment 1 and the research project 4. You may require functions outside this tutorial when dealing with the research project; google them first, read the documentation, and I will provide help if necessary
###Code
# import our beautiful libraries
import pandas as pd
import numpy as np
# Let's get some data and put into pandas,
# reading from my server on Amazon Cloud, you realise I am using the same data from MFIN6201 class...
asset = pd.read_csv('https://mfin6201.s3.amazonaws.com/company_assets.csv')
# inspect the data
asset
# to list out the columns, just convert it a list
list(asset)
# or call the columns attribute
asset.columns
# I can rename the columns,
asset.columns = ['firm_id','date','fyear','asset']
# For now, I will just convert it back
asset.columns = ['permno', 'datadate', 'fyear', 'at']
# or you can print the first few rows
asset.head() # asset.head(10) to print first 10 rows, asset.tail() for last rows
# to read documentation of particular commands, type ? after it
pd.read_csv?
# get info for the current dataset
asset.info()
# object dtype suggests Pandas does not know exactly what type it is
# get summary stats for the current dataset
asset.describe() # analogous to Stata's summarize, but much better and more versatile
# I can print more stats
asset.describe([0.01,0.05,0.10,0.25,0.5,0.75,0.9,0.95,0.99])
# stats are easy to get
print(asset['at'].mean())
print(asset['at'].std())
print(asset['at'].median())
# let's convert datadate to proper datetime values so we can work with it
# we can extract a column by doing selection
asset['datadate']
# use pandas built-in function to convert it to datetime values
pd.to_datetime(asset['datadate'])
# of course, you can browse the documentation of this command
pd.to_datetime?
# we need to overwrite the old column
asset['datadate'] = pd.to_datetime(asset['datadate'])
asset.info()
# Let's print out a histogram; to display graphs within the Jupyter notebook, we need one magic command
%matplotlib inline
asset['at'].hist()
# seems many small firms
# Let's select large firms using boolean indexing
asset['at']>100 # this gives us a list of boolean values, we can use this to subset dataset
asset[asset['at']>10000]
# save the results to a new dataframe
large_firms = asset[asset['at']>10000] # asset values are in million USD
# you can pass multiple arguments as selector
asset[(asset['at']>10000) & (asset['permno']<20000)]
# & is and operator, | is or operator
asset[(asset['at']>10000) | (asset['permno']<20000)]
# You can select a subset by columns
asset[['permno','fyear','at']]
# You can select a subset by columns, by passing a list to the selection
asset[['permno','fyear','at']]
# or subsetting columns and rows at the same time, using the loc function
asset.loc[asset['at']>10000, ['permno','fyear','at']]
# the syntax is df[rows, columns]
# to select entire rows or columns, use :
asset.loc[:,['permno','fyear','at']]
asset.loc[asset['at']>10000,:]
# Let's take log of the asset values so the distribution will be more "normal"
np.log(asset['at'])
asset['ln_at'] = np.log(asset['at']+1)
asset['ln_at'].hist(bins=20) # better, more "normal"
asset
# you can see that np function works on the entire series (column)
# you can use math function to work with individual values, but slower
import math
math.log(2)
# math module cannot work with a series
# math.log(asset['at'])
# you need to map the normal python module function over the series
# this may look crazy to you, but this is how it works
(asset['at']+1).map(math.log)
# you can map any arbitrary function
def crazy_function(series_element):
return math.sin(series_element)**2 + math.cos(series_element)**2
(asset['at']).map(crazy_function)
# of course, for numeric computation, it is easier and more efficient to just use numpy
# map and other row-wise operations simply give you more freedom,
# especially for non-numeric operations
np.sin(asset['at'])**2 + np.cos(asset['at'])**2
# if you want to work with multiple columns at the same time, you can use "apply"
# Suppose you want to multiply permno by at for crazy reasons, you can
# Here, a lambda expression is used with apply; it can define any arbitrary function on the fly
# Apply only accepts a function as input
asset.apply(lambda df:df['permno'] * df['at'],axis=1)
# you can, define a function beforehand
def multiply_permno_and_at(df):
return df['permno'] * df['at']
asset.apply(multiply_permno_and_at,axis=1)
# of course, for numerical operations, it is more efficient to use normal syntax
# Again, that is just to give the freedom
asset['permno'] * asset['at']
# now let's deal with missing values; we do not have missing values in the data, so I will just create some
# the loc function I showed you earlier actually gives access to the dataframe itself
asset.loc[asset.permno>80000,'at']
# you can directly change its value, the missing value in pandas is represented as np.nan
# You can do None as in Python, but it's not the pandas way
asset.loc[asset.permno>80000,'at'] = np.nan
# Now the asset value for permno > 80000 is all missing (not a number)
asset[asset.permno>80000]
# reflected in the info
asset.info()
# we can, fill with some default values
asset.fillna(0) # this will fill 0 to entire dataframe
# More carefully, we just fill 0 to the at
asset['at'].fillna(0)
# you can fill with some other value, for example, we can fill with the average value of this column
asset['at'].fillna(asset['at'].mean())
# Above commands will not change the value of the df
asset.info()
# you need to fill inplace in order to change the value of the actual df
asset['at'].fillna(0,inplace=True)
# Let's create missing values again to talk about some more stuff
# This time let's say fyear 2000, we have missing values
asset.loc[asset.fyear==2000,'at'] = np.nan
# a more sensible way is to fillna with the average of the values reported by the same company
asset[asset.permno==54594][lambda df:(df.fyear<2005) & (df.fyear>1996)]
# we can use groupby function, which we will talk about it later
asset['at'] = asset.groupby('permno')['at'].transform(lambda x:x.fillna(x.mean()))
asset[asset.permno==54594][lambda df:(df.fyear<2005) & (df.fyear>1996)]
# Groupby operations
# what is the average asset reported for each firm?
# using group by firm, this will return a series
asset.groupby('permno')['at'].mean()
asset.groupby('permno')['at'].mean().reset_index()
asset.groupby('fyear')['at'].sum().reset_index()
asset.groupby('fyear').mean() # this will only work on numeric columns
# you can compute different stats for different columns using agg function
asset.groupby('fyear').agg({
'permno':'nunique', 'at':'sum', 'ln_at':'mean'
})
# finally, let's drop duplicates to make the data into a panel
# For panel data, you cannot have duplicated observations for a firm-year
# Let's check that
# Let's count how many rows we get for each permno-fyear
asset.groupby(['permno','fyear']).size()
# Let's count how many rows we get for each permno-fyear and select numbers of rows > 1
asset.groupby(['permno','fyear']).size()[lambda s: s>1]
# we use datadate, you can use other columns to do boolean indexing, the choice does not matter
asset[asset.permno==15791] # these obs are due to a change of reporting period, we should keep the latest
nodup = asset.drop_duplicates(['permno','fyear'])
nodup[nodup.permno==15791] # these obs are due to a change of reporting period, we should keep the latest
pd.DataFrame.drop_duplicates?
nodup = asset.drop_duplicates(['permno','fyear'],keep='last')
nodup[nodup.permno==15791]
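# Final sanity check: after dropping duplicates, every permno-fyear pair should appear exactly once
nodup.groupby(['permno','fyear']).size().max()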
###Output
_____no_output_____ |
15-Neural-Networks/05_Sprint_Challenge.ipynb | ###Markdown
Neural Networks Sprint Challenge 1) Define the following terms:- Neuron - The base unit of a neural network. Accepts input data, applies a vector of weights, adds a bias, and applies an activation function- Input Layer - The input data, one node per feature/attribute- Hidden Layer - A layer of nodes that is neither the input nor the output layer. We cannot directly observe data in these layers.- Output Layer - The final layer that returns our output, typically a prediction along a continuous number line or a class probability.- Activation - A function that applies a transformation to the data. Popular choices are sigmoid and tanh, which both 'squishify' our data to the 0:1 or -1:1 range.- Backpropagation - The method by which neural networks update weight vectors for each node, in one epoch. Using the derivative of the node's activation function, backpropagation can re-scale weights to minimize a global loss function. 2) Create a perceptron class that can model the behavior of an AND gate. You can use the following table as your training data:| x1 | x2 | x3 | y ||----|----|----|---|| 1 | 1 | 1 | 1 || 1 | 0 | 1 | 0 || 0 | 1 | 1 | 0 || 0 | 0 | 1 | 0 |
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class Perceptron(object):
def __init__(self, rate = 0.1, n_iter = 10):
self.rate = rate
self.n_iter = n_iter
def fit(self, X, y):
self.weights = np.zeros(X.shape[1] + 1)
self.errors = []
for i in range(self.n_iter):
err = 0
for x_i, y_i in zip(X, y):
delta_w = self.rate * (y_i - self.predict(x_i))
self.weights[0] += delta_w
self.weights[1:] += delta_w * x_i
err += int(delta_w != 0.0)
self.errors.append(err)
return self
def predict(self, X):
net_input = np.dot(X, self.weights[1:]) + self.weights[0]
return np.where(net_input >= 0.0, 1, -1)
df = pd.DataFrame({
'x1':[1, 1, 0, 0],
'x2':[1, 0, 1, 0],
'x3':[1, 1, 1, 1],
'y': [1, 0, 0, 0]
})
X = df.drop(columns=['y']).values
y = df['y'] * 2 - 1
pn = Perceptron(rate = 0.1, n_iter = 10)
pn.fit(X, y)
plt.plot(range(1, len(pn.errors) + 1), pn.errors, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Number of misclassifications')
plt.show()
###Output
_____no_output_____
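###Markdown
As a quick sanity check, we can compare the trained perceptron's predictions against the AND-gate truth table (recall the labels were mapped from 0/1 to -1/+1):
###Code
print('predicted:', pn.predict(X))
print('expected: ', y.values)
###Output
_____no_output_____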
###Markdown
3) Implement a Neural Network Multilayer Perceptron class that uses backpropagation to update the network's weights. - Your network must have one hidden layer. - You do not have to update weights via gradient descent. You can use something like the derivative of the sigmoid function to update weights.- Train your model on the Heart Disease dataset from UCI:[Github Dataset](https://github.com/ryanleeallred/datasets/blob/master/heart.csv)[Raw File on Github](https://raw.githubusercontent.com/ryanleeallred/datasets/master/heart.csv)
###Code
class MultiLayerPerceptron(object):
"""MLP with one hidden layer and one output layer"""
def __init__(self, n_input_nodes, n_hidden_nodes, n_output_nodes):
self.n_input_nodes = n_input_nodes
self.n_hidden_nodes = n_hidden_nodes
self.n_output_nodes = n_output_nodes
self.L1_weights = np.random.randn(self.n_input_nodes, self.n_hidden_nodes)
self.L2_weights = np.random.randn(self.n_hidden_nodes, self.n_output_nodes)
def predict(self, X):
# hidden layer
self.L1_sum = np.dot(X, self.L1_weights)
self.L1_activated = self.sigmoid(self.L1_sum)
# output layer
self.L2_sum = np.dot(self.L1_activated, self.L2_weights)
self.L2_activated = self.sigmoid(self.L2_sum)
return self.L2_activated
def sigmoid(self, s):
return 1 / (1 + np.exp(-s))
def sigmoid_prime(self, s):
return s * (1 - s)
def backpropagate(self, X, y, output):
self.output_error = y - output
self.output_delta = self.output_error * self.sigmoid_prime(output)
self.z2_error = self.output_delta.dot(self.L2_weights.T)
self.z2_delta = self.z2_error * self.sigmoid_prime(self.L1_activated)
self.L1_weights += X.T.dot(self.z2_delta)
self.L2_weights += self.L1_activated.T.dot(self.output_delta)
def fit(self, X, y, n_iter, silent=False):
history = []
for i in range(n_iter):
y_pred = self.predict(X)
loss = np.mean(np.square(y - y_pred))
history.append(loss)
if not silent:
print(f'EPOCH {i+1} Loss: {loss}')
self.backpropagate(X, y, y_pred)
return history
###Output
_____no_output_____
###Markdown
Testing from lecture example
###Code
df = pd.DataFrame({
'x1':[0, 0, 1, 0, 1, 1, 0],
'x2':[0, 1, 0, 1, 0, 1, 0],
'x3':[1, 1, 1, 0, 0, 1, 0],
'y': [0, 1, 1, 1, 1, 0, 0]
})
X = df.drop(columns='y').values
y = df[['y']].values
nn = MultiLayerPerceptron(
n_input_nodes=X.shape[1],
n_hidden_nodes=3,
n_output_nodes=1
)
history = nn.fit(X, y, 4000, silent=True)
plt.plot(history)
nn.predict(X)
###Output
_____no_output_____
###Markdown
Sprint Example
###Code
from sklearn.preprocessing import StandardScaler
df = pd.read_csv('https://raw.githubusercontent.com/'
'ryanleeallred/datasets/master/heart.csv')
X = StandardScaler().fit_transform(df.drop(columns=['target']).values)
y = df[['target']].values
nn = MultiLayerPerceptron(
n_input_nodes=X.shape[1],
n_hidden_nodes=8,
n_output_nodes=1
)
history = nn.fit(X, y, 1000, silent=True)
plt.plot(history);
###Output
_____no_output_____
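###Markdown
For reference, a quick in-sample accuracy check of the from-scratch network, thresholding the sigmoid output at 0.5:
###Code
train_acc = ((nn.predict(X) > 0.5).astype(int) == y).mean()
print('Training accuracy: {:.3f}'.format(train_acc))
###Output
_____no_output_____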
###Markdown
4) Implement a Multilayer Perceptron architecture of your choosing using the Keras library. Train your model and report its baseline accuracy. Then hyperparameter tune at least two parameters and report your model's accuracy. - Use the Heart Disease Dataset (binary classification)- Use an appropriate loss function for a binary classification task- Use an appropriate activation function on the final layer of your network. - Train your model using verbose output for ease of grading.- Use GridSearchCV to hyperparameter tune your model. (for at least two hyperparameters)- When hyperparameter tuning, show your work by adding code cells for each new experiment. - Report the accuracy for each combination of hyperparameters as you test them so that we can easily see which resulted in the highest accuracy.- You must hyperparameter tune at least 5 parameters in order to get a 3 on this section.
###Code
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
df = pd.read_csv('https://raw.githubusercontent.com/'
'ryanleeallred/datasets/master/heart.csv')
X = StandardScaler().fit_transform(df.drop(columns=['target']).values)
y = df['target'].values
def create_model():
model = Sequential()
model.add(Dense(12, input_dim=13, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam',
metrics=['accuracy'])
return model
model = KerasClassifier(build_fn=create_model, verbose=1)
param_grid = {
'batch_size': [10, 20, 40, 50],
'epochs': [20, 40]
}
grid = GridSearchCV(
estimator=model,
param_grid=param_grid,
cv=3,
n_jobs=-1,
)
grid_result = grid.fit(X, y)
results = pd.DataFrame({
'mean': grid_result.cv_results_['mean_test_score'],
'Stdev': grid_result.cv_results_['std_test_score'],
'params': grid_result.cv_results_['params']}
)
results.sort_values('mean', ascending=False)
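# The best combination found by the search can also be read off directly:
print(grid_result.best_score_, grid_result.best_params_)

# The rubric asks for at least five tuned hyperparameters, while the grid above covers two.
# One way to extend it (a sketch, not a graded run) is to expose more knobs as arguments of
# the model-building function so GridSearchCV can vary them as well; the argument names
# below (hidden_units, dropout_rate, ...) are illustrative choices.
from keras.layers import Dropout

def create_model_tunable(optimizer='adam', hidden_units=12, activation='relu', dropout_rate=0.0):
    # same architecture as above, but with the knobs exposed as arguments
    model = Sequential()
    model.add(Dense(hidden_units, input_dim=13, activation=activation))
    model.add(Dropout(dropout_rate))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model

model_tunable = KerasClassifier(build_fn=create_model_tunable, verbose=0)
param_grid_big = {
    'batch_size': [10, 40],
    'epochs': [20, 40],
    'optimizer': ['adam', 'rmsprop'],
    'hidden_units': [8, 12, 24],
    'dropout_rate': [0.0, 0.2],
}
grid_big = GridSearchCV(estimator=model_tunable, param_grid=param_grid_big, cv=3, n_jobs=-1)
# grid_big_result = grid_big.fit(X, y)   # 48 combinations x 3 folds -- takes a while to run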
###Output
_____no_output_____ |
content/materials/notebooks/book_balancing_solutions.ipynb | ###Markdown
Download Notebook link: http://tinyurl.com/eng122-oct19 Submission Step 1: Rename your notebook LAST1_NAME1_LAST2_NAME2, for example: MOORE_JASON_HOWLETT_JAMES Step 2: Turn in your notebook and a PDF version to **Canvas** by midnight Saturday. Introduction The center of mass of a book (modeled as a homogeneous cuboid of mass $m$, length $l$, and height $h$) lies directly above the top of a cylinder of radius $r$. There is sufficient friction to prevent slipping of the two surfaces when the book is perturbed from this equilibrium position. Let $\theta$ be the angle between the vertical and the radial line that passes through the contact point when the book oscillates in a vertical plane perpendicular to the axis of the cylinder.
###Code
from IPython.display import SVG, Latex
SVG('book-balance.svg')
###Output
_____no_output_____
###Markdown
Non-linear Equation of Motion The book oscillates at an angular rate, $\dot{\theta}$, and the magnitude of the velocity of the center of mass of the book can be shown to be $v = \frac{1}{2} \sqrt{\left(h^{2} + 4 r^{2} \theta^{2}\right) \dot{\theta}^{2}}$. The moment of inertia of the book is approximately $\frac{m}{12} \left(h^{2} + l^{2}\right)$. Thus, the total kinetic energy is:$$T = \frac{m \dot{\theta}^{2}}{24} \left(h^{2} + l^{2}\right) + \frac{m \dot{\theta}^{2}}{8} \left(h^{2} + 4 r^{2} \theta^{2}\right)$$The potential energy is:$$U = - g m \left(\frac{h}{2} - r \theta \operatorname{sin}\left(\theta\right) + r - \left(\frac{h}{2} + r\right) \operatorname{cos}\left(\theta\right)\right)$$The Lagrangian can be formed and then finally the equation of motion:$$ - \frac{g h}{2} m \operatorname{sin}\left(\theta\right) + g m r \theta \operatorname{cos}\left(\theta\right) + \frac{h^{2} m}{3} \ddot{\theta} + \frac{l^{2} m}{12} \ddot{\theta} + m r^{2} \theta^{2} \ddot{\theta} + m r^{2} \theta \dot{\theta}^{2} = 0$$
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib notebook
###Output
_____no_output_____
###Markdown
Question Linearize the non-linear equation of motion about $\theta=0$ and type the result below: $$- \frac{g h}{2} \theta + g r \theta + \frac{h^{2} \ddot{\theta}}{3} + \frac{l^{2} \ddot{\theta}}{12}=0$$ Question Derive an approximate expression for the natural frequency of small oscillations and compute the natural frequency in rad/s and Hz along with the period of oscillation. Use the parameters $m= 1.058\textrm{ kg}$, $l = 0.238 \textrm{ m}$, $g = 9.81 \textrm{ m/s}^2$, $h = 0.029 \textrm{ m}$, and $r=0.042 \textrm{ m}$, which correspond to the course textbook and a typical coffee mug. $$\omega_n = \sqrt{\frac{6g(2r-h)}{4h^2 + l^2}}$$
###Code
m = 1.058 # kg
l = 0.238 # m
g = 9.81 # m/s^2
h = 0.029 # m
r = 0.042 # m
omega_n = np.sqrt(6 * g * (2 * r - h) / (4 * h**2 + l**2))
Latex('$\omega_n = {:1.3f} \\textrm{{ rad/s}}$'.format(omega_n))
Latex('$f_n = {:1.3f} \\textrm{{ Hz}}$'.format(omega_n / 2 / np.pi))
Tn = 1 / (omega_n / 2 / np.pi)
Latex('$T_n = {:1.3f} \\textrm{{ s}}$'.format(Tn))
###Output
_____no_output_____
###Markdown
Question 2 Describe how the mass affects the natural frequency of the system. The natural frequency of the system is not affected by the mass. Question 3 Are there any limits to the size of the book (the ratios $h/r$ or $l/r$) set by a requirement of stability of the oscillations? Said another way, how high a pile of books can you balance? Hint: it is probably difficult to balance a very high pile of books. Recall that if the effective stiffness is negative you will get unstable behavior in an $m$ $k$ model. If the effective stiffness is less than zero, $k < 0$, the system will be unstable about the equilibrium point. The effective stiffness is proportional to $$r-\frac{h}{2}$$so$$r > \frac{h}{2}$$for the system to be stable. Question 4 Simulate the linear and non-linear equations of motion that predict the free response of the book released from rest at an initial angle $\theta_0=1\textrm{ deg}$ for 10 seconds. Use `scipy.integrate.odeint` for the numerical integration. Plot the results of each simulation on the same graph.
###Code
from scipy.integrate import odeint
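# Quick check of the stability condition from Question 3, using the h and r defined earlier:
# the upright equilibrium is stable only if r > h/2.
print('r = {:.3f} m, h/2 = {:.4f} m, stable: {}'.format(r, h / 2, r > h / 2))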
def eval_nonlin_rhs(x, t):
"""Evaluates the right hand side of the non-linear differential equations.
Parameters
==========
x : array_like, shape(2, )
An array of the states: [theta, omega].
t : float
The value of time.
Returns
=======
xdot : array_like, shape(2, )
An array of the derivatives of the states: [thetadot, omegadot].
"""
theta, omega = x
thetadot = omega
I = h**2 / 3 + l**2 / 12 + r**2 * theta**2
omegadot = -(g * r * theta * np.cos(theta) + r**2 * theta * omega**2 - g * h * np.sin(theta) / 2) / I
return thetadot, omegadot
def eval_lin_rhs(x, t):
"""Evaluates the right hand side of the non-linear differential equations.
Parameters
==========
x : array_like, shape(2, )
An array of the states: [theta, omega].
t : float
The value of time.
Returns
=======
xdot : array_like, shape(2, )
An array of the derivatives of the states: [thetadot, omegadot].
"""
theta, omega = x
thetadot = omega
I = h**2 / 3 + l**2 / 12
omegadot = g * theta * (h / 2 - r) / I
return thetadot, omegadot
###Output
_____no_output_____
###Markdown
Simulate the system.
###Code
t = np.linspace(0, 10, num=1000)
x_nonlin = odeint(eval_nonlin_rhs, [np.deg2rad(1), 0], t)
x_lin = odeint(eval_lin_rhs, [np.deg2rad(1), 0], t)
###Output
_____no_output_____
###Markdown
Plot the trajectory of $\theta$.
###Code
fig, ax = plt.subplots(1, 1)
ax.plot(t, np.rad2deg(x_nonlin[:, 0]))
ax.plot(t, np.rad2deg(x_lin[:, 0]))
ax.set_xlabel('Time [s]')
ax.set_ylabel('$\\theta$ [deg]')
ax.legend(['Non-linear', 'Linear']);
###Output
_____no_output_____
###Markdown
Question Using your program, create a function that calculates (to three significant figures) the period of the non-linear model over the 11 oscillations when $\theta_0= 1, 10 \textrm{ and } 20 \textrm{ deg}$. Compare these results to the period predicted by the linear model. By how much and why do they differ? *Hint: Look for sign changes with `np.sign()`, use boolean indexing to extract important times, and finally `np.diff()` and `np.mean()` can be useful for finding the delta times and averaging. Note that `np.diff()` returns one fewer item in the array it operates on.*
###Code
def find_period(t, theta):
"""Computes the period of oscillation based on the trajectory of theta.
Parameters
==========
t : array_like, shape(n,)
An array of monotonically increasing time values.
theta : array_like, shape(n,)
An array of values for theta at each time in ``t``.
Returns
=======
T : float
An estimate of the period of oscillation.
"""
peak_idxs = np.diff(np.sign(theta)) < 0
peak_idxs = np.hstack((peak_idxs, False))
T = np.diff(t[peak_idxs]).mean()
return T
T_d = find_period(t, x_nonlin[:, 0])
T_d
x_nonlin = odeint(eval_nonlin_rhs, [np.deg2rad(10), 0], t)
T_d = find_period(t, x_nonlin[:, 0])
T_d
x_nonlin = odeint(eval_nonlin_rhs, [np.deg2rad(20), 0], t)
T_d = find_period(t, x_nonlin[:, 0])
T_d
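# By how much does the nonlinear period differ from the linear prediction Tn?
# (percent difference for the theta0 = 20 deg case computed above)
100 * (T_d - Tn) / Tn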
###Output
_____no_output_____
###Markdown
Question Using your plotting code above, plot the linear and non-linear time responses for $\theta_0=20 \textrm{ deg}$. What do you observe?
###Code
x_nonlin = odeint(eval_nonlin_rhs, [np.deg2rad(20), 0], t)
x_lin = odeint(eval_lin_rhs, [np.deg2rad(20), 0], t)
fig, ax = plt.subplots(1, 1)
ax.plot(t, np.rad2deg(x_nonlin[:, 0]))
ax.plot(t, np.rad2deg(x_lin[:, 0]))
ax.set_xlabel('Time [s]')
ax.set_ylabel('$\\theta$ [deg]')
ax.legend(['Non-linear', 'Linear']);
###Output
_____no_output_____
###Markdown
As time increases the non-linear system's period grows each cycle, whereas the linear system, as expected, has a constant period. Question Make a plot of the period vs $\theta_0=1,2,..,25$ for the non-linear system. Also plot a horizontal line for the linear period for comparison using `ax.axhline()`. *Hint: Use a `for` loop to iterate through `np.arange(1, 25)` and collect your results in an initially empty list with `.append()`.*
###Code
initial_thetas = np.arange(1, 25) # deg
periods = []
for theta0 in initial_thetas:
x = odeint(eval_nonlin_rhs, [np.deg2rad(theta0), 0], t)
periods.append(find_period(t, x[:, 0]))
fig, ax = plt.subplots(1, 1)
ax.plot(initial_thetas, periods)
ax.axhline(Tn, color='black')
ax.set_xlabel(r'$\theta_0$ [rad]')
ax.set_ylabel(r'$T$ Period [s]')
ax.legend(['Non-linear', 'Linear']);
###Output
_____no_output_____
###Markdown
Question Compare the period predicted by your model to the period measured in class. If it does not match, what are the possible explanations? Is the linear model a good model to use for predicting the motion of the system? In class, we measured 7 cycles in 5.93 seconds, so the period is:
###Code
Tn_meas = 5.93 / 7
Tn_meas, Tn
###Output
_____no_output_____
###Markdown
The percent error is less than 1%! Pretty good model!
###Code
(Tn - Tn_meas) / Tn * 100
###Output
_____no_output_____ |
Session3/Day1/SoftwareRepositories.ipynb | ###Markdown
Code Repositories **Version 0.1** The notebook contains problems oriented around building a basic Python code repository and making it public via [Github](http://www.github.com). Of course there are other places to put code repositories, with complexity ranging from services comparable to github to simply hosting a git server on your local machine. But this focuses on git and github as a ready-to-use example with plenty of additional resources to be found online. Note that these problems assume you are using the Anaconda Python distribution. This is particularly useful for these problems because it makes it very easy to install testing packages in virtual environments quickly and with little wasted disk space. If you are not using anaconda, you can either use an alternative virtual environment scheme (e.g. in Py 3, the built-in `venv`), or just install packages directly into your default python (and hope for the best...). For `git` interaction, this notebook also uses the `git` command line tools directly. There are a variety of GUI tools that make working with `git` more visually intuitive (e.g. [SourceTree](http://www.sourcetreeapp.com), [gitkraken](http://www.gitkraken.com), or the [github desktop client](https://desktop.github.com)), but this notebook uses the command line tools as the lowest common denominator. You are welcome to try to reproduce the steps with your client, however - feel free to ask your neighbors or instructors if you run into trouble there. As a final note, this notebook's examples assume you are using a system with a unix-like shell (e.g. macOS, Linux, or Windows with git-bash or the Linux subsystem shell). * * * E Tollerud (STScI) Problem 0: Using Jupyter as a shell As an initial step before diving into code repositories, it's important to understand how you can use Jupyter as a shell. Most of the steps in this notebook require interaction with the system that's easier done with a shell or editor rather than using Python code in a notebook. While this could be done by opening up a terminal beside this notebook, to keep most of your work in the notebook itself, you can use the capabilities Jupyter + IPython offer for shell interaction. 0a: Figure out your base shell path and what's in it The critical trick here is the ``!`` magic in IPython. Anything after a leading ``!`` in IPython gets run by the shell instead of as python code. Run the shell commands ``pwd`` and ``ls`` to see where IPython thinks you are on your system, and the contents of the directory. *hint: Be sure to remove the "complete"s below when you've done so. IPython will interpret that as part of the shell command if you don't*
###Code
! #complete
! #complete
###Output
_____no_output_____
###Markdown
0b: Try a multi-line shell command IPython magics often support "cell" magics by having ``%%`` at the top of a cell. Use that to cd into the directory above this one ("..") and then ``ls`` inside that directory. *Hint: if you need syntax tips, run the ``magic()`` function and look for the `!` or `!!` commands*
###Code
%%sh
#complete
###Output
_____no_output_____
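###Markdown
One possible completion, in case you want to check your approach (it simply lists the parent directory of wherever the notebook was started):
###Code
%%sh
cd ..
ls
###Output
_____no_output_____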
###Markdown
0c: Create a new directory from Jupyter While you can do this almost as easily with `os.mkdir` in Python, for this case try to do it using shell magics instead. Make a new directory in the directory you are currently in. Use your system file browser to ensure you were successful.
###Code
! #complete
###Output
_____no_output_____
###Markdown
0d: Change directory to your new directory One thing about shell commands is that they always start wherever you started your IPython instance. So doing ``cd`` as a shell command only changes things temporarily (i.e. within that shell command). IPython provides a `%cd` magic that makes this change last, though. Use this to `%cd` into the directory you just created, and then use the `pwd` shell command to ensure this cd "stuck" (You can also try doing `cd` as a **shell** command to prove to yourself that it's different from the `%cd` magic.)
###Code
%cd #complete
###Output
_____no_output_____
###Markdown
Final note: ``%cd -0`` is a convenient shorthand to switch back to the initial directory. Problem 1: Creating a bare-bones repo and getting it on Github Here we'll create a simple (public) code repository with a minimal set of content, and publish it on github. 1a: Create a basic repository locally Start by creating the simplest possible code repository, composed of a single code file. Create a directory (or use the one from *0c*), and place a ``code.py`` file in it, with a bit of Python code of your choosing. (Bonus points for witty or sarcastic code...) You could even use non-Python code if you desired, although Problems 3 & 4 feature Python-specific bits so I wouldn't recommend it. To make the file from the notebook, the ``%%file`` magic is a convenient way to write the contents of a notebook cell to a file.
###Code
!mkdir #complete only if you didn't do 0c, or want a different name for your code directory
%%file <yourdirectory>/code.py
def do_something():
# complete
print(something)# this will make it much easier in future problems to see that something is actually happening
###Output
_____no_output_____
###Markdown
If you want to test-run your code:
###Code
%run <yourdirectory>/code.py # complete
do_something()
###Output
_____no_output_____
###Markdown
1b: Convert the directory into a git repo Make that code into a git repository by doing ``git init`` in the directory you created, then ``git add`` and ``git commit``.
###Code
%cd # complete
!git init
!git add code.py
!git commit -m #complete
###Output
_____no_output_____
###Markdown
1c: Create a repository for your code in Github Go to [github's web site](http://www.github.com) in your web browser. If you do not have a github account, you'll need to create one (follow the prompts on the github site). Once you've got an account, you'll need to make sure your git client can [authenticate with github](https://help.github.com/categories/authenticating-to-github/). If you're using a GUI, you'll have to figure it out (usually it's pretty easy). On the command line you have two options: * The simplest way is to connect to github using HTTPS. This requires no initial setup, but `git` will prompt you for your github username and password every so often.* If you find that annoying (I do...), you can set up your system to use SSH to talk to github. Look for the "SSH and GPG keys" section of your settings on github's site, or if you're not sure how to work with SSH keys, check out [github's help on the subject](https://help.github.com/articles/connecting-to-github-with-ssh/). Once you've got github set up to talk to your computer, you'll need to create a new repository for the code you created. Hit the "+" in the upper-right, create a "new repository" and fill out the appropriate details (don't create a README just yet). To stay sane, I recommend using the same name for your repository as the local directory name you used... But that is *not* a requirement, just a recommendation. Once you've created the repository, connect your local repository to github and push your changes up to github.
###Code
!git remote add <yourgithubusername> <the url github shows you on the repo web page> #complete
!git push <yourgithubusername> master -u
###Output
_____no_output_____
###Markdown
The ``-u`` is a convenience that means from then on you can use just ``git push`` and ``git pull`` to send your code to and from github. 1e: Modify the code and send it back up to github We'll discuss proper documentation later. But for now make sure to add a README to your code repository. Always add a README with basic documentation. Always. Even if only you are going to use this code, trust me, future you will be very happy you did it. You can just call it `README`, but to get it to get rendered nicely on the github repository, you can call it ``README.md`` and write it using markdown syntax, ``REAMDE.rst`` in ReST (if you know what that is) or various other similar markup languages github understands. If you don't know/care, just use ``README.md``, as that's pretty standard at this point.
###Code
%%file README.md
# complete
###Output
_____no_output_____
###Markdown
Don't forget to add and commit via ``git`` and push up to github...
###Code
!git #complete
###Output
_____no_output_____
###Markdown
1f: Choose a License I bet you didn't expect to be reading legalese today... but it turns out this is important. If you do not explicitly license your code, in most countries (including the US and EU) it is technically **illegal** for anyone to use your code for any purpose other than just looking at it. (Un?)Fortunately, there are a lot of possible open source licenses out there. Assuming you want an open license, the best resource is the ["Choose a License" website](http://choosealicense.org). Have a look over the options there and decide which you think is appropriate for your code. Once you've chosen a license, grab a copy of the license text, and place it in your repository as a file called ``LICENSE`` (or ``LICENSE.md`` or the like). Some licenses might also suggest you place the license text or just a copyright notice in the source code as well, but that's up to you. Once you've done that, do as we've done before: push all your additions up to github. If you've done it right, github will automatically figure out your license and show it in the upper-right corner of your repo's github page.
###Code
!git #complete
###Output
_____no_output_____
###Markdown
Problem 2: Collaborating with others' repos There's not much point in having open source code if no one else can look at it or use it. So now we'll have you try modify your neighbors' project using github's Pull Request feature. 2a: Get (git?) your neighbor's code repo Find someone sitting near you who has gotten through Problem 1. Ask them their github user name and the name of their repository. Once you've got the name of their repo, navigate to it on github. The URL pattern is always "https://www.github.com/theirusername/reponame". Use the github interface to "fork" that repo, yielding a "yourusername/reponame" repository. Go to that one, take note of the URL needed to clone it (you'll need to grab it from the repo web page, either in "HTTPS" or "SSH" form, depending on your choice in 1a). Then clone that onto your local machine.
###Code
# Don't forget to do this cd or something like it... otherwise you'll clone *inside* your repo
%cd -0
!git clone <url from github>#complete
%cd <reponame>#complete
###Output
_____no_output_____
###Markdown
2b: Create a branch for your change You're going to make some changes to their code, but who knows... maybe they'll spend so long reviewing it that you'll want to make another. So it's always best to make changes in a specific "branch" for that change. To do this we create a new git branch and check it out.
###Code
!git checkout -b <name-of-branch> #complete
###Output
_____no_output_____
###Markdown
2c: modify the code Make some change to their code repo. Usually this would be a new feature or a bug fix or documentation clarification or the like... But it's up to you. Once you've done that, be sure to commit the change locally.
###Code
!git add <files modified>#complete
!git commit -m ""#complete
###Output
_____no_output_____
###Markdown
and push it up (to a branch on *your* github fork).
###Code
!git push origin <name-of-branch>#complete
###Output
_____no_output_____
###Markdown
2d: Issue a pull request Now use the github interface to create a new "pull request". If you time it right, once you've pushed your new branch up, you'll see a prompt to do this automatically appear on your fork's web page. But if you don't, use the "branches" drop-down to navigate to the new branch, and then hit the "pull request" button. That should show you an interface that you can use to leave a title and description (in github markdown), and then submit the PR. Go ahead and do this. 2e: Have them review the PR Tell your neighbor that you've issued the PR. They should be able to go to *their* repo, and see that a new pull request has been created. There they'll review the PR, possibly leaving comments for you to change. If so, go to 2f, but if not, they should hit the "Merge" button, and you can jump to 2g. 2f: (If necessary) make changes and update the code If they left you some comments that require changing prior to merging, you'll need to make those changes in your local copy, commit those changes, and then push them up to your branch on your fork.
###Code
!git #complete
###Output
_____no_output_____
###Markdown
Hopefully they are now satisfied and are willing to hit the merge button. 2g: Get the updated version Now you should get the up-to-date version from the original owner of the repo, because that way you'll have both your changes and any other changes they might have made in the meantime. To do this you'll need to connect your local copy to your *nieghbor*'s github repo (**not** your fork).
###Code
!git remote add <neighbors-username> <url-from-neighbors-github-repo> #complete
!git fetch <neighbors-username> #complete
!git branch --set-upstream-to=<neighbors-username>/master master
!git checkout master
!git pull
###Output
_____no_output_____
###Markdown
Now if you look at the local repo, it should include your changes. *Suggestion* To stay sane, you might change the "origin" remote to your username. E.g. ``git remote rename origin ``. To go further, you might even *delete* your fork's `master` branch, so that only your neighbor's `master` exists. That might save you headaches in the long run if you were to ever access this repo again in the future. 2h: Have them reciprocate Science (Data or otherwise) and open source code is a social enterprise built on shared effort, mutual respect, and trust. So ask them to issue a PR against *your* code, too. The more we can stand on each others' shoulders, the farther we will all see. *Hint: Ask them nicely. Maybe offer a cookie or something?* Problem 3: Setting up a bare-bones Python Package Up to this point we've been working on the simplest possible shared code: a single file with all the content. But for most substantial use cases this isn't going to cut it. After all, Python was designed around the idea of namespaces that let you hide away or show code to make writing, maintaining, and versioning code much easier. But to make use of these, we need to deploy the installation tools that Python provides. This is typically called "packaging". In this problem we will take the code you just made and build it into a proper python package that can be installed and then used anywhere. For more background and detail (and the most up-to-date recommendations) see the [Python Packaging Guide](https://packaging.python.org/current/). 3a: Set up a Python package structure for your code First we adjust the structure of your code from Problem 1 to allow it to live in a package structure rather than as a stand-alone ``.py`` file. All you need to do is create a directory, move the ``code.py`` file into that directory, and add a file (can be empty) called ``__init__.py`` into the directory. You'll have to pick a name for the package, which is usually the same as the repo name (although that's not strictly required). *Hint: don't forget to switch back to *your* code repo directory, if you are doing this immediately after Problem 2.*
###Code
!mkdir <yourpkgname>#complete
!git mv code.py <yourpkgname>#complete
#The "touch" unix command simply creates an empty file if there isn't one already.
#You could also use an editor to create an empty file if you prefer.
!touch <yourpkgname>/__init__.py#complete
###Output
_____no_output_____
###Markdown
3b: Test your package You should now be able to import your package and the code inside it as though it were some installed package like `numpy`, `astropy`, `pandas`, etc.
###Code
from <yourpkgname> import code#complete
#if your code.py has a function called `do_something` as in the example above, you can now run it like:
code.do_something()
###Output
_____no_output_____
###Markdown
3c: Apply packaging tricks One of the nice things about packages is that they let you hide the implementation of some part of your code in one place while exposing a "cleaner" namespace to the users of your package. To see a (trivial) example of this, let's pull a function from your ``code.py`` into the base namespace of the package. In the cell below, make the ``__init__.py`` have one line: ``from .code import do_something``. That places the ``do_something()`` function into the package's root namespace.
###Code
%%file <yourpkgname>/__init__.py
#complete
###Output
_____no_output_____
###Markdown
Now the following should work.
###Code
import <yourpkgname>#complete
<yourpkgname>.do_something()#complete
###Output
_____no_output_____
###Markdown
*BUT* you will probably get an error here. That's because Python is smart about imports: once it has imported a package, it won't re-import it later. Usually that saves time, but here it's a hassle. Fortunately, we can use the ``reload`` function to get around this:
###Code
from importlib import reload #not necessary on Py 2.x, where reload() is built-in
reload(<yourpkgname>)#complete
<yourpkgname>.do_something()#complete
###Output
_____no_output_____
###Markdown
3d: Create a setup.py file Ok, that's great in a pinch, but what if you want your package to be available from *other* directories? If you open a new terminal somewhere else and try to ``import`` your package, you'll see that it will fail, because Python doesn't know where to find your package. Fortunately, Python (both the language and the larger ecosystem) provides built-in tools to install packages. These are built around creating a ``setup.py`` script that controls installation of a python package into a shared location on your machine. Essentially all Python packages are installed this way, even if it happens silently behind-the-scenes. Below is a template bare-bones setup.py file. Fill it in with the relevant details for your package.
###Code
%%file /Users/erik/tmp/lsst-test/setup.py
#!/usr/bin/env python
from distutils.core import setup
setup(name='<yourpkgname>',
version='0.1dev',
description='<a description>',
author='<your name>',
author_email='<youremail>',
packages=['<yourpkgname>'],
) #complete
###Output
Overwriting /Users/erik/tmp/lsst-test/setup.py
###Markdown
3e: Build the package Now you should be able to "build" the package. In complex packages this will involve more involved steps like linking against C or FORTRAN code, but for pure-python packages like yours, it simply involves filtering out some extraneous files and copying the essential pieces into a build directory.
###Code
!python setup.py build
###Output
_____no_output_____
###Markdown
To test that it built successfully, the easiest thing to do is cd into the `build/lib.X-Y-Z` directory ("X-Y-Z" here is OS- and machine-specific). Then you should be able to ``import`` your package. It's usually best to do this as a completely independent process in python. That way you can be sure you aren't accidentally using an old import as we saw above.
###Code
%%sh
cd build/lib.X-Y-Z #complete
python -c "import <yourpkgname>;<yourpkgname>.do_something()" #complete
###Output
_____no_output_____
###Markdown
3f: Install the package Alright, now that it looks like it's all working as expected, we can install the package. Note that if we do this willy-nilly, we'll end up with lots of packages, perhaps with the wrong versions, and it's easy to get confused about what's installed (there's no reliable ``uninstall`` command...) So before installing we first create a virtual environment using Anaconda, and install into that. If you don't have anaconda or a similar virtual environment scheme, you can just do ``python setup.py install``. But just remember that this will be difficult to back out (hence the reason for Python environments in the first place!)
###Code
%%sh
conda create -n test_<yourpkgname> anaconda #complete
source activate test_<yourpkgname> #complete
python setup.py install
###Output
_____no_output_____
###Markdown
Now we can try running the package from *anywhere* (not just the source code directory), as long as we're in the same environment that we installed the package in.
###Code
%%sh
cd $HOME
source activate test_<yourpkgname> #complete
python -c "import <yourpkgname>;<yourpkgname>.do_something()" #complete
###Output
_____no_output_____
###Markdown
3g: Update the package on github OK, it's now installable. You'll now want to make sure to update the github version to reflect these improvements. You'll need to add and commit all the files. You'll also want to update the README to instruct users that they should use ``python setup.py install`` to install the package.
###Code
!git #complete
###Output
_____no_output_____
###Markdown
Problem 4: Publishing your package on (fake) PyPI Your package can now be installed by anyone who comes across it on github. But it tends to scare some people that they need to download the source code and know ``git`` to use your code. The Python Package Index (PyPI), combined with the ``pip`` tool (now standard in Python), provides a much simpler way to distribute code. Here we will publish your code to a **testing** version of PyPI. 4a: Create a PyPI account First you'll need an account on PyPI to register new packages. Go to the [testing PyPI](https://testpypi.python.org/pypi), and register. You'll also need to supply your login details in the ``.pypirc`` file in your home directory as shown below. (If it were the real PyPI you'd want to be more secure and not have your password in plain text. But for the testing server that's not really an issue.)
###Code
%%file ~/.pypirc
[testpypi]
repository = https://testpypi.python.org/pypi
username = <your user name goes here>
password = <your password goes here>
###Output
Writing /Users/erik/.pypirca
###Markdown
4b: Register your package on PyPI ``distutils`` has built-in functionality for interacting with PyPI. This includes the ability to register your package directly from the command line, automatically filling out the details you provided in your ``setup.py``.*Hint: You'll want to make sure your package version is something you want to release before executing the register command. Released versions can't be duplicates of existing versions, and shouldn't end in "dev" or "b" or the like.*
###Code
!python setup.py register -r https://testpypi.python.org/pypi
###Output
_____no_output_____
###Markdown
(The ``-r`` is normally unnecessary, but we need it here because we're using the "testing" PyPI) 4c: Build a "source" version of your package Check out the PyPI page for your package. You'll see it now has the info from your setup.py *but* there's no package. Again, `distutils` provides a tool to do this automatically - you take the source distribution that was created, and upload it:
###Code
!python setup.py sdist
###Output
_____no_output_____
###Markdown
Verify that there is a ``-.tar.gz`` file in the ``dist`` directory. It should have all of the source code necessary for your package. 4d: Upload your package to PyPI Check out the PyPI page for your package. You'll see it now has the info from your setup.py *but* there's no package. Again, `distutils` provides a tool to do this automatically - you take the source distribution that was created, and upload it:
###Code
!python setup.py sdist upload -r https://testpypi.python.org/pypi
###Output
_____no_output_____
###Markdown
If for some reason this fails (which does happen for unclear reasons on occasion), you can usually just directly upload the ``.tar.gz`` file from the web interface without too much trouble. 4e: Install your package with ``pip`` The ``pip`` tool is a convenient way to install packages from PyPI. Again, we use Anaconda to create a testing environment to make sure everything worked correctly. (Normally the ``-i`` wouldn't be necessary - we're using it here only because we're using the "testing" PyPI)
###Code
%%sh
conda create -n test_pypi_<yourpkgname> anaconda #complete
source activate test_pypi_<yourpkgname> #complete
pip install -i https://testpypi.python.org/pypi <yourpkgname>
%%sh
cd $HOME
source activate test_pypi_<yourpkgname> #complete
python -c "import <yourpkgname>;<yourpkgname>.do_something()" #complete
###Output
_____no_output_____ |
notebooks/standalone_ML_demos.ipynb | ###Markdown
**Warning:** This notebook is 'deprecated', as now we're working on the 'MLencoding' class which is demoed in its own notebook. This one might be useful if you want some tips on implementing these things yourself. Beyond GLMs This notebook accompanies "Modern Machine Learning Far Outperforms GLMs at Predicting Spikes". We implement various Machine Learning algorithms for spike prediction and offer this as a Python template. Table of contents
0. Loading data
0. Define helper functions for scoring and cross-validation
0. Define Models
 0. GLM
 0. XGBoost
 0. Neural Network
 0. Random Forest
 0. Ensemble
0. Nested cross-validation (for evaluating the ensemble)
0. Model Comparison
 0. M1 original features
 0. M1 engineered features
 0. M1 all neurons
0. Appendix 1: Hyperparameter optimization
0. Appendix 2: Running R's glmnet in python
Dependencies Basics
- numpy
- pandas
- scipy
- matplotlib
Methods
- sklearn
- pyglmnet (glm)
- xgboost
- theano (NN)
- keras (NN)
Other
- BayesOpt (Bayesian optimization for better hyperparameters)
###Code
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import scipy.io
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
%matplotlib inline
#for plots
def simpleaxis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.xaxis.set_tick_params(size=6)
ax.yaxis.set_tick_params(size=6)
colors=['#F5A21E', '#02A68E', '#EF3E34', '#134B64', '#FF07CD','b']
###Output
_____no_output_____
###Markdown
1. Data Below we load two datasets available on CRCNS: a [Macaque M1](http://crcns.org/data-sets/movements/dream/downloading-dream) (from [Stevenson et al. 2011](http://jn.physiology.org/content/106/2/764.short)) and a [Rat Hippocampus](http://crcns.org/data-sets/hc/hc-2/about-hc-2) from [Mizuseki et al. 2009](http://www.ncbi.nlm.nih.gov/pubmed/19874793). The data has been organized in Matlab into neat arrays for easy loading here. We will soon want a single numpy array representing the external covariates, and a single numpy vector representing the neural response. The data array X will be of dimensions (n, p), where n is the number of time bins and p is the number of covariates, and the response y will be of dimensions (n, ). We use pandas as an intermediate tool for data organizing, but it's really not necessary - if using your own data just wrangle it into numpy arrays of proper dimension. Load data
###Code
m1_imported = scipy.io.loadmat('../data/m1_stevenson_2011.mat')
###Output
_____no_output_____
###Markdown
1.1 Covariates Pull into a pandas dataframe. This allows us to easily access covariates by name.
###Code
data = pd.DataFrame()
data['time'] = m1_imported['time'][0]
data['handPos_x'] = m1_imported['handPos'][0]
data['handPos_y'] = m1_imported['handPos'][1]
data['handVel_x'] = m1_imported['handVel'][0]
data['handVel_y'] = m1_imported['handVel'][1]
data.head()
###Output
_____no_output_____
###Markdown
Plot raw M1 data This gives some intuitive feeling for the type of data we're working with.
###Code
plt.figure(figsize=(10,10))
plt.rcParams['font.size'] = 10
plt.rcParams['font.weight'] = 'medium'
ldata = m1_imported['spikes'].shape
# Position scatter
ts = 14
gs0 = gridspec.GridSpec(3, 2)
plt.subplot(gs0[0,0])
simpleaxis(plt.gca())
plt.scatter(data['handPos_x'], data['handPos_y'], s=.1, alpha=.1)
plt.xlabel('x [m]')
plt.ylabel('y [m]')
plt.axis('equal')
plt.title('Hand Position',fontsize=ts)
plt.ylim([-0.42,-0.16])
plt.xlim([-0.15,0.15])
plt.xticks(np.arange(-0.15,0.2, .1))
# Velocity scatter
plt.subplot(gs0[0,1])
simpleaxis(plt.gca())
plt.scatter(data['handVel_x'], data['handVel_y'], s=.1, alpha=.1)
plt.xlabel('$v_x$ [m/s]')
plt.ylabel('$v_y$ [m/s]')
plt.axis('equal')
plt.title('Hand Velocity',fontsize=ts)
# Position trace
plt.subplot(gs0[1,:])
a=plt.gca()
simpleaxis(a)
a.text(-20,-.03,'X',weight='bold')
a.text(-20,-.33,'Y',weight='bold')
alpha = 0.8
lw = 1
plt.plot(data['time'], data['handPos_x'], 'k-', alpha=alpha, lw=lw)
plt.plot(data['time'], data['handPos_y'], '-', color=colors[0], alpha=alpha, lw=lw)
a.axis([-40,800,-0.5,0.1])
plt.xlabel('Time [s]')
plt.ylabel('x, y [m]')
plt.title('Hand Position')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Compute more covariates/features These will be used as the 'engineered' features for improving the GLM's performance.
###Code
data['velDir'] = np.arctan2(data['handVel_y'], data['handVel_x'])
data['cos_velDir'] = np.cos(data['velDir'])
data['sin_velDir'] = np.sin(data['velDir'])
data['speed'] = np.sqrt(data['handVel_x'].values**2+data['handVel_y'].values**2)
r = np.arctan2(data['handPos_y'], data['handPos_x'])
data['cos_PosDir'] = np.cos(r)
data['sin_PosDir'] = np.sin(r)
data['radial_Pos'] = np.sqrt(data['handPos_x'].values**2+data['handPos_y'].values**2)
data.head()
###Output
_____no_output_____
###Markdown
3. Cross-validation It's important to 1. evaluate the performance of a method on held-out data, 2. train on as much data as you can, and 3. have greater confidence in your results (see comic). These three criteria are met with [k-fold cross-validation (CV)](https://en.wikipedia.org/wiki/Cross-validation_(statistics)). The method `fit_cv` below returns a cross-validated pseudo-R2 score for an algorithm on a dataset. __Note regarding ensemble:__ When training a stacked ensemble, it's good practice to train the 2nd-stage regressor on the predictions of 1st-stage regressors _not trained on data in the ensemble's test set_. Otherwise there could be some data leakage and overfitting; the 1st-stage regressors may have learned some of the statistics of the noise specific to the test set, for example. It's not guaranteed this will happen, especially for larger datasets, but to be correct you can use the `fit_nested_cv` and accompanying `ensemble_cv` methods in __Appendix 1__. [](http://xkcd.com/1725/)
###Code
from sklearn.model_selection import KFold
def poisson_pseudoR2(y, yhat, ynull):
# This is our scoring function. Implements pseudo-R2
yhat = yhat.reshape(y.shape)
eps = np.spacing(1)
L1 = np.sum(y*np.log(eps+yhat) - yhat)
L1_v = y*np.log(eps+yhat) - yhat
L0 = np.sum(y*np.log(eps+ynull) - ynull)
LS = np.sum(y*np.log(eps+y) - y)
R2 = 1-(LS-L1)/(LS-L0)
return R2
def fit_cv(X, Y, algorithm, n_cv=10, verbose=1):
"""Performs cross-validated fitting. Returns (Y_hat, pR2_cv); a vector of predictions Y_hat with the
same dimensions as Y, and a list of pR2 scores on each fold pR2_cv.
X = input data
Y = spiking data
    algorithm = the name (a string) of a fitting function with signature (Xr, Yr, Xt)
                {training data Xr and response Yr and testing features Xt}
                that returns the predicted response Yt (the name is resolved with eval below)
    n_cv = number of cross-validation folds
"""
if np.ndim(X)==1:
X = np.transpose(np.atleast_2d(X))
cv_kf = KFold(n_splits=n_cv, shuffle=True, random_state=42)
skf = cv_kf.split(X)
i=1
Y_hat=np.zeros(len(Y))
pR2_cv = list()
for idx_r, idx_t in skf:
if verbose > 1:
print( '...runnning cv-fold', i, 'of', n_cv)
i+=1
Xr = X[idx_r, :]
Yr = Y[idx_r]
Xt = X[idx_t, :]
Yt = Y[idx_t]
        # accept either a function object or the name of a function defined at the top level
        Yt_hat = algorithm(Xr, Yr, Xt) if callable(algorithm) else eval(algorithm)(Xr, Yr, Xt)
Y_hat[idx_t] = Yt_hat
pR2 = poisson_pseudoR2(Yt, Yt_hat, np.mean(Yr))
pR2_cv.append(pR2)
if verbose > 1:
print( 'pR2: ', pR2)
if verbose > 0:
print("pR2_cv: %0.6f (+/- %0.6f)" % (np.mean(pR2_cv),
np.std(pR2_cv)/np.sqrt(n_cv)))
return Y_hat, pR2_cv
def plot_model_comparison(models_for_plot, models=[], color='r', title=None, labels=[],fs=12):
"""Just makes a comparision bar plot."""
plt.plot([-1, len(models_for_plot)], [0,0],'--k', alpha=0.4)
if not labels:
labels = models_for_plot
mean_pR2 = list()
sem_pR2 = list()
for model in models_for_plot:
PR2_art = models[model]['PR2']
mean_pR2.append(np.mean(PR2_art))
sem_pR2.append(np.std(PR2_art)/np.sqrt(np.size(PR2_art)))
plt.bar(np.arange(np.size(mean_pR2)), mean_pR2, 0.8, align='center',
ecolor='k', alpha=.9, color=color, ec='w', yerr=np.array(sem_pR2),
tick_label=labels)
plt.plot(np.arange(np.size(mean_pR2)), mean_pR2, 'k.', markersize=15)
plt.ylabel('Pseudo-R2',fontsize=fs)
simpleaxis(plt.gca())
if title:
plt.title(title)
###Output
_____no_output_____
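###Markdown
A quick sanity check of the scoring function on a tiny made-up spike-count vector (the numbers below are arbitrary, not data from this study): predicting the mean rate everywhere scores exactly 0, and predictions that track the counts push the score toward 1.
###Code
# Toy check of poisson_pseudoR2 on hypothetical counts
y_toy = np.array([0., 1., 2., 4., 3., 0., 1., 2.])
yhat_close = np.array([0.2, 1.1, 1.8, 3.6, 2.9, 0.3, 0.9, 2.1]) # tracks y closely
yhat_null = np.full(y_toy.shape, y_toy.mean()) # the null model: predict the mean everywhere
print(poisson_pseudoR2(y_toy, yhat_close, y_toy.mean())) # well above 0, approaching 1
print(poisson_pseudoR2(y_toy, yhat_null, y_toy.mean())) # exactly 0 by construction
###Output
_____no_output_____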
###Markdown
Define Models GLMNote: Different problems may require different regularization parameters __alpha__ and __reg_lambda__. The __learning_rate, tol__, and __max_iter__ should also be adjusted to ensure convergence (they can be touchy).One can also compare two implementations of a GLM against each other to ensure proper convergence. See the appendix for a note on how to implement R's 'glmnet' in Python.
###Code
from pyglmnet import GLM
def glm_pyglmnet(Xr, Yr, Xt):
glm = GLM(distr='softplus', alpha=0.1, tol=1e-8, verbose=0,
reg_lambda=np.logspace(np.log(0.05), np.log(0.0001), 10, base=np.exp(1)),
learning_rate=2, max_iter=10000, eta=2.0, random_state=1)
glm.fit(Xr, Yr)
Yt = glm[-1].predict(Xt)
return Yt
###Output
_____no_output_____
###Markdown
XGBoostNote: Many of these parameters __(learning rate, estimators, subsampling, max_depth, and gamma)__ should be optimized for the prediction problem at hand. Optimization can be done with a grid search, randomized search, or with Bayesian Optimization (see the appendix at the bottom); a sketch of a simple grid search follows the code below.
###Code
import xgboost as xgb
def xgb_run(Xr, Yr, Xt):
params = {'objective': "count:poisson", #for poisson output
'eval_metric': "logloss", #loglikelihood loss
'seed': 2925, #for reproducibility
'silent': 1,
'learning_rate': 0.05,
'min_child_weight': 2, 'n_estimators': 580,
'subsample': 0.6, 'max_depth': 5, 'gamma': 0.4}
dtrain = xgb.DMatrix(Xr, label=Yr)
dtest = xgb.DMatrix(Xt)
num_round = 200
bst = xgb.train(params, dtrain, num_round)
Yt = bst.predict(dtest)
return Yt
###Output
_____no_output_____
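###Markdown
As a minimal illustration of the grid-search option mentioned above, the sketch below wires xgboost's scikit-learn wrapper into scikit-learn's GridSearchCV. The grid values are placeholders rather than tuned choices, it uses GridSearchCV's default scoring for simplicity, and it assumes the design matrix X and spike vector y defined in the 'Method comparison' section below. Appendix 1 shows a Bayesian-optimization alternative.
###Code
# Sketch: grid search over a few XGBoost hyperparameters (illustrative values only)
from sklearn.model_selection import GridSearchCV
xgb_reg = xgb.XGBRegressor(objective="count:poisson")
param_grid = {'max_depth': [3, 5, 7],
              'learning_rate': [0.05, 0.1],
              'n_estimators': [200, 500]}
search = GridSearchCV(xgb_reg, param_grid, cv=3)
search.fit(X, y)
print(search.best_params_)
###Output
_____no_output_____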
###Markdown
Neural NetsNote: Again, these parameters should be optimized. We highlight __dropout__, elastic net regularization __l1, l2__, and the number of nodes in the hidden layers. Optimization can be done with a grid search, randomized search, or with Bayesian Optimization (see the appendix at the bottom).There are many, many options for implementing NNs. One might also test max-norm regularization, RMSprop instead of Nadam, more or fewer layers, or different batch sizes or numbers of epochs; a sketch of one such variant follows the code below.
###Code
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Lambda
from keras.regularizers import l1l2
from keras.optimizers import Nadam
def nn(Xr, Yr, Xt):
params = {'dropout': 0.5,
'l1': 0.0,
'l2': 0.0,
              'n1': 1980, #number of units in the 1st hidden layer
'n2': 18}
if np.ndim(Xr)==1:
Xr = np.transpose(np.atleast_2d(Xr))
model = Sequential()
model.add(Dense(params['n1'], input_dim=np.shape(Xr)[1], init='glorot_normal',
activation='relu', W_regularizer=l1l2(params['l1'],params['l2'])))
model.add(Dropout(params['dropout']))
model.add(Dense(params['n2'], init='glorot_normal'
, activation='relu',W_regularizer=l1l2(params['l1'],params['l2'])))
model.add(Dense(1,activation='softplus'))
optim = Nadam()
model.compile(loss='poisson', optimizer=optim,)
hist = model.fit(Xr, Yr, batch_size = 32, nb_epoch=5, verbose=0, validation_split=0.0)
Yt = model.predict(Xt)[:,0]
return Yt
###Output
_____no_output_____
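###Markdown
As a sketch of one of the variants mentioned above, the function below swaps Nadam for RMSprop and adds a max-norm constraint on the weights. It assumes the same Keras 1.x API used in `nn()`; the layer sizes are arbitrary and untuned.
###Code
# Sketch of an alternative network: RMSprop optimizer plus max-norm weight constraints
from keras.optimizers import RMSprop
from keras.constraints import maxnorm
def nn_rmsprop(Xr, Yr, Xt):
    if np.ndim(Xr)==1:
        Xr = np.transpose(np.atleast_2d(Xr))
    model = Sequential()
    model.add(Dense(512, input_dim=np.shape(Xr)[1], init='glorot_normal',
                    activation='relu', W_constraint=maxnorm(2)))
    model.add(Dropout(0.5))
    model.add(Dense(16, init='glorot_normal', activation='relu',
                    W_constraint=maxnorm(2)))
    model.add(Dense(1, activation='softplus'))
    model.compile(loss='poisson', optimizer=RMSprop())
    model.fit(Xr, Yr, batch_size=32, nb_epoch=5, verbose=0)
    Yt = model.predict(Xt)[:,0]
    return Yt
###Output
_____no_output_____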
###Markdown
Other methodsThese methods aren't highlighted in the paper but may help to improve the ensemble. Random Forest
###Code
from sklearn.ensemble import RandomForestRegressor
def rf(Xr, Yr, Xt):
params = {'max_depth': 15,
'min_samples_leaf': 4,
'min_samples_split': 5,
'min_weight_fraction_leaf': 0.0,
'n_estimators': 471}
clf = RandomForestRegressor(**params)
clf.fit(Xr, Yr)
Yt = clf.predict(Xt)
return Yt
###Output
_____no_output_____
###Markdown
K-nearest neighbors
###Code
from sklearn.neighbors import KNeighborsRegressor
def knn(Xr, Yr, Xt):
neigh = KNeighborsRegressor(n_neighbors=5,weights='distance')
neigh.fit(Xr, Yr)
Yt = neigh.predict(Xt)
    #returns the distance-weighted average of the neighbors' spike counts
return Yt
###Output
_____no_output_____
###Markdown
EnsembleOne can use any algorithm as the stacked model (the one which takes previous models as inputs). XGBoost tends to work well. Note that the input data Xr and Xt will be the _outputs_ Yt_hat of the above functions.A simple weighted model average also tends to work fairly well, in which case we'll use a (rectified) linear regressor as the stacked regressor. That's defined below.
###Code
from sklearn.linear_model import LinearRegression
def lin_comb(Xr, Yr, Xt):
lr = LinearRegression()
lr.fit(Xr, Yr)
Yt = lr.predict(Xt)
#rectify outputs
Yt = np.maximum(Yt,np.zeros(Yt.shape))
return Yt
###Output
_____no_output_____
###Markdown
Method comparisonLet's take a single neuron from the M1 set and test the above methods.First we'll test on the set of _original features_ (not engineered).
###Code
X = data[['handPos_x','handPos_y','handVel_x','handVel_y']].values
neuron_n = 1
y = m1_imported['spikes'][neuron_n]
###Output
_____no_output_____
###Markdown
Let's plot the neuron's response against the two components of hand velocity.
###Code
plt.figure(figsize=(10,5))
# Use jitter on the y axis for vizualization of trends
y_noise = np.random.randn(np.size(y))
nnoise = 0.2
plt.subplot(121)
simpleaxis(plt.gca())
plt.plot(data['handVel_x'], y+nnoise*y_noise, 'k.', alpha=0.1)
plt.xlabel('Speed in X')
plt.ylabel('spike counts')
plt.subplot(122)
simpleaxis(plt.gca())
plt.plot(data['handVel_y'], y+nnoise*y_noise, 'k.', alpha=0.1)
plt.xlabel('Speed in Y')
plt.ylabel('spike counts')
plt.show()
###Output
_____no_output_____
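###Markdown
M1 cells are classically described by direction tuning, so a rough way to see structure in the raw counts is to bin the velocity-direction covariate computed earlier and average the spike counts in each bin. The sketch below uses an arbitrary choice of 16 bins.
###Code
# Sketch: mean spike count as a function of movement direction (coarse tuning curve)
bins = np.linspace(-np.pi, np.pi, 17) # 16 direction bins (arbitrary)
bin_idx = np.digitize(data['velDir'].values, bins) - 1
centers = (bins[:-1] + bins[1:]) / 2.
tuning = [np.mean(y[bin_idx == b]) for b in range(16)]
plt.plot(centers, tuning, 'o-', color=colors[1])
plt.xlabel('Movement direction [rad]')
plt.ylabel('Mean spike count')
plt.show()
###Output
_____no_output_____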
###Markdown
Now we can fit the data using a method from above:
###Code
#We'll store results here.
Models = dict()
Yt_hat, PR2 = fit_cv(X, y, algorithm = 'glm_pyglmnet', n_cv=8, verbose=2)
Models['glm'] = dict()
Models['glm']['Yt_hat'] = Yt_hat
Models['glm']['PR2'] = PR2
###Output
('...runnning cv-fold', 1, 'of', 8)
('pR2: ', 0.047990744845053945)
('...runnning cv-fold', 2, 'of', 8)
('pR2: ', 0.041296859143503339)
('...runnning cv-fold', 3, 'of', 8)
('pR2: ', 0.055977421252294768)
('...runnning cv-fold', 4, 'of', 8)
('pR2: ', 0.046396193112219875)
('...runnning cv-fold', 5, 'of', 8)
('pR2: ', 0.058007222444918738)
('...runnning cv-fold', 6, 'of', 8)
('pR2: ', 0.052409653792728994)
('...runnning cv-fold', 7, 'of', 8)
('pR2: ', 0.056002175496805306)
('...runnning cv-fold', 8, 'of', 8)
('pR2: ', 0.048319487792282678)
pR2_cv: 0.050800 (+/- 0.001905)
###Markdown
Or all at once:
###Code
methods = ['nn','xgb_run','rf','knn']
for method in methods:
print('Running '+method+'...')
Yt_hat, PR2 = fit_cv(X, y, algorithm = method, n_cv=8, verbose=1)
Models[method] = dict()
Models[method]['Yt_hat'] = Yt_hat
Models[method]['PR2'] = PR2
###Output
Running nn...
pR2_cv: 0.103180 (+/- 0.003494)
Running xgb_run...
pR2_cv: 0.151781 (+/- 0.004155)
Running rf...
pR2_cv: 0.152336 (+/- 0.007846)
Running knn...
pR2_cv: -1.448570 (+/- 0.113684)
###Markdown
Now we can make the ensemble. Note that the ensemble should really be trained on predictions from 1st-stage models that were not trained on the ensemble's test set. This is not the case in a normal CV scheme. We could be slightly overfitting below.
###Code
methods = ['glm','nn','xgb_run','rf','knn']
X_ens = list()
for method in methods:
X_ens.append(Models[method]['Yt_hat'])
# The columns of X_ens are the predictions of each of the above methods
X_ens = np.transpose(np.array(X_ens))
#We can use XGBoost as the 2nd-stage model
Yt_hat, PR2 = fit_cv(X_ens, y, algorithm = 'xgb_run', n_cv=8, verbose=1)
Models['ens_xgb'] = dict()
Models['ens_xgb']['Yt_hat'] = Yt_hat
Models['ens_xgb']['PR2'] = PR2
#Let's also test the weighted model average.
Yt_hat, PR2 = fit_cv(X_ens, y, algorithm = 'lin_comb', n_cv=8, verbose=1)
Models['ens'] = dict()
Models['ens']['Yt_hat'] = Yt_hat
Models['ens']['PR2'] = PR2
plot_model_comparison(['glm', 'nn','xgb_run','rf','knn','ens_xgb','ens'],models=Models,
color=colors,labels = ['GLM','NN','XGB','RF','KNN','Ens.\n (XGBoost)','Ens.\n (Linear)'])
plt.ylim((-.2,.2));
###Output
_____no_output_____
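###Markdown
Since the linear ensemble is just a weighted model average, it can be informative to refit the linear combination on the stacked predictions and inspect the weight assigned to each 1st-stage model. This is a quick sketch for inspection only (not a held-out score); the coefficient order follows `methods` above.
###Code
# Sketch: how the linear ensemble weights each 1st-stage model
lr = LinearRegression()
lr.fit(X_ens, y)
for method, w in zip(methods, lr.coef_):
    print(method, w)
print('intercept:', lr.intercept_)
###Output
_____no_output_____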
###Markdown
The KNN performed worse than the mean (the null model) in this case; it could possibly be omitted (as it was in the paper).__We're (mostly) done!__ These are the basics of running these methods. To improve all methods, we can add more features. These could be the spike history, representing wholly new information (a sketch of such history covariates follows the next code cell), or simply engineered functions of the original features (as done below). Use engineered featuresWe can get better performance on all methods by engineering the features.
###Code
print('Features are:\n %s' %list(data.drop('time', axis=1).keys()))
X = data.drop('time', axis=1).values
neuron_n = 1
y = m1_imported['spikes'][neuron_n]
methods = ['glm_pyglmnet','nn','xgb_run','rf','knn']
for method in methods:
print('Running '+method+'...')
Yt_hat, PR2 = fit_cv(X, y, algorithm = method, n_cv=8, verbose=1)
Models[method+'_engineered'] = dict()
Models[method+'_engineered']['Yt_hat'] = Yt_hat
Models[method+'_engineered']['PR2'] = PR2
print('Running ensemble...')
# Put the previous results in a new data matrix
X_ens = list()
for method in methods:
X_ens.append(Models[method+'_engineered']['Yt_hat'])
X_ens = np.transpose(np.array(X_ens))
#We can use XGBoost as the 2nd-stage model
Yt_hat, PR2 = fit_cv(X_ens, y, algorithm = 'xgb_run', n_cv=8, verbose=1)
Models['ens_xgb_engineered'] = dict()
Models['ens_xgb_engineered']['Yt_hat'] = Yt_hat
Models['ens_xgb_engineered']['PR2'] = PR2
Yt_hat, PR2 = fit_cv(X_ens, y, algorithm = 'lin_comb', n_cv=8, verbose=1)
Models['ens_engineered'] = dict()
Models['ens_engineered']['Yt_hat'] = Yt_hat
Models['ens_engineered']['PR2'] = PR2
plot_model_comparison(['glm_pyglmnet_engineered','nn_engineered','xgb_run_engineered','rf_engineered','knn_engineered','ens_xgb_engineered','ens_engineered']
,models=Models, color=colors,labels = ['GLM','NN','XGB','RF','KNN','Ens.\n (XGBoost)','Ens.\n (Linear)'])
plt.ylim((-.2,.2));
###Output
_____no_output_____
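###Markdown
As mentioned above, spike history is one feature type not included in this notebook's design matrix. A minimal sketch of how such covariates could be built: lagged copies of the response, zero-padded at the start (the number of lags here is arbitrary). The augmented matrix can then be passed to `fit_cv` exactly like X.
###Code
# Sketch: append a few one-bin spike-history lags to the design matrix
n_lags = 3 # arbitrary
history = np.zeros((len(y), n_lags))
for lag in range(1, n_lags + 1):
    history[lag:, lag - 1] = y[:-lag]
X_hist = np.hstack((X, history))
print(X_hist.shape)
###Output
_____no_output_____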
###Markdown
Looks like the tree-based methods (XGBoost and Random Forest) perform the best. The ensemble leverages this performance. Introduce nested CV scheme An optional excursion to ensure that our ensemble's scores are accurateAs mentioned, the ensemble could be overfitting the noise specific to its test set if it's trained on predictions from models that were themselves trained on the test set. This is a problem if we want accurate scores to be used for method comparison. To get around it we'll implement the following scheme.First, the data is split into _p_ folds. These are our 'outer' folds. Each fold contains a test set and a training set for the ensemble.In each of the _p_ folds, we need to build the training set. This we'll accomplish by normal _k_-fold CV for each of our 1st-stage methods. Rotating through the _k_ folds, training and predicting our 1st-stage methods _k_ times, will result in a full training set of predictions. We'll also need the test set. This we can accomplish by training our 1st-stage methods on the whole training set, and predicting the test set. This part is just normal _p_-fold CV.Now we have a training set and a test set for the ensemble, so we'll train, predict, and test it. This completes the first fold! After repeating the same process on each of the _p_ folds, we'll have predictions of the neural response for the entire dataset, and _p_ pseudo-$R^2$ scores for the ensemble. We'll have trained each 1st-stage method _p(k+1)_ times throughout this process, so it can be quite computationally demanding. It's up to the user to decide if this computational cost is worth the guarantee that the ensemble's score is not inflated because of information leakage.
###Code
# These two methods implement the above scheme. We don't want to be forced to run the ensemble
# at the same time as we train the other methods on each fold, so we'll save the 1st-stage predictions for later
# and use separate methods for training a 1st-stage method and the 2nd-stage method. This will make more sense
# when we implement this.
# Basically, the first method is used to train a 1st-stage method, and the 2nd to train a 2nd-stage method.
def fit_nested_cv(X, Y, algorithm = 'glm_pyglmnet', n_cv_outer=5,n_cv_inner=5, verbose=1):
"""Outputs a list of n_cv_outer prediction vectors Yt_hats, each with length size(Y).
n_cv_outer is p, in the notation above, and n_cv_inner is k.
Each prediction vector will be used to train and test a single fold of the ensemble
in the method `ensemble_cv`. """
if np.ndim(X)==1:
X = np.transpose(np.atleast_2d(X))
# indices of outer test/train split for each fold
# It is imperative that the random state be identical to the random state of the Kfold used
# in ensemble_cv
cv_kf = KFold(n_splits=n_cv_outer, shuffle=True, random_state=42)
skf = cv_kf.split(X)
i=1
Y_hat=np.zeros((len(Y),n_cv_outer))
pR2_cv = list()
# In outer loop, we rotate the test set through the full dataset
for idx_r, idx_t in skf:
if verbose > 1:
print( '...runnning outer cv-fold', i, 'of', n_cv_outer)
Xr_o = X[idx_r, :] # train set input
Yr_o = Y[idx_r] # train set output
Xt_o = X[idx_t, :] # test set input
Yt_o = Y[idx_t] # test set output (used for scoring ensemble only)
cv_kf_in = KFold(n_splits=n_cv_inner, shuffle=True, random_state=42)
skf_inner = cv_kf_in.split(Xr_o)
j=1
# In the inner loop, we perform CV to predict the full validation set Yr_o, which will be recorded
# to be used for ensemble training. THEN we use the full Xr_o to predict values for Xt_o, which will
# be used for ensemble evaluation.
for idx_r_inner, idx_t_inner in skf_inner:
j+=1
Xr = Xr_o[idx_r_inner, :]
Yr = Yr_o[idx_r_inner]
Xt = Xr_o[idx_t_inner, :]
Yt = Yr_o[idx_t_inner]
# Predict a fold of the Yr_o (validation)
Yt_hat = eval(algorithm)(Xr, Yr, Xt)
full_indices = idx_r[idx_t_inner] # indices of inner loop
Y_hat[full_indices,i-1] = Yt_hat
Yt_hat.reshape(Yt.shape)
pR2 = poisson_pseudoR2(Yt, Yt_hat, np.mean(Yr))
pR2_cv.append(pR2)
if verbose > 1:
print( 'pR2: ', pR2)
# Now predict the ensemble's test set
Yt_hat = eval(algorithm)(Xr_o, Yr_o, Xt_o)
Y_hat[idx_t,i-1] = Yt_hat
pR2 = poisson_pseudoR2(Yt_o, Yt_hat, np.mean(Yr_o))
pR2_cv.append(pR2)
i+=1
if verbose > 0:
print("pR2_cv: %0.6f (+/- %0.6f)" % (np.mean(pR2_cv),
np.std(pR2_cv)/np.sqrt(n_cv_inner*n_cv_outer)))
return Y_hat, pR2_cv
def ensemble_cv(X_list, Y, algorithm = 'glm_pyglmnet', n_cv_outer=5, verbose=1):
"""Outputs the scores and prediction of the ensemble on held-out data.
X = list of (size(Y),n_cv_outer) np array. Each array is a previously trained method's predictions
all single folds. It should be of dimension (np.size(Y),n_cv_outer).
It is necessary that n_cv_outer and the random state of the KFold be the same as
in `fit_nested_cv`. """
for x in X_list:
assert x.shape == (np.size(Y),n_cv_outer)
# indices of outer test/train split for each fold
cv_kf = KFold(n_splits=n_cv_outer, shuffle=True, random_state=42)
skf = cv_kf.split(X_list[0])
i=0
Y_hat=np.zeros(len(Y))
pR2_cv = list()
for idx_r, idx_t in skf:
# Get the first fold from each list
X = np.array([x[:,i] for x in X_list])
X = X.transpose()
Xr = X[idx_r, :]
Yr = Y[idx_r]
Xt = X[idx_t, :]
Yt = Y[idx_t]
i+=1
if verbose > 1:
print( '...runnning cv-fold', i, 'of', n_cv_outer)
Yt_hat = eval(algorithm)(Xr, Yr, Xt)
Y_hat[idx_t] = Yt_hat
pR2 = poisson_pseudoR2(Yt, Yt_hat, np.mean(Yr))
pR2_cv.append(pR2)
if verbose > 1:
print( 'pR2: ', pR2)
if verbose > 0:
print("pR2_cv: %0.6f (+/- %0.6f)" % (np.mean(pR2_cv),
np.std(pR2_cv)/np.sqrt(n_cv_outer)))
return Y_hat, pR2_cv
###Output
_____no_output_____
###Markdown
Let's run this on the same neuron above and see if there's a difference in the ensemble score
###Code
methods = ['glm_pyglmnet','nn','xgb_run','rf','knn']
X_ens = list()
for method in methods:
print('Running '+method+'...')
Yt_hat, PR2 = fit_nested_cv(X, y, algorithm = method ,n_cv_outer=8)
Models[method+'_nested'] = dict()
Models[method+'_nested']['Yt_hat'] = Yt_hat
Models[method+'_nested']['PR2'] = PR2
# Put the previous results in a new data matrix
X_ens.append(Yt_hat)
print('Running ensemble...')
Yt_hat, PR2 = ensemble_cv(X_ens, y, algorithm = 'xgb_run', n_cv_outer=8, verbose=1)
Models['ens_nested'] = dict()
Models['ens_nested']['Yt_hat'] = Yt_hat
Models['ens_nested']['PR2'] = PR2
plot_model_comparison(['ens_engineered','ens_nested']
,models=Models, color=colors,labels = ['Ens. \n(not nested)','Ens. (nested)'])
###Output
_____no_output_____
###Markdown
__We can see that the nested CV scheme didn't make much of a difference on this dataset. For now we can assume that normal k-fold CV is not overfitting the data.__ Run over all neurons in the datasetIn the publication we tested 4 feature sets for the M1 dataset. Here we'll just run over the engineered feature set.This is currently set up to run the normal CV scheme. This is for expediency; with this dataset and these methods, the nested CV scheme took our machine several hours to run.
###Code
methods = ['glm_pyglmnet','nn','xgb_run','rf','knn']
# We'll store both the predictions and the pseudo-R2 score in a dictionary for each method
for method in methods:
Models[method+'_all'] = dict()
Models[method+'_all']['PR2'] = list()
Models[method+'_all']['Yt_hat'] = list()
Models['ens_all'] = dict()
Models['ens_all']['Yt_hat'] = list()
Models['ens_all']['PR2'] = list()
nneurons = np.shape(m1_imported['spikes'])[0]
for i in range(nneurons):
    print('\n running for neuron %d' % i)
y = m1_imported['spikes'][i]
for method in methods:
print('Running '+method+'...')
Yt_hat, PR2 = fit_cv(X, y, algorithm = method, n_cv=8, verbose=1)
Models[method+'_all']['Yt_hat'].append(Yt_hat)
Models[method+'_all']['PR2'].append(PR2)
print('Running ensemble...')
# Put the previous results in a new data matrix
X_ens = list()
for method in methods:
X_ens.append(Models[method+'_all']['Yt_hat'][i])
X_ens = np.transpose(np.array(X_ens))
Yt_hat, PR2 = fit_cv(X_ens, y, algorithm = 'xgb_run', n_cv=8, verbose=1)
Models['ens_all']['Yt_hat'].append(Yt_hat)
Models['ens_all']['PR2'].append(PR2)
###Output
running for neuron 0
Running glm_pyglmnet...
pR2_cv: 0.019708 (+/- 0.002247)
Running nn...
pR2_cv: 0.025202 (+/- 0.003476)
Running xgb_run...
pR2_cv: 0.043901 (+/- 0.003588)
Running rf...
pR2_cv: 0.030623 (+/- 0.004210)
Running knn...
pR2_cv: -2.723586 (+/- 0.165410)
Running ensemble...
pR2_cv: 0.039113 (+/- 0.004420)
running for neuron 1
Running glm_pyglmnet...
pR2_cv: 0.050800 (+/- 0.001905)
Running nn...
pR2_cv: 0.107952 (+/- 0.003695)
Running xgb_run...
pR2_cv: 0.151781 (+/- 0.004155)
Running rf...
pR2_cv: 0.153035 (+/- 0.007690)
Running knn...
pR2_cv: -1.448570 (+/- 0.113684)
Running ensemble...
pR2_cv: 0.160966 (+/- 0.004447)
running for neuron 2
Running glm_pyglmnet...
pR2_cv: 0.068629 (+/- 0.002075)
Running nn...
pR2_cv: 0.081361 (+/- 0.003728)
Running xgb_run...
pR2_cv: 0.116009 (+/- 0.003960)
Running rf...
pR2_cv: 0.110817 (+/- 0.009056)
Running knn...
pR2_cv: -1.435584 (+/- 0.083983)
Running ensemble...
pR2_cv: 0.117260 (+/- 0.005539)
running for neuron 3
Running glm_pyglmnet...
pR2_cv: 0.040684 (+/- 0.001907)
Running nn...
pR2_cv: 0.076482 (+/- 0.003945)
Running xgb_run...
pR2_cv: 0.108575 (+/- 0.004181)
Running rf...
pR2_cv: 0.097349 (+/- 0.007634)
Running knn...
pR2_cv: -2.301393 (+/- 0.125503)
Running ensemble...
pR2_cv: 0.109097 (+/- 0.005146)
running for neuron 4
Running glm_pyglmnet...
pR2_cv: 0.024160 (+/- 0.002219)
Running nn...
pR2_cv: 0.189817 (+/- 0.003849)
Running xgb_run...
pR2_cv: 0.302463 (+/- 0.004459)
Running rf...
pR2_cv: 0.305592 (+/- 0.006707)
Running knn...
pR2_cv: 0.190224 (+/- 0.011230)
Running ensemble...
pR2_cv: 0.306045 (+/- 0.005913)
running for neuron 5
Running glm_pyglmnet...
pR2_cv: 0.002092 (+/- 0.000789)
Running nn...
pR2_cv: -0.007359 (+/- 0.005511)
Running xgb_run...
pR2_cv: 0.024298 (+/- 0.004774)
Running rf...
pR2_cv: -0.024255 (+/- 0.017313)
Running knn...
pR2_cv: -7.400815 (+/- 0.215506)
Running ensemble...
pR2_cv: 0.018893 (+/- 0.006812)
running for neuron 6
Running glm_pyglmnet...
pR2_cv: 0.045851 (+/- 0.001498)
Running nn...
pR2_cv: 0.050224 (+/- 0.001662)
Running xgb_run...
pR2_cv: 0.086977 (+/- 0.001950)
Running rf...
pR2_cv: 0.078311 (+/- 0.003805)
Running knn...
pR2_cv: -0.908422 (+/- 0.045085)
Running ensemble...
pR2_cv: 0.085858 (+/- 0.003321)
running for neuron 7
Running glm_pyglmnet...
pR2_cv: -0.000145 (+/- 0.000054)
Running nn...
pR2_cv: -0.009065 (+/- 0.007305)
Running xgb_run...
pR2_cv: -0.027098 (+/- 0.017731)
Running rf...
pR2_cv: -0.208098 (+/- 0.077388)
Running knn...
pR2_cv: -5.395353 (+/- 0.192988)
Running ensemble...
pR2_cv: -0.070074 (+/- 0.048760)
running for neuron 8
Running glm_pyglmnet...
pR2_cv: 0.002439 (+/- 0.000841)
Running nn...
pR2_cv: -0.016480 (+/- 0.008390)
Running xgb_run...
pR2_cv: -0.000922 (+/- 0.009676)
Running rf...
pR2_cv: -0.127746 (+/- 0.023388)
Running knn...
pR2_cv: -6.498334 (+/- 0.155970)
Running ensemble...
pR2_cv: -0.000204 (+/- 0.006620)
running for neuron 9
Running glm_pyglmnet...
###Markdown
This is a method for nicely plotting the scores of methods over an entire dataset:
###Code
import mpl_toolkits.axes_grid.inset_locator as il
def bootstrap(yy):
# sample with replacement
means = []
N = 1000
for i in range(N):
yy_samp = np.random.choice(yy,len(yy))
means.append(np.mean(yy_samp))
means = np.sort(means)
crit05 = int(0.025*N)
crit95 = int(0.975*N)
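    # absolute distances from the sample mean to the 2.5th/97.5th bootstrap percentiles (a 95% CI, used as asymmetric error bars)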
return np.abs(np.mean(yy)-means[[crit05,crit95]])
def plot_scatter(models_,ax_ran=[0, 0.2, 0, 0.2],ts=16,models = Models,labels = None):
"""Plots scores as a scatter plot with an inset of the population mean.
models_ = list of strings of the methods to plot. First in the list is the x-axis, rest are scatter.
The strings should be keys in the dict `models`.
ax_ran = the plotting range. Given to plt.axis
ts = label size
models = dictionary that contains results of methods.
labels = list of labels for inset bar chart. Defaults to the keys of the dictionary. """
if labels is None:
labels = models_
glm_ = models_[0]
sn = np.sqrt(len(np.array(models[glm_]['PR2'][1])))
xx = np.mean(np.array(models[glm_]['PR2']), axis=1)
xerr = np.std(np.array(models[glm_]['PR2']), axis=1)/sn
alpha_lines = 0.3
alpha = 1
lim = ax_ran[1]
i=0
p = []
means=[np.mean(xx)]
mean_err = bootstrap(xx)
for mod in models_[1:]:
yy = np.mean(np.array(models[mod]['PR2']), axis=1)
yerr = np.std(np.array(models[mod]['PR2']), axis=1)/sn
## uncomment to plot error
#p1,c,b=plt.errorbar(xx, yy,fmt=marks[i], xerr = xerr, yerr = yerr ,ms=msize[i], alpha=1,color=colors[i+1])
p1,=plt.plot(xx, yy, '.', ms=10, alpha=.5,color=colors[i+1])
p.append(p1)
plt.plot([-lim, lim], [-lim, lim], 'k:', lw=1, alpha=alpha_lines)
i+=1
means.append(np.mean(yy))
mean_err = np.vstack((mean_err,bootstrap(yy)))
ax = plt.gca()
simpleaxis(ax)
plt.xlabel('GLM pseudo-$R^2$',fontsize=ts)
plt.ylabel('Modern ML pseudo-$R^2$',fontsize=ts)
plt.axis(ax_ran)
ax.set_yticks([0], minor=True)
ax.set_xticks([0], minor=True)
plt.grid(which='minor')
# insert bar chart that is the average
freq = np.round(1.5*np.max(means)/5,2)
if freq<0.01:
freq = np.round(np.max(means)/5,3)
inset_axes = il.inset_axes(ax,
width="40%", # width = 34% of parent_bbox
height="50%", # height : 1 inch
loc=7, borderpad = 1.5,
axes_kwargs = {'title':'Mean p$R^2$',
'yticks':np.arange(0,np.max(means)*1.5,freq),
'ylim':[-np.max(means)*.1,np.max(means)*1.5] })
plt.bar(np.arange(np.size(means)), means, 0.8, align='center',
ecolor='k', alpha=.9, color=colors, ec='w',yerr = mean_err.transpose(),
tick_label=labels)
plt.plot([-.6,len(labels)-.6],[0,0],color='black',)
plot_scatter(['glm_pyglmnet_all','nn_all','xgb_run_all','ens_all'],ax_ran=[0, 0.3, 0, 0.3],ts=16,
models = Models,labels = ['GLM','NN','XGB','Ens.'])
plt.show()
###Output
_____no_output_____
###Markdown
Note that the GLM has not been optimized on this feature set for this tutorial. We managed in the paper to bring it to the level of the NN. Our parameters for XGBoost could be improved, too. Here we observe that it performs better in general (as seen in the scatter) but that some outliers were overfit and are pulling down the mean scores. To improve the methods, we can optimize their parameters with the following method in Appendix 1. Appendix 1: Hyperparameter optimizationEach of the above methods has a number of parameters that can be changed. Selecting the correct values isn't trivial, and can have a very large effect on the output score. Since 'model parameters' already means something else, these tunable parameters are called 'hyperparameters'. Scikit-learn has a number of methods for finding hyperparameters ([see here](http://scikit-learn.org/stable/modules/grid_search.html)). These include grid and randomized search.We're a fan of Bayesian Optimization, since it seems a bit smarter. There are a number of implementations of this; we chose to use BayesOpt. __(Running this method will require installing this [package](https://github.com/fmfn/BayesianOptimization).)__ The following method is given as an example. The method is a bit convoluted and perhaps messy (sorry!) but is designed for ease of use. Here, we optimize for XGBoost.
###Code
from bayes_opt import BayesianOptimization
from sklearn.cross_validation import train_test_split
def bayes_opt_xgb(X,y,xgbBO=None, num_iter=10):
"""Return an optmized XGB for a dataset. Allows warm start; can pass Bayes_opt object from previous run."""
# Optimize on a held-out set.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
params=dict()
# BayesOpt optimizes any function that takes parameters and returns a score.
# We need to define a function like this for XGBoost. That's what xgb_evaluate does.
def xgb_evaluate(min_child_weight,
colsample_bytree,
max_depth,
subsample,
gamma,
learning_rate,
n_estimators):
params['min_child_weight'] = int(min_child_weight)
params['colsample_bytree'] = max(min(colsample_bytree, 1), 0)
params['max_depth'] = int(max_depth)
params['subsample'] = max(min(subsample, 1), 0)
params['gamma'] = max(gamma, 0)
params['learning_rate'] = max(learning_rate, 0)
params['n_estimators'] = max(n_estimators, 0)
# This actually creates the instance of XGBoost. It will be fed to fit_cv, which will output the score.
def xgb_new(Xr, Yr, Xt):
params['objective']= "count:poisson" #for poisson output
params['eval_metric']= "logloss" #loglikelihood loss
params['seed']= 2925 #for reproducibility
params['silent']= 1
params['nthread'] = -1 #with -1 it will use all available threads
dtrain = xgb.DMatrix(Xr, label=Yr)
dtest = xgb.DMatrix(Xt)
num_round = 200
bst = xgb.train(params, dtrain, num_round)
Yt = bst.predict(dtest)
return Yt
        # fit_cv accepts the callable xgb_new directly (see the callable check in fit_cv)
        Yt_hat, PR2 = fit_cv(X_train, y_train, xgb_new, n_cv=6, verbose=0)
return np.mean(PR2)
random_state = 2016
if not xgbBO: #allow warm start. If starting from scratch, do:
init_points=1
# Here is where we set the bounds of the parameters to explore.
xgbBO = BayesianOptimization(xgb_evaluate, {'min_child_weight': (1, 10),
'colsample_bytree': (1, 1),
'max_depth': (1, 10),
'subsample': (0.5, 1),
'gamma': (0, 0.4),
'learning_rate': (0.05, 0.25),
'n_estimators':(10, 1000)
},verbose=1)
# can place results of previous runs here to improve performance
xgbBO.explore( {'colsample_bytree': [1,1.0,1,1],
'gamma': [0.21173851024558027,0.4,0.2199632,0.18957],
'learning_rate': [0.10848417655111556,0.05,0.0952752,0.099619],
'max_depth': [2.097849266768804,8.1, 1.3364514,2.39364],
'min_child_weight': [8.85532939789740331,1,3.06446781,6.33840],
'n_estimators': [900.3009605914325,511,190.8645, 925.70248],
'subsample': [0.83434308305954963,0.5,0.85830945,0.798837]})
else:
init_points = 2
# This is the actual optimization method.
xgbBO.maximize(init_points=init_points, n_iter=num_iter, xi=0.0)
values = xgbBO.res['max']
print(values)
    #want to return an optimized method for later use! We can also see the parameters by typing xgbBO.res['max']
params = values['max_params']
def xgb_best(Xr, Yr, Xt,returnModel=0):
params['min_child_weight'] = int(params['min_child_weight'])
params['max_depth'] = int(params['max_depth'])
params['objective']= "count:poisson" #for poisson output
params['eval_metric']= "logloss" #loglikelihood loss
params['seed']= 2925 #for reproducibility
params['silent']= 1
params['nthread'] = -1 #with -1 it will use all available threads
params['colsample_bytree']= 1.0
dtrain = xgb.DMatrix(Xr, label=Yr)
dtest = xgb.DMatrix(Xt)
num_round = 200
bst = xgb.train(params, dtrain, num_round)
Yt = bst.predict(dtest)
if returnModel:
return bst
else:
return Yt
return xgb_best, xgbBO
###Output
_____no_output_____
###Markdown
This method can be used as follows.
###Code
XGB_optimized, xgbBO_optimized = bayes_opt_xgb(X,y,num_iter=5)
Yt_hat, PR2 = fit_cv(X, y, algorithm = XGB_optimized, n_cv=10, verbose=1)
###Output
_____no_output_____
###Markdown
Appendix 2: GLM implemented with R's glmnet.Since the GLM depends on a proper learning rate, regularization path, etc., it can be helpful to compare against other packages with different defaults. The glmnet package in R is pretty robust. To compare the two GLM implementations, one can use R's glmnet from Python with the following method (designed to interface with `fit_cv`). This requires the installation of rpy2.
###Code
import rpy2.robjects as ro
import rpy2.robjects.numpy2ri as n2r
n2r.activate()
r = ro.r
r.library('glmnet')
def r_glmnet(Xr,Yr,Xt):
yr = ro.FloatVector(Yr)
trained_model = r['glmnet'](Xr, yr, family="poisson",alpha = 0.1)
lambda_min = np.asanyarray(trained_model.rx2('lambda'))[-1]
Yt = r['predict'](trained_model,s=lambda_min,newx=Xt,type="response")
# betas = np.asanyarray(r['as.matrix'](trained_model.rx2('beta')))[:,-1]
# beta0 = np.asanyarray(r['as.matrix'](trained_model.rx2('a0')))[-1]
# print(betas,beta0)
return np.array(list(Yt))
###Output
_____no_output_____
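###Markdown
A sketch of how this drops into the same cross-validation wrapper used for the Python methods above (assuming the M1 design matrix X and spike vector y are still in scope):
###Code
# Sketch: score R's glmnet with the same CV wrapper used for the other methods
Yt_hat_r, PR2_r = fit_cv(X, y, algorithm='r_glmnet', n_cv=8, verbose=1)
###Output
_____no_output_____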
###Markdown
**Warning:** This notebook is 'deprecated', as now we're working on the 'MLencoding' class which is demoed in its own notebook. This one might be useful if you want some tips on implementing these things yourself. Beyond GLMsThis notebook accompanies "Modern Machine Learning Far Outperforms GLMs at Predicting Spikes". We implement various Machine Learning algorithms for spike prediction and offer this as a Python template. Table of contents0. Loading data0. Define helper functions for scoring and cross-validation0. Define Models 0. GLM 0. XGBoost 0. Neural Network 0. Random Forest 0. Ensemble0. Nested cross-validation (for evaluating the ensemble)0. Model Comparison 0. M1 original features 0. M1 engineered features 0. M1 all neurons0. Appendix 1: Hyperparameter optimization0. Appendix 2: Running R's glmnet in python DependenciesBasics- numpy- pandas- scipy- matplotlibMethods- sklearn- pyglmnet (glm)- xgboost- theano (NN)- keras (NN)Other- BayesOpt (Bayesian optimization for better hyperparameters)
###Code
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import scipy.io
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
%matplotlib inline
#for plots
def simpleaxis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.xaxis.set_tick_params(size=6)
ax.yaxis.set_tick_params(size=6)
colors=['#F5A21E', '#02A68E', '#EF3E34', '#134B64', '#FF07CD','b']
###Output
_____no_output_____
###Markdown
1. DataBelow we load two datasets available on CRCNS: a [Macaque M1](http://crcns.org/data-sets/movements/dream/downloading-dream) (from [Stevenston et al. 2011](http://jn.physiology.org/content/106/2/764.short)) and a [Rat Hippocampus](http://crcns.org/data-sets/hc/hc-2/about-hc-2) from [Mizuseki et al. 2009](http://www.ncbi.nlm.nih.gov/pubmed/19874793).The data has been organized in Matlab into neat arrays for easy loading here.We will soon want a single numpy array representing the external covariates, and a single numpy vector representing the neural response. The data array X will be of dimensions (n, p), where n is the number of time bins and p is the number of covariates, and the response y will be of dimensions (n, ) . We use pandas as an intermediate tool for data organizing, but it's really not necessary - if using your own data just wrangle it into numpy arrays of proper dimension. Load data
###Code
m1_imported = scipy.io.loadmat('../data/m1_stevenson_2011.mat')
###Output
_____no_output_____
###Markdown
1.1 CovariatesPull into pandas dataframe. This allows us to easily access covariates by name.
###Code
data = pd.DataFrame()
data['time'] = m1_imported['time'][0]
data['handPos_x'] = m1_imported['handPos'][0]
data['handPos_y'] = m1_imported['handPos'][1]
data['handVel_x'] = m1_imported['handVel'][0]
data['handVel_y'] = m1_imported['handVel'][1]
data.head()
###Output
_____no_output_____
###Markdown
Plot raw M1 dataThis gives some intuitive feeling for the type of data we're working with.
###Code
plt.figure(figsize=(10,10))
plt.rcParams['font.size'] = 10
plt.rcParams['font.weight'] = 'medium'
ldata = m1_imported['spikes'].shape
# Position scatter
ts = 14
gs0 = gridspec.GridSpec(3, 2)
plt.subplot(gs0[0,0])
simpleaxis(plt.gca())
plt.scatter(data['handPos_x'], data['handPos_y'], s=.1, alpha=.1)
plt.xlabel('x [m]')
plt.ylabel('y [m]')
plt.axis('equal')
plt.title('Hand Position',fontsize=ts)
plt.ylim([-0.42,-0.16])
plt.xlim([-0.15,0.15])
plt.xticks(np.arange(-0.15,0.2, .1))
# Velocity scatter
plt.subplot(gs0[0,1])
simpleaxis(plt.gca())
plt.scatter(data['handVel_x'], data['handVel_y'], s=.1, alpha=.1)
plt.xlabel('$v_x$ [m/s]')
plt.ylabel('$v_y$ [m/s]')
plt.axis('equal')
plt.title('Hand Velocity',fontsize=ts)
# Position trace
plt.subplot(gs0[1,:])
a=plt.gca()
simpleaxis(a)
a.text(-20,-.03,'X',weight='bold')
a.text(-20,-.33,'Y',weight='bold')
alpha = 0.8
lw = 1
plt.plot(data['time'], data['handPos_x'], 'k-', alpha=alpha, lw=lw)
plt.plot(data['time'], data['handPos_y'], '-', color=colors[0], alpha=alpha, lw=lw)
a.axis([-40,800,-0.5,0.1])
plt.xlabel('Time [s]')
plt.ylabel('x, y [m]')
plt.title('Hand Position')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Compute more covariates/featuresThese will be used as the 'engineered' features for improving the GLM's performance.
###Code
data['velDir'] = np.arctan2(data['handVel_y'], data['handVel_x'])
data['cos_velDir'] = np.cos(data['velDir'])
data['sin_velDir'] = np.sin(data['velDir'])
data['speed'] = np.sqrt(data['handVel_x'].values**2+data['handVel_y'].values**2)
r = np.arctan2(data['handPos_y'], data['handPos_x'])
data['cos_PosDir'] = np.cos(r)
data['sin_PosDir'] = np.sin(r)
data['radial_Pos'] = np.sqrt(data['handPos_x'].values**2+data['handPos_y'].values**2)
data.head()
###Output
_____no_output_____
###Markdown
3. Cross-validationIt's important to 1. evaluate the performance of a method on held-out data2. train on as much data as you can3. have greater confidence in your results (see comic). These three criteria are met with [k-fold cross-validation (CV)](https://en.wikipedia.org/wiki/Cross-validation_(statistics)). The method `fit_cv` below returns a cross-validated pseudo-R2 score for an algorithm on a dataset.__Note regarding ensemble:__ When training a stacked ensemble, it's good practice to train the 2nd-stage regressor on the predictions of 1st-stage regressors _not trained on data in the ensemble's test set_. Otherwise there could be some data leakage and overfitting; the 1st-stage regressors may have learned some of the statistics of the noise specific to the test set, for example. It's not guaranteed this will happen, especially for larger datasets, but to be correct you can use the `fit_nested_cv` and accompanying `ensemble_cv` methods introduced in the nested CV section below. [](http://xkcd.com/1725/)
###Code
from sklearn.model_selection import KFold
def poisson_pseudoR2(y, yhat, ynull):
# This is our scoring function. Implements pseudo-R2
yhat = yhat.reshape(y.shape)
eps = np.spacing(1)
L1 = np.sum(y*np.log(eps+yhat) - yhat)
L1_v = y*np.log(eps+yhat) - yhat
L0 = np.sum(y*np.log(eps+ynull) - ynull)
LS = np.sum(y*np.log(eps+y) - y)
R2 = 1-(LS-L1)/(LS-L0)
return R2
def fit_cv(X, Y, algorithm, n_cv=10, verbose=1):
"""Performs cross-validated fitting. Returns (Y_hat, pR2_cv); a vector of predictions Y_hat with the
same dimensions as Y, and a list of pR2 scores on each fold pR2_cv.
X = input data
Y = spiking data
algorithm = a function of (Xr, Yr, Xt) {training data Xr and response Yr and testing features Xt}
and returns the predicted response Yt
n_cv = number of cross-validations folds
"""
if np.ndim(X)==1:
X = np.transpose(np.atleast_2d(X))
cv_kf = KFold(n_splits=n_cv, shuffle=True, random_state=42)
skf = cv_kf.split(X)
i=1
Y_hat=np.zeros(len(Y))
pR2_cv = list()
for idx_r, idx_t in skf:
if verbose > 1:
print( '...runnning cv-fold', i, 'of', n_cv)
i+=1
Xr = X[idx_r, :]
Yr = Y[idx_r]
Xt = X[idx_t, :]
Yt = Y[idx_t]
Yt_hat = eval(algorithm)(Xr, Yr, Xt)
Y_hat[idx_t] = Yt_hat
pR2 = poisson_pseudoR2(Yt, Yt_hat, np.mean(Yr))
pR2_cv.append(pR2)
if verbose > 1:
print( 'pR2: ', pR2)
if verbose > 0:
print("pR2_cv: %0.6f (+/- %0.6f)" % (np.mean(pR2_cv),
np.std(pR2_cv)/np.sqrt(n_cv)))
return Y_hat, pR2_cv
def plot_model_comparison(models_for_plot, models=[], color='r', title=None, labels=[],fs=12):
"""Just makes a comparision bar plot."""
plt.plot([-1, len(models_for_plot)], [0,0],'--k', alpha=0.4)
if not labels:
labels = models_for_plot
mean_pR2 = list()
sem_pR2 = list()
for model in models_for_plot:
PR2_art = models[model]['PR2']
mean_pR2.append(np.mean(PR2_art))
sem_pR2.append(np.std(PR2_art)/np.sqrt(np.size(PR2_art)))
plt.bar(np.arange(np.size(mean_pR2)), mean_pR2, 0.8, align='center',
ecolor='k', alpha=.9, color=color, ec='w', yerr=np.array(sem_pR2),
tick_label=labels)
plt.plot(np.arange(np.size(mean_pR2)), mean_pR2, 'k.', markersize=15)
plt.ylabel('Pseudo-R2',fontsize=fs)
simpleaxis(plt.gca())
if title:
plt.title(title)
###Output
_____no_output_____
###Markdown
Define Models GLMNote: Different problems may require different regularization parameters __alpha__ and __reg_lambda__. The __learning_rate, tol__, and __max_iter__ should also be adjusted to ensure convergence (they can be touchy).One can also compare two implementations of a GLM against each other to ensure proper convergence. See the appendix for a note on how to implement R's 'glmnet' in Python.
###Code
from pyglmnet import GLM
def glm_pyglmnet(Xr, Yr, Xt):
glm = GLM(distr='softplus', alpha=0.1, tol=1e-8, verbose=0,
reg_lambda=np.logspace(np.log(0.05), np.log(0.0001), 10, base=np.exp(1)),
learning_rate=2, max_iter=10000, eta=2.0, random_state=1)
glm.fit(Xr, Yr)
Yt = glm[-1].predict(Xt)
return Yt
###Output
_____no_output_____
###Markdown
XGBoostNote: Many of these parameters __(learning rate, estimators, subsampling, max_depth, and gamma)__ should be optimized for the prediction problem at hand. Optimization can be done with a grid search, randomized search, or with Bayesian Optimization (see the appendix at the bottom).
###Code
import xgboost as xgb
def xgb_run(Xr, Yr, Xt):
params = {'objective': "count:poisson", #for poisson output
'eval_metric': "logloss", #loglikelihood loss
'seed': 2925, #for reproducibility
'silent': 1,
'learning_rate': 0.05,
'min_child_weight': 2, 'n_estimators': 580,
'subsample': 0.6, 'max_depth': 5, 'gamma': 0.4}
dtrain = xgb.DMatrix(Xr, label=Yr)
dtest = xgb.DMatrix(Xt)
num_round = 200
bst = xgb.train(params, dtrain, num_round)
Yt = bst.predict(dtest)
return Yt
###Output
_____no_output_____
###Markdown
Neural NetsNote: Again, these parameters should be optimized. We highlight __dropout__, elastic net regularization __l1, l2__, and the number of nodes in the hidden layers. Optimization can be done with a grid search, randomized search, or with Bayesian Optimization (see the appendix at the bottom).There are many, many options for implementing NNs. One might also test max-norm regularization, RMSprop instead of Nadam, more or fewer layers, or different batch sizes or numbers of epochs.
###Code
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Lambda
from keras.regularizers import l1l2
from keras.optimizers import Nadam
def nn(Xr, Yr, Xt):
params = {'dropout': 0.5,
'l1': 0.0,
'l2': 0.0,
              'n1': 1980, #number of units in the 1st hidden layer
'n2': 18}
if np.ndim(Xr)==1:
Xr = np.transpose(np.atleast_2d(Xr))
model = Sequential()
model.add(Dense(params['n1'], input_dim=np.shape(Xr)[1], init='glorot_normal',
activation='relu', W_regularizer=l1l2(params['l1'],params['l2'])))
model.add(Dropout(params['dropout']))
model.add(Dense(params['n2'], init='glorot_normal'
, activation='relu',W_regularizer=l1l2(params['l1'],params['l2'])))
model.add(Dense(1,activation='softplus'))
optim = Nadam()
model.compile(loss='poisson', optimizer=optim,)
hist = model.fit(Xr, Yr, batch_size = 32, nb_epoch=5, verbose=0, validation_split=0.0)
Yt = model.predict(Xt)[:,0]
return Yt
###Output
_____no_output_____
###Markdown
Other methodsThese methods aren't highlighted in the paper but may help to improve the ensemble. Random Forest
###Code
from sklearn.ensemble import RandomForestRegressor
def rf(Xr, Yr, Xt):
params = {'max_depth': 15,
'min_samples_leaf': 4,
'min_samples_split': 5,
'min_weight_fraction_leaf': 0.0,
'n_estimators': 471}
clf = RandomForestRegressor(**params)
clf.fit(Xr, Yr)
Yt = clf.predict(Xt)
return Yt
###Output
_____no_output_____
###Markdown
K-nearest neighbors
###Code
from sklearn.neighbors import KNeighborsRegressor
def knn(Xr, Yr, Xt):
neigh = KNeighborsRegressor(n_neighbors=5,weights='distance')
neigh.fit(Xr, Yr)
Yt = neigh.predict(Xt)
    #returns the distance-weighted average of the neighbors' spike counts
return Yt
###Output
_____no_output_____
###Markdown
EnsembleOne can use any algorithm as the stacked model (the one which takes previous models as inputs). XGBoost tends to work well. Note that the input data Xr and Xt will be the _outputs_ Yt_hat of the above functions.A simple weighted model average also tends to work fairly well, in which case we'll use a (rectified) linear regressor as the stacked regressor. That's defined below.
###Code
from sklearn.linear_model import LinearRegression
def lin_comb(Xr, Yr, Xt):
lr = LinearRegression()
lr.fit(Xr, Yr)
Yt = lr.predict(Xt)
#rectify outputs
Yt = np.maximum(Yt,np.zeros(Yt.shape))
return Yt
###Output
_____no_output_____
###Markdown
Method comparisonLet's take a single neuron from the M1 set and test the above methods.First we'll test on the set of _original features_ (not engineered).
###Code
X = data[['handPos_x','handPos_y','handVel_x','handVel_y']].values
neuron_n = 1
y = m1_imported['spikes'][neuron_n]
###Output
_____no_output_____
###Markdown
Let's plot the neuron's response against the two components of hand velocity.
###Code
plt.figure(figsize=(10,5))
# Use jitter on the y axis for vizualization of trends
y_noise = np.random.randn(np.size(y))
nnoise = 0.2
plt.subplot(121)
simpleaxis(plt.gca())
plt.plot(data['handVel_x'], y+nnoise*y_noise, 'k.', alpha=0.1)
plt.xlabel('Speed in X')
plt.ylabel('spike counts')
plt.subplot(122)
simpleaxis(plt.gca())
plt.plot(data['handVel_y'], y+nnoise*y_noise, 'k.', alpha=0.1)
plt.xlabel('Speed in Y')
plt.ylabel('spike counts')
plt.show()
###Output
_____no_output_____
###Markdown
Now we can fit the data using a method from above:
###Code
#We'll store results here.
Models = dict()
Yt_hat, PR2 = fit_cv(X, y, algorithm = 'glm_pyglmnet', n_cv=8, verbose=2)
Models['glm'] = dict()
Models['glm']['Yt_hat'] = Yt_hat
Models['glm']['PR2'] = PR2
###Output
('...runnning cv-fold', 1, 'of', 8)
('pR2: ', 0.047990744845053945)
('...runnning cv-fold', 2, 'of', 8)
('pR2: ', 0.041296859143503339)
('...runnning cv-fold', 3, 'of', 8)
('pR2: ', 0.055977421252294768)
('...runnning cv-fold', 4, 'of', 8)
('pR2: ', 0.046396193112219875)
('...runnning cv-fold', 5, 'of', 8)
('pR2: ', 0.058007222444918738)
('...runnning cv-fold', 6, 'of', 8)
('pR2: ', 0.052409653792728994)
('...runnning cv-fold', 7, 'of', 8)
('pR2: ', 0.056002175496805306)
('...runnning cv-fold', 8, 'of', 8)
('pR2: ', 0.048319487792282678)
pR2_cv: 0.050800 (+/- 0.001905)
###Markdown
Or all at once:
###Code
methods = ['nn','xgb_run','rf','knn']
for method in methods:
print('Running '+method+'...')
Yt_hat, PR2 = fit_cv(X, y, algorithm = method, n_cv=8, verbose=1)
Models[method] = dict()
Models[method]['Yt_hat'] = Yt_hat
Models[method]['PR2'] = PR2
###Output
Running nn...
pR2_cv: 0.103180 (+/- 0.003494)
Running xgb_run...
pR2_cv: 0.151781 (+/- 0.004155)
Running rf...
pR2_cv: 0.152336 (+/- 0.007846)
Running knn...
pR2_cv: -1.448570 (+/- 0.113684)
###Markdown
Now we can make the ensemble. Note that the ensemble should really be trained on predictions from 1st-stage models that were not trained on the ensemble's test set. This is not the case in a normal CV scheme. We could be slightly overfitting below.
###Code
methods = ['glm','nn','xgb_run','rf','knn']
X_ens = list()
for method in methods:
X_ens.append(Models[method]['Yt_hat'])
# The columns of X_ens are the predictions of each of the above methods
X_ens = np.transpose(np.array(X_ens))
#We can use XGBoost as the 2nd-stage model
Yt_hat, PR2 = fit_cv(X_ens, y, algorithm = 'xgb_run', n_cv=8, verbose=1)
Models['ens_xgb'] = dict()
Models['ens_xgb']['Yt_hat'] = Yt_hat
Models['ens_xgb']['PR2'] = PR2
#Let's also test the weighted model average.
Yt_hat, PR2 = fit_cv(X_ens, y, algorithm = 'lin_comb', n_cv=8, verbose=1)
Models['ens'] = dict()
Models['ens']['Yt_hat'] = Yt_hat
Models['ens']['PR2'] = PR2
plot_model_comparison(['glm', 'nn','xgb_run','rf','knn','ens_xgb','ens'],models=Models,
color=colors,labels = ['GLM','NN','XGB','RF','KNN','Ens.\n (XGBoost)','Ens.\n (Linear)'])
plt.ylim((-.2,.2));
###Output
_____no_output_____
###Markdown
The KNN performed worse than the mean (the null model) in this case; it could possibly be omitted (as it was in the paper).__We're (mostly) done!__ These are the basics of running these methods. To improve all methods, we can add more features. These could be the spike history, representing wholly new information, or simply engineered functions of the original features (as done below). Use engineered featuresWe can get better performance on all methods by engineering the features.
###Code
print('Features are:\n %s' %list(data.drop('time', axis=1).keys()))
X = data.drop('time', axis=1).values
neuron_n = 1
y = m1_imported['spikes'][neuron_n]
methods = ['glm_pyglmnet','nn','xgb_run','rf','knn']
for method in methods:
print('Running '+method+'...')
Yt_hat, PR2 = fit_cv(X, y, algorithm = method, n_cv=8, verbose=1)
Models[method+'_engineered'] = dict()
Models[method+'_engineered']['Yt_hat'] = Yt_hat
Models[method+'_engineered']['PR2'] = PR2
print('Running ensemble...')
# Put the previous results in a new data matrix
X_ens = list()
for method in methods:
X_ens.append(Models[method+'_engineered']['Yt_hat'])
X_ens = np.transpose(np.array(X_ens))
#We can use XGBoost as the 2nd-stage model
Yt_hat, PR2 = fit_cv(X_ens, y, algorithm = 'xgb_run', n_cv=8, verbose=1)
Models['ens_xgb_engineered'] = dict()
Models['ens_xgb_engineered']['Yt_hat'] = Yt_hat
Models['ens_xgb_engineered']['PR2'] = PR2
Yt_hat, PR2 = fit_cv(X_ens, y, algorithm = 'lin_comb', n_cv=8, verbose=1)
Models['ens_engineered'] = dict()
Models['ens_engineered']['Yt_hat'] = Yt_hat
Models['ens_engineered']['PR2'] = PR2
plot_model_comparison(['glm_pyglmnet_engineered','nn_engineered','xgb_run_engineered','rf_engineered','knn_engineered','ens_xgb_engineered','ens_engineered']
,models=Models, color=colors,labels = ['GLM','NN','XGB','RF','KNN','Ens.\n (XGBoost)','Ens.\n (Linear)'])
plt.ylim((-.2,.2));
###Output
_____no_output_____
###Markdown
Looks like the tree-based methods (XGBoost and Random Forest) perform the best. The ensemble leverages this performance. Introduce nested CV scheme An optional excursion to ensure that our ensemble's scores are accurateAs mentioned, the ensemble could be overfitting the noise specific to its test set if it's trained on predictions from models that were themselves trained on the test set. This is a problem if we want accurate scores to be used for method comparison. To get around it we'll implement the following scheme.First, the data is split into _p_ folds. These are our 'outer' folds. Each fold contains a test set and a training set for the ensemble.In each of the _p_ folds, we need to build the training set. This we'll accomplish by normal _k_-fold CV for each of our 1st-stage methods. Rotating through the _k_ folds, training and predicting our 1st-stage methods _k_ times, will result in a full training set of predictions. We'll also need the test set. This we can accomplish by training our 1st-stage methods on the whole training set, and predicting the test set. This part is just normal _p_-fold CV.Now we have a training set and a test set for the ensemble, so we'll train, predict, and test it. This completes the first fold! After repeating the same process on each of the _p_ folds, we'll have predictions of the neural response for the entire dataset, and _p_ pseudo-$R^2$ scores for the ensemble. We'll have trained each 1st-stage method _p(k+1)_ times throughout this process, so it can be quite computationally demanding. It's up to the user to decide if this computational cost is worth the guarantee that the ensemble's score is not inflated because of information leakage.
###Code
# These two methods implement the above scheme. We don't want to be forced to run the ensemble
# at the same time as we train the other methods on each fold, so we'll save the 1st-stage predictions for later
# and use separate methods for training a 1st-stage method and the 2nd-stage method. This will make more sense
# when we implement this.
# Basically, the first method is used to train a 1st-stage method, and the 2nd to train a 2nd-stage method.
def fit_nested_cv(X, Y, algorithm = 'glm_pyglmnet', n_cv_outer=5,n_cv_inner=5, verbose=1):
"""Outputs a list of n_cv_outer prediction vectors Yt_hats, each with length size(Y).
n_cv_outer is p, in the notation above, and n_cv_inner is k.
Each prediction vector will be used to train and test a single fold of the ensemble
in the method `ensemble_cv`. """
if np.ndim(X)==1:
X = np.transpose(np.atleast_2d(X))
# indices of outer test/train split for each fold
# It is imperative that the random state be identical to the random state of the Kfold used
# in ensemble_cv
cv_kf = KFold(n_splits=n_cv_outer, shuffle=True, random_state=42)
skf = cv_kf.split(X)
i=1
Y_hat=np.zeros((len(Y),n_cv_outer))
pR2_cv = list()
# In outer loop, we rotate the test set through the full dataset
for idx_r, idx_t in skf:
if verbose > 1:
print( '...runnning outer cv-fold', i, 'of', n_cv_outer)
Xr_o = X[idx_r, :] # train set input
Yr_o = Y[idx_r] # train set output
Xt_o = X[idx_t, :] # test set input
Yt_o = Y[idx_t] # test set output (used for scoring ensemble only)
cv_kf_in = KFold(n_splits=n_cv_inner, shuffle=True, random_state=42)
skf_inner = cv_kf_in.split(Xr_o)
j=1
# In the inner loop, we perform CV to predict the full validation set Yr_o, which will be recorded
# to be used for ensemble training. THEN we use the full Xr_o to predict values for Xt_o, which will
# be used for ensemble evaluation.
for idx_r_inner, idx_t_inner in skf_inner:
j+=1
Xr = Xr_o[idx_r_inner, :]
Yr = Yr_o[idx_r_inner]
Xt = Xr_o[idx_t_inner, :]
Yt = Yr_o[idx_t_inner]
# Predict a fold of the Yr_o (validation)
Yt_hat = eval(algorithm)(Xr, Yr, Xt)
full_indices = idx_r[idx_t_inner] # indices of inner loop
Y_hat[full_indices,i-1] = Yt_hat
Yt_hat.reshape(Yt.shape)
pR2 = poisson_pseudoR2(Yt, Yt_hat, np.mean(Yr))
pR2_cv.append(pR2)
if verbose > 1:
print( 'pR2: ', pR2)
# Now predict the ensemble's test set
Yt_hat = eval(algorithm)(Xr_o, Yr_o, Xt_o)
Y_hat[idx_t,i-1] = Yt_hat
pR2 = poisson_pseudoR2(Yt_o, Yt_hat, np.mean(Yr_o))
pR2_cv.append(pR2)
i+=1
if verbose > 0:
print("pR2_cv: %0.6f (+/- %0.6f)" % (np.mean(pR2_cv),
np.std(pR2_cv)/np.sqrt(n_cv_inner*n_cv_outer)))
return Y_hat, pR2_cv
def ensemble_cv(X_list, Y, algorithm = 'glm_pyglmnet', n_cv_outer=5, verbose=1):
"""Outputs the scores and prediction of the ensemble on held-out data.
X = list of (size(Y),n_cv_outer) np array. Each array is a previously trained method's predictions
all single folds. It should be of dimension (np.size(Y),n_cv_outer).
It is necessary that n_cv_outer and the random state of the KFold be the same as
in `fit_nested_cv`. """
for x in X_list:
assert x.shape == (np.size(Y),n_cv_outer)
# indices of outer test/train split for each fold
cv_kf = KFold(n_splits=n_cv_outer, shuffle=True, random_state=42)
skf = cv_kf.split(X_list[0])
i=0
Y_hat=np.zeros(len(Y))
pR2_cv = list()
for idx_r, idx_t in skf:
# Get the first fold from each list
X = np.array([x[:,i] for x in X_list])
X = X.transpose()
Xr = X[idx_r, :]
Yr = Y[idx_r]
Xt = X[idx_t, :]
Yt = Y[idx_t]
i+=1
if verbose > 1:
print( '...runnning cv-fold', i, 'of', n_cv_outer)
Yt_hat = eval(algorithm)(Xr, Yr, Xt)
Y_hat[idx_t] = Yt_hat
pR2 = poisson_pseudoR2(Yt, Yt_hat, np.mean(Yr))
pR2_cv.append(pR2)
if verbose > 1:
print( 'pR2: ', pR2)
if verbose > 0:
print("pR2_cv: %0.6f (+/- %0.6f)" % (np.mean(pR2_cv),
np.std(pR2_cv)/np.sqrt(n_cv_outer)))
return Y_hat, pR2_cv
###Output
_____no_output_____
###Markdown
Let's run this on the same neuron above and see if there's a difference in the ensemble score
###Code
methods = ['glm_pyglmnet','nn','xgb_run','rf','knn']
X_ens = list()
for method in methods:
print('Running '+method+'...')
Yt_hat, PR2 = fit_nested_cv(X, y, algorithm = method ,n_cv_outer=8)
Models[method+'_nested'] = dict()
Models[method+'_nested']['Yt_hat'] = Yt_hat
Models[method+'_nested']['PR2'] = PR2
# Put the previous results in a new data matrix
X_ens.append(Yt_hat)
print('Running ensemble...')
Yt_hat, PR2 = ensemble_cv(X_ens, y, algorithm = 'xgb_run', n_cv_outer=8, verbose=1)
Models['ens_nested'] = dict()
Models['ens_nested']['Yt_hat'] = Yt_hat
Models['ens_nested']['PR2'] = PR2
plot_model_comparison(['ens_engineered','ens_nested']
,models=Models, color=colors,labels = ['Ens. \n(not nested)','Ens. (nested)'])
###Output
_____no_output_____
###Markdown
__We can see that the nested CV scheme didn't make much of a difference on this dataset. For now we can assume that normal k-fold CV is not overfitting the data.__ Run over all neurons in the datasetIn the publication we tested 4 feature sets for the M1 dataset. Here we'll just run over the engineered feature set.This is currently set up to run the normal CV scheme. This is for expediency; with this dataset and these methods, the nested CV scheme took our machine several hours to run.
###Code
methods = ['glm_pyglmnet','nn','xgb_run','rf','knn']
# We'll store both the predictions and the pseudo-R2 score in a dictionary for each method
for method in methods:
Models[method+'_all'] = dict()
Models[method+'_all']['PR2'] = list()
Models[method+'_all']['Yt_hat'] = list()
Models['ens_all'] = dict()
Models['ens_all']['Yt_hat'] = list()
Models['ens_all']['PR2'] = list()
nneurons = np.shape(m1_imported['spikes'])[0]
for i in range(nneurons):
    print('\n running for neuron %d' % i)
y = m1_imported['spikes'][i]
for method in methods:
print('Running '+method+'...')
Yt_hat, PR2 = fit_cv(X, y, algorithm = method, n_cv=8, verbose=1)
Models[method+'_all']['Yt_hat'].append(Yt_hat)
Models[method+'_all']['PR2'].append(PR2)
print('Running ensemble...')
# Put the previous results in a new data matrix
X_ens = list()
for method in methods:
X_ens.append(Models[method+'_all']['Yt_hat'][i])
X_ens = np.transpose(np.array(X_ens))
Yt_hat, PR2 = fit_cv(X_ens, y, algorithm = 'xgb_run', n_cv=8, verbose=1)
Models['ens_all']['Yt_hat'].append(Yt_hat)
Models['ens_all']['PR2'].append(PR2)
###Output
running for neuron 0
Running glm_pyglmnet...
pR2_cv: 0.019708 (+/- 0.002247)
Running nn...
pR2_cv: 0.025202 (+/- 0.003476)
Running xgb_run...
pR2_cv: 0.043901 (+/- 0.003588)
Running rf...
pR2_cv: 0.030623 (+/- 0.004210)
Running knn...
pR2_cv: -2.723586 (+/- 0.165410)
Running ensemble...
pR2_cv: 0.039113 (+/- 0.004420)
running for neuron 1
Running glm_pyglmnet...
pR2_cv: 0.050800 (+/- 0.001905)
Running nn...
pR2_cv: 0.107952 (+/- 0.003695)
Running xgb_run...
pR2_cv: 0.151781 (+/- 0.004155)
Running rf...
pR2_cv: 0.153035 (+/- 0.007690)
Running knn...
pR2_cv: -1.448570 (+/- 0.113684)
Running ensemble...
pR2_cv: 0.160966 (+/- 0.004447)
running for neuron 2
Running glm_pyglmnet...
pR2_cv: 0.068629 (+/- 0.002075)
Running nn...
pR2_cv: 0.081361 (+/- 0.003728)
Running xgb_run...
pR2_cv: 0.116009 (+/- 0.003960)
Running rf...
pR2_cv: 0.110817 (+/- 0.009056)
Running knn...
pR2_cv: -1.435584 (+/- 0.083983)
Running ensemble...
pR2_cv: 0.117260 (+/- 0.005539)
running for neuron 3
Running glm_pyglmnet...
pR2_cv: 0.040684 (+/- 0.001907)
Running nn...
pR2_cv: 0.076482 (+/- 0.003945)
Running xgb_run...
pR2_cv: 0.108575 (+/- 0.004181)
Running rf...
pR2_cv: 0.097349 (+/- 0.007634)
Running knn...
pR2_cv: -2.301393 (+/- 0.125503)
Running ensemble...
pR2_cv: 0.109097 (+/- 0.005146)
running for neuron 4
Running glm_pyglmnet...
pR2_cv: 0.024160 (+/- 0.002219)
Running nn...
pR2_cv: 0.189817 (+/- 0.003849)
Running xgb_run...
pR2_cv: 0.302463 (+/- 0.004459)
Running rf...
pR2_cv: 0.305592 (+/- 0.006707)
Running knn...
pR2_cv: 0.190224 (+/- 0.011230)
Running ensemble...
pR2_cv: 0.306045 (+/- 0.005913)
running for neuron 5
Running glm_pyglmnet...
pR2_cv: 0.002092 (+/- 0.000789)
Running nn...
pR2_cv: -0.007359 (+/- 0.005511)
Running xgb_run...
pR2_cv: 0.024298 (+/- 0.004774)
Running rf...
pR2_cv: -0.024255 (+/- 0.017313)
Running knn...
pR2_cv: -7.400815 (+/- 0.215506)
Running ensemble...
pR2_cv: 0.018893 (+/- 0.006812)
running for neuron 6
Running glm_pyglmnet...
pR2_cv: 0.045851 (+/- 0.001498)
Running nn...
pR2_cv: 0.050224 (+/- 0.001662)
Running xgb_run...
pR2_cv: 0.086977 (+/- 0.001950)
Running rf...
pR2_cv: 0.078311 (+/- 0.003805)
Running knn...
pR2_cv: -0.908422 (+/- 0.045085)
Running ensemble...
pR2_cv: 0.085858 (+/- 0.003321)
running for neuron 7
Running glm_pyglmnet...
pR2_cv: -0.000145 (+/- 0.000054)
Running nn...
pR2_cv: -0.009065 (+/- 0.007305)
Running xgb_run...
pR2_cv: -0.027098 (+/- 0.017731)
Running rf...
pR2_cv: -0.208098 (+/- 0.077388)
Running knn...
pR2_cv: -5.395353 (+/- 0.192988)
Running ensemble...
pR2_cv: -0.070074 (+/- 0.048760)
running for neuron 8
Running glm_pyglmnet...
pR2_cv: 0.002439 (+/- 0.000841)
Running nn...
pR2_cv: -0.016480 (+/- 0.008390)
Running xgb_run...
pR2_cv: -0.000922 (+/- 0.009676)
Running rf...
pR2_cv: -0.127746 (+/- 0.023388)
Running knn...
pR2_cv: -6.498334 (+/- 0.155970)
Running ensemble...
pR2_cv: -0.000204 (+/- 0.006620)
running for neuron 9
Running glm_pyglmnet...
###Markdown
This is a method for nicely plotting the scores of methods over an entire dataset:
###Code
import mpl_toolkits.axes_grid.inset_locator as il
def bootstrap(yy):
# sample with replacement
means = []
N = 1000
for i in range(N):
yy_samp = np.random.choice(yy,len(yy))
means.append(np.mean(yy_samp))
means = np.sort(means)
crit05 = int(0.025*N)
crit95 = int(0.975*N)
return np.abs(np.mean(yy)-means[[crit05,crit95]])
def plot_scatter(models_,ax_ran=[0, 0.2, 0, 0.2],ts=16,models = Models,labels = None):
"""Plots scores as a scatter plot with an inset of the population mean.
models_ = list of strings of the methods to plot. First in the list is the x-axis, rest are scatter.
The strings should be keys in the dict `models`.
ax_ran = the plotting range. Given to plt.axis
ts = label size
models = dictionary that contains results of methods.
labels = list of labels for inset bar chart. Defaults to the keys of the dictionary. """
if labels is None:
labels = models_
glm_ = models_[0]
sn = np.sqrt(len(np.array(models[glm_]['PR2'][1])))
xx = np.mean(np.array(models[glm_]['PR2']), axis=1)
xerr = np.std(np.array(models[glm_]['PR2']), axis=1)/sn
alpha_lines = 0.3
alpha = 1
lim = ax_ran[1]
i=0
p = []
means=[np.mean(xx)]
mean_err = bootstrap(xx)
for mod in models_[1:]:
yy = np.mean(np.array(models[mod]['PR2']), axis=1)
yerr = np.std(np.array(models[mod]['PR2']), axis=1)/sn
## uncomment to plot error
#p1,c,b=plt.errorbar(xx, yy,fmt=marks[i], xerr = xerr, yerr = yerr ,ms=msize[i], alpha=1,color=colors[i+1])
p1,=plt.plot(xx, yy, '.', ms=10, alpha=.5,color=colors[i+1])
p.append(p1)
plt.plot([-lim, lim], [-lim, lim], 'k:', lw=1, alpha=alpha_lines)
i+=1
means.append(np.mean(yy))
mean_err = np.vstack((mean_err,bootstrap(yy)))
ax = plt.gca()
simpleaxis(ax)
plt.xlabel('GLM pseudo-$R^2$',fontsize=ts)
plt.ylabel('Modern ML pseudo-$R^2$',fontsize=ts)
plt.axis(ax_ran)
ax.set_yticks([0], minor=True)
ax.set_xticks([0], minor=True)
plt.grid(which='minor')
# insert bar chart that is the average
freq = np.round(1.5*np.max(means)/5,2)
if freq<0.01:
freq = np.round(np.max(means)/5,3)
inset_axes = il.inset_axes(ax,
width="40%", # width = 34% of parent_bbox
height="50%", # height : 1 inch
loc=7, borderpad = 1.5,
axes_kwargs = {'title':'Mean p$R^2$',
'yticks':np.arange(0,np.max(means)*1.5,freq),
'ylim':[-np.max(means)*.1,np.max(means)*1.5] })
plt.bar(np.arange(np.size(means)), means, 0.8, align='center',
ecolor='k', alpha=.9, color=colors, ec='w',yerr = mean_err.transpose(),
tick_label=labels)
plt.plot([-.6,len(labels)-.6],[0,0],color='black',)
plot_scatter(['glm_pyglmnet_all','nn_all','xgb_run_all','ens_all'],ax_ran=[0, 0.3, 0, 0.3],ts=16,
models = Models,labels = ['GLM','NN','XGB','Ens.'])
plt.show()
###Output
_____no_output_____
###Markdown
Note that the GLM has not been optimized on this feature set for this tutorial. We managed in the paper to bring it to the level of the NN. Our parameters for XGBoost could be improved, too. Here we observe that it performs better in general (as seen in the scatter) but that some outliers were overfit and are pulling down the mean scores. To improve the methods, we can optimize their parameters with the following method in Appendix 1. Appendix 1: Hyperparameter optimization. Each of the above methods has a number of parameters that can be changed. Selecting the correct values isn't trivial and can have a very large effect on the output score. Since 'model parameters' already means something else, these tunable parameters are called 'hyperparameters'. Scikit-learn has a number of methods for finding hyperparameters ([see here](http://scikit-learn.org/stable/modules/grid_search.html)). These include grid and randomized search. We're a fan of Bayesian Optimization, since it seems a bit smarter. There are a number of implementations of this; we chose to use BayesOpt. __(Running this method will require installing this [package](https://github.com/fmfn/BayesianOptimization).)__ The following method is given as an example. The method is a bit convoluted and perhaps messy (sorry!) but is designed for ease of use. Here, we optimize for XGBoost.
###Code
from bayes_opt import BayesianOptimization
from sklearn.cross_validation import train_test_split
def bayes_opt_xgb(X,y,xgbBO=None, num_iter=10):
"""Return an optmized XGB for a dataset. Allows warm start; can pass Bayes_opt object from previous run."""
# Optimize on a held-out set.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
params=dict()
# BayesOpt optimizes any function that takes parameters and returns a score.
# We need to define a function like this for XGBoost. That's what xgb_evaluate does.
def xgb_evaluate(min_child_weight,
colsample_bytree,
max_depth,
subsample,
gamma,
learning_rate,
n_estimators):
params['min_child_weight'] = int(min_child_weight)
params['colsample_bytree'] = max(min(colsample_bytree, 1), 0)
params['max_depth'] = int(max_depth)
params['subsample'] = max(min(subsample, 1), 0)
params['gamma'] = max(gamma, 0)
params['learning_rate'] = max(learning_rate, 0)
params['n_estimators'] = max(n_estimators, 0)
# This actually creates the instance of XGBoost. It will be fed to fit_cv, which will output the score.
def xgb_new(Xr, Yr, Xt):
params['objective']= "count:poisson" #for poisson output
params['eval_metric']= "logloss" #loglikelihood loss
params['seed']= 2925 #for reproducibility
params['silent']= 1
params['nthread'] = -1 #with -1 it will use all available threads
dtrain = xgb.DMatrix(Xr, label=Yr)
dtest = xgb.DMatrix(Xt)
num_round = 200
bst = xgb.train(params, dtrain, num_round)
Yt = bst.predict(dtest)
return Yt
Yt_hat, PR2 = fit_cv(X_train, y_train, xgb_new, n_cv=6, verbose=0, label=[])
return np.mean(PR2)
random_state = 2016
if not xgbBO: #allow warm start. If starting from scratch, do:
init_points=1
# Here is where we set the bounds of the parameters to explore.
xgbBO = BayesianOptimization(xgb_evaluate, {'min_child_weight': (1, 10),
'colsample_bytree': (1, 1),
'max_depth': (1, 10),
'subsample': (0.5, 1),
'gamma': (0, 0.4),
'learning_rate': (0.05, 0.25),
'n_estimators':(10, 1000)
},verbose=1)
# can place results of previous runs here to improve performance
xgbBO.explore( {'colsample_bytree': [1,1.0,1,1],
'gamma': [0.21173851024558027,0.4,0.2199632,0.18957],
'learning_rate': [0.10848417655111556,0.05,0.0952752,0.099619],
'max_depth': [2.097849266768804,8.1, 1.3364514,2.39364],
'min_child_weight': [8.85532939789740331,1,3.06446781,6.33840],
'n_estimators': [900.3009605914325,511,190.8645, 925.70248],
'subsample': [0.83434308305954963,0.5,0.85830945,0.798837]})
else:
init_points = 2
# This is the actual optimization method.
xgbBO.maximize(init_points=init_points, n_iter=num_iter, xi=0.0)
values = xgbBO.res['max']
print(values)
    # We want to return an optimized method for later use. We can also see the parameters by typing xgbBO.res['max']
params = values['max_params']
def xgb_best(Xr, Yr, Xt,returnModel=0):
params['min_child_weight'] = int(params['min_child_weight'])
params['max_depth'] = int(params['max_depth'])
params['objective']= "count:poisson" #for poisson output
params['eval_metric']= "logloss" #loglikelihood loss
params['seed']= 2925 #for reproducibility
params['silent']= 1
params['nthread'] = -1 #with -1 it will use all available threads
params['colsample_bytree']= 1.0
dtrain = xgb.DMatrix(Xr, label=Yr)
dtest = xgb.DMatrix(Xt)
num_round = 200
bst = xgb.train(params, dtrain, num_round)
Yt = bst.predict(dtest)
if returnModel:
return bst
else:
return Yt
return xgb_best, xgbBO
###Output
_____no_output_____
###Markdown
This method can be used as follows.
###Code
XGB_optimized, xgbBO_optimized = bayes_opt_xgb(X,y,num_iter=5)
Yt_hat, PR2 = fit_cv(X, y, algorithm = 'XGB_optimized', n_cv=10, verbose=1, label=[])
###Output
_____no_output_____
###Markdown
Appendix 2: GLM implemented with R's glmnet. Since the GLM depends on a proper learning rate, regularization path, etc., it can be helpful to compare with other packages with different defaults. The package in R (glmnet) is pretty robust. If it is desired to compare the two GLMs, one can use R's glmnet in Python with the following method (designed to interface with `fit_cv`). This requires the installation of rpy2.
###Code
import rpy2.robjects as ro
import rpy2.robjects.numpy2ri as n2r
n2r.activate()
r = ro.r
r.library('glmnet')
def r_glmnet(Xr,Yr,Xt):
yr = ro.FloatVector(Yr)
trained_model = r['glmnet'](Xr, yr, family="poisson",alpha = 0.1)
lambda_min = np.asanyarray(trained_model.rx2('lambda'))[-1]
Yt = r['predict'](trained_model,s=lambda_min,newx=Xt,type="response")
# betas = np.asanyarray(r['as.matrix'](trained_model.rx2('beta')))[:,-1]
# beta0 = np.asanyarray(r['as.matrix'](trained_model.rx2('a0')))[-1]
# print(betas,beta0)
return np.array(list(Yt))
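# A usage sketch (hedged, not run in the original notebook): r_glmnet is meant to
# plug into the same cross-validation helper used for the other methods above,
# e.g. for a single neuron:
# Yt_hat_r, PR2_r = fit_cv(X, y, algorithm='r_glmnet', n_cv=8, verbose=1)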
###Output
_____no_output_____ |
T11_CC_DJ_Genism_NLP_Coding_Challenge__2.ipynb | ###Markdown
[View in Colaboratory](https://colab.research.google.com/github/schwaaweb/aimlds1_11-NLP/blob/master/T11_CC_DJ_Genism_NLP_Coding_Challenge__2.ipynb) Coding Challenge 2: Natural Language Processing A common task in NLP is to determine the similarity between documents or words. In order to facilitate the comparison between documents or words, you will leverage the learnings from Coding Challenge 1 to create vectors. Once you have a document term matrix, comparisons are possible since you can measure the difference between the numbers.In this Coding Challenge, you will utilize the "**Gensim**" library, which is a free Python library to determine document similarity.**"Gensim" Reference**: https://radimrehurek.com/project/gensim/ **Install Gensim**:
###Code
%%time
# https://radimrehurek.com/gensim/install.html
#!pip install --upgrade gensim
!conda install -c anaconda gensim --yes
###Output
Solving environment: done
## Package Plan ##
environment location: /Users/darwinm/anaconda3
added / updated specs:
- gensim
The following packages will be downloaded:
package | build
---------------------------|-----------------
smart_open-1.5.7 | py36_0 58 KB anaconda
boto3-1.7.32 | py36_0 110 KB anaconda
bz2file-0.98 | py36_0 12 KB anaconda
certifi-2018.4.16 | py36_0 142 KB anaconda
openssl-1.0.2o | h26aff7b_0 3.4 MB anaconda
conda-4.5.4 | py36_0 1.0 MB anaconda
s3transfer-0.1.13 | py36_0 76 KB anaconda
ca-certificates-2018.03.07 | 0 124 KB anaconda
botocore-1.10.32 | py36_0 3.1 MB anaconda
gensim-3.4.0 | py36h917ab60_0 21.5 MB anaconda
jmespath-0.9.3 | py36h767a2d6_0 34 KB anaconda
------------------------------------------------------------
Total: 29.5 MB
The following NEW packages will be INSTALLED:
boto3: 1.7.32-py36_0 anaconda
botocore: 1.10.32-py36_0 anaconda
bz2file: 0.98-py36_0 anaconda
gensim: 3.4.0-py36h917ab60_0 anaconda
jmespath: 0.9.3-py36h767a2d6_0 anaconda
s3transfer: 0.1.13-py36_0 anaconda
smart_open: 1.5.7-py36_0 anaconda
The following packages will be UPDATED:
certifi: 2018.4.16-py36_0 conda-forge --> 2018.4.16-py36_0 anaconda
conda: 4.5.4-py36_0 conda-forge --> 4.5.4-py36_0 anaconda
openssl: 1.0.2o-0 conda-forge --> 1.0.2o-h26aff7b_0 anaconda
The following packages will be DOWNGRADED:
ca-certificates: 2018.4.16-0 conda-forge --> 2018.03.07-0 anaconda
Downloading and Extracting Packages
smart_open-1.5.7 | 58 KB | ####################################### | 100%
boto3-1.7.32 | 110 KB | ####################################### | 100%
bz2file-0.98 | 12 KB | ####################################### | 100%
certifi-2018.4.16 | 142 KB | ####################################### | 100%
openssl-1.0.2o | 3.4 MB | ####################################### | 100%
conda-4.5.4 | 1.0 MB | ####################################### | 100%
s3transfer-0.1.13 | 76 KB | ####################################### | 100%
ca-certificates-2018 | 124 KB | ####################################### | 100%
botocore-1.10.32 | 3.1 MB | ####################################### | 100%
gensim-3.4.0 | 21.5 MB | ####################################### | 100%
jmespath-0.9.3 | 34 KB | ####################################### | 100%
Preparing transaction: done
Verifying transaction: done
Executing transaction: done
CPU times: user 8.05 s, sys: 5.08 s, total: 13.1 s
Wall time: 5min 19s
###Markdown
**Install NLTK:**
###Code
%%time
# Import the NLTK package
import nltk
# Get all the data associated with NLTK – could take a while to download all the data
nltk.download('all')
###Output
[nltk_data] Downloading collection 'all'
[nltk_data] |
[nltk_data] | Downloading package abc to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package abc is already up-to-date!
[nltk_data] | Downloading package alpino to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package alpino is already up-to-date!
[nltk_data] | Downloading package biocreative_ppi to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package biocreative_ppi is already up-to-date!
[nltk_data] | Downloading package brown to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package brown is already up-to-date!
[nltk_data] | Downloading package brown_tei to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package brown_tei is already up-to-date!
[nltk_data] | Downloading package cess_cat to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package cess_cat is already up-to-date!
[nltk_data] | Downloading package cess_esp to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package cess_esp is already up-to-date!
[nltk_data] | Downloading package chat80 to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package chat80 is already up-to-date!
[nltk_data] | Downloading package city_database to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package city_database is already up-to-date!
[nltk_data] | Downloading package cmudict to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package cmudict is already up-to-date!
[nltk_data] | Downloading package comparative_sentences to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package comparative_sentences is already up-to-
[nltk_data] | date!
[nltk_data] | Downloading package comtrans to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package comtrans is already up-to-date!
[nltk_data] | Downloading package conll2000 to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package conll2000 is already up-to-date!
[nltk_data] | Downloading package conll2002 to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package conll2002 is already up-to-date!
[nltk_data] | Downloading package conll2007 to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package conll2007 is already up-to-date!
[nltk_data] | Downloading package crubadan to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package crubadan is already up-to-date!
[nltk_data] | Downloading package dependency_treebank to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package dependency_treebank is already up-to-date!
[nltk_data] | Downloading package dolch to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package dolch is already up-to-date!
[nltk_data] | Downloading package europarl_raw to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package europarl_raw is already up-to-date!
[nltk_data] | Downloading package floresta to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package floresta is already up-to-date!
[nltk_data] | Downloading package framenet_v15 to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package framenet_v15 is already up-to-date!
[nltk_data] | Downloading package framenet_v17 to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package framenet_v17 is already up-to-date!
[nltk_data] | Downloading package gazetteers to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package gazetteers is already up-to-date!
[nltk_data] | Downloading package genesis to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package genesis is already up-to-date!
[nltk_data] | Downloading package gutenberg to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package gutenberg is already up-to-date!
[nltk_data] | Downloading package ieer to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package ieer is already up-to-date!
[nltk_data] | Downloading package inaugural to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package inaugural is already up-to-date!
[nltk_data] | Downloading package indian to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package indian is already up-to-date!
[nltk_data] | Downloading package jeita to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package jeita is already up-to-date!
[nltk_data] | Downloading package kimmo to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package kimmo is already up-to-date!
[nltk_data] | Downloading package knbc to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package knbc is already up-to-date!
[nltk_data] | Downloading package lin_thesaurus to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package lin_thesaurus is already up-to-date!
[nltk_data] | Downloading package mac_morpho to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package mac_morpho is already up-to-date!
[nltk_data] | Downloading package machado to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package machado is already up-to-date!
[nltk_data] | Downloading package masc_tagged to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package masc_tagged is already up-to-date!
[nltk_data] | Downloading package moses_sample to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package moses_sample is already up-to-date!
[nltk_data] | Downloading package movie_reviews to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package movie_reviews is already up-to-date!
[nltk_data] | Downloading package names to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package names is already up-to-date!
[nltk_data] | Downloading package nombank.1.0 to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package nombank.1.0 is already up-to-date!
[nltk_data] | Downloading package nps_chat to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package nps_chat is already up-to-date!
[nltk_data] | Downloading package omw to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package omw is already up-to-date!
[nltk_data] | Downloading package opinion_lexicon to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package opinion_lexicon is already up-to-date!
[nltk_data] | Downloading package paradigms to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package paradigms is already up-to-date!
[nltk_data] | Downloading package pil to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package pil is already up-to-date!
[nltk_data] | Downloading package pl196x to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package pl196x is already up-to-date!
[nltk_data] | Downloading package ppattach to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package ppattach is already up-to-date!
[nltk_data] | Downloading package problem_reports to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package problem_reports is already up-to-date!
[nltk_data] | Downloading package propbank to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package propbank is already up-to-date!
[nltk_data] | Downloading package ptb to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package ptb is already up-to-date!
[nltk_data] | Downloading package product_reviews_1 to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package product_reviews_1 is already up-to-date!
[nltk_data] | Downloading package product_reviews_2 to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package product_reviews_2 is already up-to-date!
[nltk_data] | Downloading package pros_cons to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package pros_cons is already up-to-date!
[nltk_data] | Downloading package qc to /Users/darwinm/nltk_data...
[nltk_data] | Package qc is already up-to-date!
[nltk_data] | Downloading package reuters to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package reuters is already up-to-date!
[nltk_data] | Downloading package rte to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package rte is already up-to-date!
[nltk_data] | Downloading package semcor to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package semcor is already up-to-date!
[nltk_data] | Downloading package senseval to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package senseval is already up-to-date!
[nltk_data] | Downloading package sentiwordnet to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package sentiwordnet is already up-to-date!
[nltk_data] | Downloading package sentence_polarity to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package sentence_polarity is already up-to-date!
[nltk_data] | Downloading package shakespeare to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package shakespeare is already up-to-date!
[nltk_data] | Downloading package sinica_treebank to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package sinica_treebank is already up-to-date!
[nltk_data] | Downloading package smultron to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package smultron is already up-to-date!
[nltk_data] | Downloading package state_union to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package state_union is already up-to-date!
[nltk_data] | Downloading package stopwords to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package stopwords is already up-to-date!
[nltk_data] | Downloading package subjectivity to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package subjectivity is already up-to-date!
[nltk_data] | Downloading package swadesh to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package swadesh is already up-to-date!
[nltk_data] | Downloading package switchboard to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package switchboard is already up-to-date!
[nltk_data] | Downloading package timit to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package timit is already up-to-date!
[nltk_data] | Downloading package toolbox to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package toolbox is already up-to-date!
[nltk_data] | Downloading package treebank to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package treebank is already up-to-date!
[nltk_data] | Downloading package twitter_samples to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package twitter_samples is already up-to-date!
[nltk_data] | Downloading package udhr to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package udhr is already up-to-date!
[nltk_data] | Downloading package udhr2 to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package udhr2 is already up-to-date!
[nltk_data] | Downloading package unicode_samples to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package unicode_samples is already up-to-date!
[nltk_data] | Downloading package universal_treebanks_v20 to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package universal_treebanks_v20 is already up-to-
[nltk_data] | date!
[nltk_data] | Downloading package verbnet to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package verbnet is already up-to-date!
[nltk_data] | Downloading package webtext to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package webtext is already up-to-date!
[nltk_data] | Downloading package wordnet to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package wordnet is already up-to-date!
[nltk_data] | Downloading package wordnet_ic to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package wordnet_ic is already up-to-date!
[nltk_data] | Downloading package words to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package words is already up-to-date!
[nltk_data] | Downloading package ycoe to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package ycoe is already up-to-date!
[nltk_data] | Downloading package rslp to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package rslp is already up-to-date!
[nltk_data] | Downloading package maxent_treebank_pos_tagger to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package maxent_treebank_pos_tagger is already up-
[nltk_data] | to-date!
[nltk_data] | Downloading package universal_tagset to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package universal_tagset is already up-to-date!
[nltk_data] | Downloading package maxent_ne_chunker to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package maxent_ne_chunker is already up-to-date!
[nltk_data] | Downloading package punkt to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package punkt is already up-to-date!
[nltk_data] | Downloading package book_grammars to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package book_grammars is already up-to-date!
[nltk_data] | Downloading package sample_grammars to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package sample_grammars is already up-to-date!
[nltk_data] | Downloading package spanish_grammars to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package spanish_grammars is already up-to-date!
[nltk_data] | Downloading package basque_grammars to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package basque_grammars is already up-to-date!
[nltk_data] | Downloading package large_grammars to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package large_grammars is already up-to-date!
[nltk_data] | Downloading package tagsets to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package tagsets is already up-to-date!
[nltk_data] | Downloading package snowball_data to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package snowball_data is already up-to-date!
[nltk_data] | Downloading package bllip_wsj_no_aux to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package bllip_wsj_no_aux is already up-to-date!
[nltk_data] | Downloading package word2vec_sample to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package word2vec_sample is already up-to-date!
[nltk_data] | Downloading package panlex_swadesh to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package panlex_swadesh is already up-to-date!
[nltk_data] | Downloading package mte_teip5 to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package mte_teip5 is already up-to-date!
[nltk_data] | Downloading package averaged_perceptron_tagger to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package averaged_perceptron_tagger is already up-
[nltk_data] | to-date!
[nltk_data] | Downloading package perluniprops to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package perluniprops is already up-to-date!
[nltk_data] | Downloading package nonbreaking_prefixes to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package nonbreaking_prefixes is already up-to-date!
[nltk_data] | Downloading package vader_lexicon to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package vader_lexicon is already up-to-date!
[nltk_data] | Downloading package porter_test to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package porter_test is already up-to-date!
[nltk_data] | Downloading package wmt15_eval to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package wmt15_eval is already up-to-date!
[nltk_data] | Downloading package mwa_ppdb to
[nltk_data] | /Users/darwinm/nltk_data...
[nltk_data] | Package mwa_ppdb is already up-to-date!
[nltk_data] |
[nltk_data] Done downloading collection all
CPU times: user 3.51 s, sys: 1.37 s, total: 4.88 s
Wall time: 25.1 s
###Markdown
**Import the requiste NLTK packages:**
###Code
#Import word tokenizer
from nltk.tokenize import word_tokenize
###Output
_____no_output_____
###Markdown
**Dataset:**
###Code
#For the purposes of this challenge, each line represents a document. In all, there are 8 documents
raw_documents = ['The dog ran up the steps and entered the owner\'s room to check if the owner was in the room.',
'My name is Thomson Comer, commander of the Machine Learning program at Lambda school.',
'I am creating the curriculum for the Machine Learning program and will be teaching the full-time Machine Learning program.',
'Machine Learning is one of my favorite subjects.',
'I am excited about taking the Machine Learning class at the Lambda school starting in April.',
'When does the Machine Learning program kick-off at Lambda school?',
'The batter hit the ball out off AT&T park into the pacific ocean.',
'The pitcher threw the ball into the dug-out.']
###Output
_____no_output_____
###Markdown
**Step 1**: **Create a document that contains a list of tokens**
###Code
import re
tokenized_documents = [word_tokenize(w) for w in raw_documents]
print(tokenized_documents)
###Output
[['The', 'dog', 'ran', 'up', 'the', 'steps', 'and', 'entered', 'the', 'owner', "'s", 'room', 'to', 'check', 'if', 'the', 'owner', 'was', 'in', 'the', 'room', '.'], ['My', 'name', 'is', 'Thomson', 'Comer', ',', 'commander', 'of', 'the', 'Machine', 'Learning', 'program', 'at', 'Lambda', 'school', '.'], ['I', 'am', 'creating', 'the', 'curriculum', 'for', 'the', 'Machine', 'Learning', 'program', 'and', 'will', 'be', 'teaching', 'the', 'full-time', 'Machine', 'Learning', 'program', '.'], ['Machine', 'Learning', 'is', 'one', 'of', 'my', 'favorite', 'subjects', '.'], ['I', 'am', 'excited', 'about', 'taking', 'the', 'Machine', 'Learning', 'class', 'at', 'the', 'Lambda', 'school', 'starting', 'in', 'April', '.'], ['When', 'does', 'the', 'Machine', 'Learning', 'program', 'kick-off', 'at', 'Lambda', 'school', '?'], ['The', 'batter', 'hit', 'the', 'ball', 'out', 'off', 'AT', '&', 'T', 'park', 'into', 'the', 'pacific', 'ocean', '.'], ['The', 'pitcher', 'threw', 'the', 'ball', 'into', 'the', 'dug-out', '.']]
###Markdown
**Step 2: Use the document to create a dictionary - a dictionary maps every word to a number**
###Code
from gensim.corpora import Dictionary
dictt = Dictionary(tokenized_documents)
dict_tn = dictt.token2id
print(dict_tn)
###Output
{"'s": 0, '.': 1, 'The': 2, 'and': 3, 'check': 4, 'dog': 5, 'entered': 6, 'if': 7, 'in': 8, 'owner': 9, 'ran': 10, 'room': 11, 'steps': 12, 'the': 13, 'to': 14, 'up': 15, 'was': 16, ',': 17, 'Comer': 18, 'Lambda': 19, 'Learning': 20, 'Machine': 21, 'My': 22, 'Thomson': 23, 'at': 24, 'commander': 25, 'is': 26, 'name': 27, 'of': 28, 'program': 29, 'school': 30, 'I': 31, 'am': 32, 'be': 33, 'creating': 34, 'curriculum': 35, 'for': 36, 'full-time': 37, 'teaching': 38, 'will': 39, 'favorite': 40, 'my': 41, 'one': 42, 'subjects': 43, 'April': 44, 'about': 45, 'class': 46, 'excited': 47, 'starting': 48, 'taking': 49, '?': 50, 'When': 51, 'does': 52, 'kick-off': 53, '&': 54, 'AT': 55, 'T': 56, 'ball': 57, 'batter': 58, 'hit': 59, 'into': 60, 'ocean': 61, 'off': 62, 'out': 63, 'pacific': 64, 'park': 65, 'dug-out': 66, 'pitcher': 67, 'threw': 68}
###Markdown
**Step 3: Convert the list of tokens from the document (created above in Step 1) into a bag of words. The bag of words highlights the term frequency i.e. each element in the bag of words is the index of the word in the dictionary and the number of times it occurs**
###Code
#bow = Dictionary.doc2bow(tokenized_documents.lower().split())
#bow = [[Dictionary.doc2bow(wrd.lower().split()) for wrd in lst] for lst in tokenized_documents]
corpus = [dictt.doc2bow(text) for text in tokenized_documents]
#dir(dictt)
print(corpus)
###Output
[[(0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 2), (10, 1), (11, 2), (12, 1), (13, 4), (14, 1), (15, 1), (16, 1)], [(1, 1), (13, 1), (17, 1), (18, 1), (19, 1), (20, 1), (21, 1), (22, 1), (23, 1), (24, 1), (25, 1), (26, 1), (27, 1), (28, 1), (29, 1), (30, 1)], [(1, 1), (3, 1), (13, 3), (20, 2), (21, 2), (29, 2), (31, 1), (32, 1), (33, 1), (34, 1), (35, 1), (36, 1), (37, 1), (38, 1), (39, 1)], [(1, 1), (20, 1), (21, 1), (26, 1), (28, 1), (40, 1), (41, 1), (42, 1), (43, 1)], [(1, 1), (8, 1), (13, 2), (19, 1), (20, 1), (21, 1), (24, 1), (30, 1), (31, 1), (32, 1), (44, 1), (45, 1), (46, 1), (47, 1), (48, 1), (49, 1)], [(13, 1), (19, 1), (20, 1), (21, 1), (24, 1), (29, 1), (30, 1), (50, 1), (51, 1), (52, 1), (53, 1)], [(1, 1), (2, 1), (13, 2), (54, 1), (55, 1), (56, 1), (57, 1), (58, 1), (59, 1), (60, 1), (61, 1), (62, 1), (63, 1), (64, 1), (65, 1)], [(1, 1), (2, 1), (13, 2), (57, 1), (60, 1), (66, 1), (67, 1), (68, 1)]]
###Markdown
**Step 4: Use the "*Gensim*" library to create a TF-IDF module for the bag of words**
###Code
from gensim import corpora, models, similarities
#dir(models)
tfidf = models.TfidfModel(corpus)
print(tfidf)
###Output
TfidfModel(num_docs=8, num_nnz=107)
###Markdown
**Step 5: a) Output the 5th document, b) Output the bag of words for the fifth document i.e. term frequency, c) Review the Inter Document Frequency (IDF) for each term in the bag of words for the 5th document**
###Code
#a) The 5th document
print(raw_documents[4])
#b) Bag of words (term frequency) for the 5th document
print(corpus[4])
#c) TF-IDF weights for each term in the 5th document
print(tfidf[corpus[4]])
###Output
_____no_output_____
###Markdown
**Step 6: Determine document similarity** - Identify the most similar document and the least similar document to the body of text below.*Good Reference for review*: https://radimrehurek.com/gensim/similarities/docsim.html
###Code
# Step 6
# Document to compare: "Machine Learning at Lambda school is awesome"
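# A sketch of one way to finish this step (hedged; it reuses dictt, corpus,
# tfidf and raw_documents from the previous steps):
from gensim import similarities

query = "Machine Learning at Lambda school is awesome"
query_tfidf = tfidf[dictt.doc2bow(word_tokenize(query))]

index = similarities.MatrixSimilarity(tfidf[corpus], num_features=len(dictt))
sims = index[query_tfidf]   # cosine similarity of the query to each document

print("Most similar document: ", raw_documents[sims.argmax()])
print("Least similar document:", raw_documents[sims.argmin()])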
###Output
_____no_output_____ |
exercices/python_performance_assign.ipynb | ###Markdown
Python performance exercises Python best practices exercises Exercise 1: considering the following function for concatenating a list of strings with a delimiter.
###Code
def ft_concatenate(l_strings, d):
"""concatenate list of strings into one string separated by delimeter"""
res = l_strings[0]
for e in l_strings[1:]:
res = res + d + e
return res
###Output
_____no_output_____
###Markdown
- profile the function and identify the bottlenecks. - improve the speed of the function. *Hint: you may need to look at the string functions in the Python documentation*
###Code
# write your code here
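# One possible approach (a sketch, not the only solution): profile the original
# with %prun or line_profiler, then replace the repeated string "+" with str.join,
# which builds the result in a single pass.
def ft_concatenate_fast(l_strings, d):
    """concatenate list of strings into one string separated by delimiter"""
    return d.join(l_strings)

# e.g. to compare the two versions on the same input:
# words = ["the", "quick", "brown", "fox"]
# %timeit ft_concatenate(words, " ")
# %timeit ft_concatenate_fast(words, " ")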
###Output
_____no_output_____
###Markdown
Exercise 2: In this exercise you will solve the following problem using two methods: a bruteforce method and a fast method.**Problem:** You are given a list of n integers, and your task is to calculate the number of distinct values in the list.**Example**- Input: 5 (the list length) followed by 2 3 2 2 3- Output: 2**Implement the following methods:**1. **bruteforce method:** create an empty list and add each item from the given list only if it has not been added before; at the end the result list will contain the unique values, so print the length of the list and you are done. 2. **fast method:** think of using the Set data structure.- time the two methods, what do you think?
###Code
# bruteforce fast method
# write fast method
# Creat two random lists of numbers for testing
# time the two methods
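# A sketch of both approaches (the names below are illustrative, not from the source):
def distinct_bruteforce(values):
    seen = []
    for v in values:
        if v not in seen:    # O(n) membership test on a list
            seen.append(v)
    return len(seen)

def distinct_fast(values):
    return len(set(values))  # set hashing makes membership O(1) on average

# import random
# sample = [random.randint(0, 100) for _ in range(10000)]
# %timeit distinct_bruteforce(sample)   # roughly quadratic, slow
# %timeit distinct_fast(sample)         # much faster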
###Output
_____no_output_____
###Markdown
Cython exercises Exercise 1 1. load the cython extension. 2. Considering the following polynomial function:
###Code
def poly(a,b):
return 10.5 * a + 3 * (b**2)
###Output
_____no_output_____
###Markdown
- Create an equivalent Cython function of `poly` with name `poly_cy` without any cython improvement, just make its cell a cython cell. 3. time the performance of the Python and Cython versions of the function; what is the factor of speed-up here between the two versions?
###Code
# write your code here
###Output
_____no_output_____
###Markdown
4. Now let's work on other examples using loops. - rewrite the same function fib below, which calculates the Fibonacci series, using cython, but now try to add types for the variables used inside it; add the suffix `_cy` to your new cython function.
###Code
def fib(n):
a, b = 1, 1
for i in range(n):
a, b = a + b, a
return a
# write your code here
###Output
_____no_output_____
###Markdown
- time the two functions for the Fibonacci series with n = 20; what is the factor of speed-up now? What do you think?
###Code
# write your code here
###Output
_____no_output_____
###Markdown
5. Recursive functions are functions that call themselves during their execution. Another interesting property of the Fibonacci series is that it can be written as a recursive function. That’s because each item depends on the values of other items (namely item n-1 and item n-2)- Rewrite the fib function using recursion. Is it faster than the non-recursive version? Does Cythonizing it give even more of an advantage?
###Code
# write your code here
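# A sketch of a recursive version that matches the convention of fib() above
# (fib(0) == 1, fib(1) == 2); the name fib_rec is illustrative.
def fib_rec(n):
    if n == 0:
        return 1
    if n == 1:
        return 2
    return fib_rec(n - 1) + fib_rec(n - 2)

# %timeit fib(20)      # loop version
# %timeit fib_rec(20)  # recursive version: much slower, since it recomputes the
#                      # same subproblems exponentially many times; Cythonizing
#                      # helps by a constant factor but does not change that.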
###Output
_____no_output_____
###Markdown
Exercise 2- Monte Carlo methods are a broad class of computational algorithms that rely on repeated random sampling to obtain numerical results. - One of the basic examples of getting started with the Monte Carlo algorithm is the estimation of Pi.**Estimation of Pi**- The idea is to simulate random (x, y) points in a 2-D plane with domain as a square of side 1 unit. - Imagine a circle inside the same domain with same diameter and inscribed into the square. - We then calculate the ratio of number points that lied inside the circle and total number of generated points. - Refer to the image below:We know that area of the square is 1 unit sq while that of circle is $\pi \ast (\frac{1}{2})^{2} = \frac{\pi}{4}$. Now for a very large number of generated points, The Algorithm1. Initialize cile_points, square_points and interval to 0.2. Generate random point x.3. Generate random point y.4. Calculate d = x*x + y*y.5. If d <= 1, increment circle_points.6. Increment square_points.7. Increment interval.8. If increment < NO_OF_ITERATIONS, repeat from 2.9. Calculate pi = 4*(circle_points/square_points).10. Terminate. **Your mission:** time the function `monte_carlo_pi`, identify the bottlenecks and create a new version using cython functionality to speed up monte carlo simulation for PI, use 100,000 points and compare the speed up factor between python and cython, considering the following optimizations:- add type for variables used.- add type for the function- use c rand function instead of python rand function. *Hint: you can import function from C libraries using the following approach `from libc. cimport `, replace the holders `` with the right identities for the current problem*
###Code
import random
def monte_carlo_pi(nsamples):
pi = 0.
# Implement your code here
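    # One possible implementation sketch, following the algorithm described above
    # (count points in the quarter/inscribed circle vs. all generated points):
    circle_points = 0
    square_points = 0
    for _ in range(nsamples):
        x = random.uniform(-1, 1)
        y = random.uniform(-1, 1)
        if x * x + y * y <= 1:
            circle_points += 1
        square_points += 1
    pi = 4.0 * circle_points / square_points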
return pi
###Output
_____no_output_____
###Markdown
Numba exercises Exercise 1: Previously we considered how to approximate π by Monte Carlo.- Use the same idea here, but make the code efficient using Numba.- Compare speed with and without Numba when the sample size is large.
###Code
# Your code here
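# A hedged sketch (assumes numba is installed): jit-compile the Monte Carlo pi
# estimator and compare it against the plain-Python version for a large sample.
import random
from numba import njit

@njit
def monte_carlo_pi_numba(nsamples):
    circle_points = 0
    for _ in range(nsamples):
        x = random.uniform(-1.0, 1.0)
        y = random.uniform(-1.0, 1.0)
        if x * x + y * y <= 1.0:
            circle_points += 1
    return 4.0 * circle_points / nsamples

# monte_carlo_pi_numba(1000)             # first call triggers compilation
# %timeit monte_carlo_pi(10000000)       # pure-Python version above
# %timeit monte_carlo_pi_numba(10000000) # jitted version, typically much faster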
###Output
_____no_output_____
###Markdown
Python performance exercises Python best practices exercises Exercise 1: considering the following function for concatenating a list of strings with a delimiter.
###Code
def ft_concatenate(l_strings, d):
"""concatenate list of strings into one string separated by delimeter"""
res = l_strings[0]
for e in l_strings[1:]:
res = res + d + e
return res
###Output
_____no_output_____
###Markdown
- profile the function and identify the bottlenecks. - improve the speed of the function. *Hint: you may need to look at the string functions in the Python documentation*
###Code
from time import time
import cProfile
import pstats
import profile
start = time()
profile = cProfile.Profile()
profile.enable()
def ft_concatenate(l_strings, d):
"""concatenate list of strings into one string separated by delimeter"""
res = l_strings[0]
for e in l_strings[1:]:
res = res + d + e
return res
profile.disable()
end = time()
print("It took", end - start, "seconds!")
profile.runcall(ft_concatenate, ["Alice", "Adventures", "in", "Wonderland"], "_")
ps = pstats.Stats(profile)
ps.print_stats()
%load_ext memory_profiler
%memit ft_concatenate(["Alice", "Adventures", "in", "Wonderland"], "_")
###Output
('It took', 0.00017189979553222656, 'seconds!')
19 function calls in 0.000 seconds
Random listing order was used
ncalls tottime percall cumtime percall filename:lineno(function)
2 0.000 0.000 0.000 0.000 /home/ubuntu/.local/lib/python2.7/site-packages/IPython/utils/ipstruct.py:125(__getattr__)
2 0.000 0.000 0.000 0.000 /home/ubuntu/.local/lib/python2.7/site-packages/IPython/core/hooks.py:204(pre_run_code_hook)
2 0.000 0.000 0.000 0.000 /home/ubuntu/.local/lib/python2.7/site-packages/IPython/core/hooks.py:139(__call__)
2 0.000 0.000 0.000 0.000 /usr/lib/python2.7/codeop.py:132(__call__)
1 0.000 0.000 0.000 0.000 <ipython-input-1-66fc5ae485f2>:16(<module>)
2 0.000 0.000 0.000 0.000 {compile}
2 0.000 0.000 0.000 0.000 /home/ubuntu/.local/lib/python2.7/site-packages/IPython/core/interactiveshell.py:1067(user_global_ns)
2 0.000 0.000 0.000 0.000 /home/ubuntu/.local/lib/python2.7/site-packages/IPython/core/interactiveshell.py:2848(run_code)
1 0.000 0.000 0.000 0.000 <ipython-input-1-66fc5ae485f2>:10(<module>)
2 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
1 0.000 0.000 0.000 0.000 <ipython-input-1-66fc5ae485f2>:10(ft_concatenate)
peak memory: 41.96 MiB, increment: 0.14 MiB
###Markdown
Python’s strings were created to be immutable, which means that every time we use the + operator, Python is actually creating a new string based on both substrings and returning the new string. Consider that, in our case, this operation would be executed many times. Thus the function uses a significant amount of memory, as shown above.
###Code
from time import time
import cProfile
import pstats
import profile
import numba
from numba import jit
profile = cProfile.Profile()
profile.enable()
@jit
def ft_concatenate_jit(l_strings, d):
res = l_strings[0]
for e in l_strings[1:]:
res = res + d + e
return res
profile.runcall(ft_concatenate_jit, ["Alice", "Adventures", "in", "Wonderland"], "_")
ps = pstats.Stats(profile)
ps.print_stats()
%timeit ft_concatenate_jit(["Alice", "Adventures", "in", "Wonderland"], "_")
###Output
32.1 µs ± 2.04 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)
###Markdown
Exercise 2: In this exercise you will solve the following problem using two methods: a bruteforce method and a fast method.**Problem:** You are given a list of n integers, and your task is to calculate the number of distinct values in the list.**Example**- Input: 5 (the list length) followed by 2 3 2 2 3- Output: 2**Implement the following methods:**1. **bruteforce method:** create an empty list and add each item from the given list only if it has not been added before; at the end the result list will contain the unique values, so print the length of the list and you are done. 2. **fast method:** think of using the Set data structure.- time the two methods, what do you think?
###Code
def bruteforce(list):
l1 = []
counter = 0
for elem in list:
if elem not in l1:
counter += 1
l1.append(elem)
return counter
def fast_method(list):
set1 = set(list)
return len(set1)
list1 = [1, 5, 8, 6, 6, 4, 20, 20, 20, 88, 8]  # use a list here; curly braces would build a set and drop duplicates up front
print("The number of distinct items of the first list using the ""bruteforce"" method is: ", bruteforce(list1))
print("The number of distinct items of the first list using the ""fast method"" is: ", fast_method(list1))
# time the two methods
%timeit bruteforce(list1)
%timeit fast_method(list1)
###Output
('The number of distinct items of the first list using the bruteforce method is: ', 7)
('The number of distinct items of the first list using the fast method is: ', 7)
The slowest run took 4.51 times longer than the fastest. This could mean that an intermediate result is being cached.
1000000 loops, best of 3: 1.37 µs per loop
The slowest run took 12.61 times longer than the fastest. This could mean that an intermediate result is being cached.
1000000 loops, best of 3: 397 ns per loop
###Markdown
The bruteforce function is not very efficient as it takes more time and space. Cython exercises Exercise 1 1. load the cython extension.
###Code
%load_ext Cython
###Output
The Cython extension is already loaded. To reload it, use:
%reload_ext Cython
###Markdown
2. Considering the following polynomial function:
###Code
def poly(a,b):
return 10.5 * a + 3 * (b**2)
%%cython
def poly_cy(a,b):
return 10.5 * a + 3 * (b**2)
###Output
/home/ubuntu/.local/lib/python2.7/site-packages/Cython/Compiler/Main.py:369: FutureWarning: Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release! File: /home/ubuntu/.cache/ipython/cython/_cython_magic_954319a5c0b57cf03a2f8c0e0c77bd20.pyx
tree = Parsing.p_module(s, pxd, full_module_name)
###Markdown
- Create an equivalent Cython function of `poly` with name `poly_cy` without any cython improvement, just make its cell a cython cell. 3. time the performance of the Python and Cython versions of the function; what is the factor of speed-up here between the two versions?
###Code
# write your code here
%timeit poly(20, 66)
%time poly_cy(20, 66)
###Output
The slowest run took 47.87 times longer than the fastest. This could mean that an intermediate result is being cached.
1000000 loops, best of 3: 209 ns per loop
CPU times: user 240 µs, sys: 4 µs, total: 244 µs
Wall time: 22.8 ms
###Markdown
The slight speed-up here between poly() and poly_cy() may be due to another process using up a lot of memory, which might leave less for our program, or it may mean more background work is required to clean up or reorganize memory space so all programs can work together. 4. Now let's work on other examples using loops. - rewrite the same function fib below, which calculates the Fibonacci series, using cython, but now try to add types for the variables used inside it; add the suffix `_cy` to your new cython function.
###Code
def fib(n):
a, b = 1, 1
for i in range(n):
a, b = a + b, a
return a
%%cython -a
cpdef int fib_cy(int n):
cdef int a = 1
cdef int b = 1
cdef int i
for i in range(n):
a, b = a + b, a
return a
###Output
/home/ubuntu/.local/lib/python2.7/site-packages/Cython/Compiler/Main.py:369: FutureWarning: Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release! File: /home/ubuntu/.cache/ipython/cython/_cython_magic_3ff9414de7cef44a53d841d5fc549386.pyx
tree = Parsing.p_module(s, pxd, full_module_name)
###Markdown
- time the two function for fibonacci series, with n = 20, what is the factor of speed now, What do you think?
###Code
# write your code here
%timeit fib(20)
%timeit fib_cy(20)
###Output
The slowest run took 18.62 times longer than the fastest. This could mean that an intermediate result is being cached.
1000000 loops, best of 3: 1.13 µs per loop
The slowest run took 21.20 times longer than the fastest. This could mean that an intermediate result is being cached.
10000000 loops, best of 3: 135 ns per loop
###Markdown
The fib_cy() function is obviously much faster than the fib() function; the reason is that typed variables in Cython generally lead to faster code execution. 5. Recursive functions are functions that call themselves during their execution. Another interesting property of the Fibonacci series is that it can be written as a recursive function. That’s because each item depends on the values of other items (namely item n-1 and item n-2)- Rewrite the fib function using recursion. Is it faster than the non-recursive version? Does Cythonizing it give even more of an advantage?
###Code
def fib_recursion(n):
if n <= 1:
return n
    return fib_recursion(n-1) + fib_recursion(n-2)
%timeit fib_recursion(20)
###Output
The slowest run took 21315.47 times longer than the fastest. This could mean that an intermediate result is being cached.
100000 loops, best of 3: 2.13 µs per loop
###Markdown
As we can see below, the recusrion version doesn't give more of an advantage comparing to the Cython version. Exercise 2- Monte Carlo methods are a broad class of computational algorithms that rely on repeated random sampling to obtain numerical results. - One of the basic examples of getting started with the Monte Carlo algorithm is the estimation of Pi.**Estimation of Pi**- The idea is to simulate random (x, y) points in a 2-D plane with domain as a square of side 1 unit. - Imagine a circle inside the same domain with same diameter and inscribed into the square. - We then calculate the ratio of number points that lied inside the circle and total number of generated points. - Refer to the image below:We know that area of the square is 1 unit sq while that of circle is $\pi \ast (\frac{1}{2})^{2} = \frac{\pi}{4}$. Now for a very large number of generated points, The Algorithm1. Initialize cile_points, square_points and interval to 0.2. Generate random point x.3. Generate random point y.4. Calculate d = x*x + y*y.5. If d <= 1, increment circle_points.6. Increment square_points.7. Increment interval.8. If increment < NO_OF_ITERATIONS, repeat from 2.9. Calculate pi = 4*(circle_points/square_points).10. Terminate. **Your mission:** time the function `monte_carlo_pi`, identify the bottlenecks and create a new version using cython functionality to speed up monte carlo simulation for PI, use 100,000 points and compare the speed up factor between python and cython, considering the following optimizations:- add type for variables used.- add type for the function- use c rand function instead of python rand function. *Hint: you can import function from C libraries using the following approach `from libc. cimport `, replace the holders `` with the right identities for the current problem*
###Code
import random
def monte_carlo_pi(nsamples):
pi = 0.
interval = nsamples
circle_points = 0
square_points = 0
for i in range(interval):
rand_x = random.uniform(-1, 1)
rand_y = random.uniform(-1, 1)
distant = rand_x * rand_x + rand_y * rand_y
        if distant <= 1:
            circle_points += 1
        square_points += 1
    pi = 4.0 * circle_points / square_points
return pi
%timeit monte_carlo_pi(100000)
from time import time
import cProfile
import pstats
from memory_profiler import profile
profile = cProfile.Profile()
profile.enable()
import random
def monte_carlo_pi(nsamples):
pi = 0.
interval = nsamples
circle_points = 0
square_points = 0
for i in range(interval):
rand_x = random.uniform(-1, 1)
rand_y = random.uniform(-1, 1)
distant = rand_x * rand_x + rand_y * rand_y
        if distant <= 1:
            circle_points += 1
        square_points += 1
    pi = 4.0 * circle_points / square_points
return pi
profile.runcall(monte_carlo_pi, 100000)
ps = pstats.Stats(profile)
ps.print_stats()
###Output
400029 function calls in 0.275 seconds
Random listing order was used
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 0.000 0.000 {method 'enable' of '_lsprof.Profiler' objects}
3 0.000 0.000 0.000 0.000 /home/ubuntu/.local/lib/python2.7/site-packages/IPython/core/hooks.py:204(pre_run_code_hook)
3 0.000 0.000 0.000 0.000 /home/ubuntu/.local/lib/python2.7/site-packages/IPython/core/hooks.py:139(__call__)
200000 0.073 0.000 0.088 0.000 /usr/lib/python2.7/random.py:360(uniform)
3 0.000 0.000 0.043 0.014 /usr/lib/python2.7/codeop.py:132(__call__)
200000 0.016 0.000 0.016 0.000 {method 'random' of '_random.Random' objects}
1 0.000 0.000 0.000 0.000 <ipython-input-62-30e8a3eebe75>:9(<module>)
1 0.080 0.080 0.173 0.173 <ipython-input-62-30e8a3eebe75>:9(monte_carlo_pi)
3 0.043 0.014 0.043 0.014 {compile}
3 0.000 0.000 0.000 0.000 /home/ubuntu/.local/lib/python2.7/site-packages/IPython/core/interactiveshell.py:1067(user_global_ns)
3 0.059 0.020 0.232 0.077 /home/ubuntu/.local/lib/python2.7/site-packages/IPython/core/interactiveshell.py:2848(run_code)
3 0.000 0.000 0.000 0.000 /home/ubuntu/.local/lib/python2.7/site-packages/IPython/utils/ipstruct.py:125(__getattr__)
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
1 0.000 0.000 0.173 0.173 <ipython-input-62-30e8a3eebe75>:24(<module>)
1 0.000 0.000 0.000 0.000 <ipython-input-62-30e8a3eebe75>:7(<module>)
1 0.000 0.000 0.173 0.173 /usr/lib/python2.7/cProfile.py:146(runcall)
1 0.004 0.004 0.004 0.004 {range}
###Markdown
As we can see above, the cumulative time for our function is 0.173s, and for the random method 0.016s, but that method was called 200,000 times! So the overall time is far higher than the time the function itself needs to execute. One option, then, is to move that call outside of the main function, since it does not depend on it.
###Code
%%cython -a
from libc.stdlib cimport rand, RAND_MAX

cpdef float monte_carlo_pi_cy(int nsamples):
    cdef int interval = nsamples
    cdef int circle_points = 0
    cdef int square_points = 0
    cdef int i = 0
    cdef float rand_x = 0
    cdef float rand_y = 0
    cdef float distant = 0
    cdef float pi = 0.0
    for i in range(interval):
        # uniform points in [-1, 1] drawn with the C rand() function
        rand_x = 2.0 * rand() / RAND_MAX - 1.0
        rand_y = 2.0 * rand() / RAND_MAX - 1.0
        distant = rand_x * rand_x + rand_y * rand_y
        if distant <= 1:
            circle_points += 1
        square_points += 1
    pi = 4.0 * circle_points / square_points
    return pi
%timeit monte_carlo_pi_cy(100000)
###Output
100 loops, best of 3: 8.02 ms per loop
###Markdown
Python performance exercises Python best practices exercises Exercise 1: considering the following function for concatenating a list of strings with a delimiter.
###Code
def ft_concatenate(l_strings, d):
"""concatenate list of strings into one string separated by delimeter"""
res = l_strings[0]
for e in l_strings[1:]:
res = res + d + e
return res
%load_ext line_profiler
d = "/"
lstring = ["Hello","World"]
%lprun -f ft_concatenate ft_concatenate(lstring,d)
d = "/"
lstring = ["Hello","World"]
def ft_concatenate2(l_strings, d):
"""concatenate list of strings into one string separated by delimeter"""
res = d.join(l_strings)
return res
%lprun -f ft_concatenate2 ft_concatenate2(lstring,d)
%timeit ft_concatenate2
###Output
20.4 ns ± 1.4 ns per loop (mean ± std. dev. of 7 runs, 100000000 loops each)
###Markdown
- profile the function and identify the bottlenecks. - improve the speed of the function. *Hint: you may need to look at the string functions in the Python documentation*
###Code
%load_ext cython
%%cython
cpdef str ft_concatenate_cy(str l_strings, str d):
cdef str res = l_strings[0]
cdef str e
for e in l_strings[1:]:
res = res + d + e
return res
%timeit ft_concatenate('imane','//')
%timeit ft_concatenate_cy('imane','//')
###Output
545 ns ± 98.1 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
412 ns ± 16.5 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
###Markdown
Exercise 2: In this exercise you will solve the following problem using two methods: a bruteforce method and a fast method.**Problem:** You are given a list of n integers, and your task is to calculate the number of distinct values in the list.**Example**- Input: 5 (the list length) followed by 2 3 2 2 3- Output: 2**Implement the following methods:**1. **bruteforce method:** create an empty list and add each item from the given list only if it has not been added before; at the end the result list will contain the unique values, so print the length of the list and you are done. 2. **fast method:** think of using the Set data structure.- time the two methods, what do you think?
###Code
# bruteforce fast method
def bruteforce_m(L):
n=len(L)
S=[]
for i in range(n):
if(L[i] not in S):
S.append(L[i])
return len(S)
# write fast method
def fast_method(L):
L=dict.fromkeys(L)
return len(L)
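# A minimal alternative sketch using the set data structure the exercise suggests
# (hypothetical helper; dict.fromkeys above and set below both deduplicate in O(n) on average):
def fast_method_set(L):
    return len(set(L))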
# Creat two random lists of numbers for testing
import random
s= random.sample(range(1, 30), 5)
f= random.sample(range(1, 30), 5)
# time the two methods
%timeit bruteforce_m(s)
%timeit fast_method(f)
###Output
951 ns ± 64.2 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
409 ns ± 30 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
###Markdown
Cython exercises Exercise 1 1. load the cython extension.
###Code
%load_ext cython
###Output
The cython extension is already loaded. To reload it, use:
%reload_ext cython
###Markdown
2. Considering the following polynomial function:
###Code
def poly(a,b):
return 10.5 * a + 3 * (b**2)
###Output
_____no_output_____
###Markdown
- Create an equivalent Cython function of `poly` with the name `poly_cy`, without any Cython improvement; just make its cell a Cython cell. 3. Time the performance of the Python and Cython versions of the function. What is the speed-up factor between the two versions?
###Code
%%cython
cpdef float poly_cy(float a,float b):
return ( 10.5 * a + 3 * (b**2))
%timeit y=poly_cy(5,2)
%timeit x= poly_cy(5,2)
###Output
98.5 ns ± 4.32 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)
98.5 ns ± 0.761 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)
###Markdown
4. Now let's work on another example using a loop. - Rewrite the function `fib` below, which computes the Fibonacci series, using Cython, but now add types for the variables used inside it, and add the suffix `_cy` to your new Cython function.
###Code
def fib(n):
a, b = 1, 1
for i in range(n):
a, b = a + b, a
return a
%%cython
cpdef int fib_cy(int n):
    cdef int a = 1, b = 1
    cdef int i
for i in range(n):
a,b=a+b,a
return a
###Output
_____no_output_____
###Markdown
- Time the two functions for the Fibonacci series with n = 20. What is the speed-up factor now? What do you think?
###Code
%timeit fib(20)
%timeit fib_cy(20)
###Output
1.17 µs ± 105 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
67.1 ns ± 4.29 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)
###Markdown
5. Recursive functions are functions that call themselves during their execution. Another interesting property of the Fibonacci series is that it can be written as a recursive function. That’s because each item depends on the values of other items (namely item n-1 and item n-2)- Rewrite the fib function using recursion. Is it faster than the non-recursive version? Does Cythonizing it give even more of an advantage?
###Code
def recur_fib(n):
if (n==0):
return 0
if (n==1):
return 1
return (recur_fib(n-1)+recur_fib(n-2))
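# The naive recursion above recomputes the same subproblems many times, so it grows
# exponentially with n; that is why it is slower than the loop version. A minimal
# memoized sketch (hypothetical helper, not part of the original exercise code):
from functools import lru_cache

@lru_cache(maxsize=None)
def recur_fib_memo(n):
    if n == 0:
        return 0
    if n == 1:
        return 1
    return recur_fib_memo(n - 1) + recur_fib_memo(n - 2)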
%timeit recur_fib(20)
###Output
2.58 ms ± 318 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
Exercise 2- Monte Carlo methods are a broad class of computational algorithms that rely on repeated random sampling to obtain numerical results. - One of the basic examples of getting started with the Monte Carlo algorithm is the estimation of Pi.**Estimation of Pi**- The idea is to simulate random (x, y) points in a 2-D plane with domain as a square of side 1 unit. - Imagine a circle inside the same domain with same diameter and inscribed into the square. - We then calculate the ratio of number points that lied inside the circle and total number of generated points. - Refer to the image below:We know that area of the square is 1 unit sq while that of circle is $\pi \ast (\frac{1}{2})^{2} = \frac{\pi}{4}$. Now for a very large number of generated points, The Algorithm1. Initialize cile_points, square_points and interval to 0.2. Generate random point x.3. Generate random point y.4. Calculate d = x*x + y*y.5. If d <= 1, increment circle_points.6. Increment square_points.7. Increment interval.8. If increment < NO_OF_ITERATIONS, repeat from 2.9. Calculate pi = 4*(circle_points/square_points).10. Terminate. **Your mission:** time the function `monte_carlo_pi`, identify the bottlenecks and create a new version using cython functionality to speed up monte carlo simulation for PI, use 100,000 points and compare the speed up factor between python and cython, considering the following optimizations:- add type for variables used.- add type for the function- use c rand function instead of python rand function. *Hint: you can import function from C libraries using the following approach `from libc. cimport `, replace the holders `` with the right identities for the current problem*
###Code
import random
def monte_carlo_pi(N):
c=0
pi=0
s=0
for i in range(N):
x=random.random()
y=random.random()
d=x**2 + y**2
if (d<= 1):
c+=1
s+=1
pi = 4*c/s
return pi
monte_carlo_pi(1000000)
%load_ext cython
%%cython
from libc.stdlib cimport rand, srand, RAND_MAX
from libc.time cimport time
cpdef float monte_carlo_pi_cy(int N):
cdef int c=0
cdef int i
cdef float x, y, d, pi
cdef int s=0
srand(time(NULL))
for i in range(N):
x = float(rand())/float(RAND_MAX)
y = float(rand())/float(RAND_MAX)
d = x**2 + y**2
if d <= 1:
c+= 1
s+=1
pi = 4 * (c/s)
return pi
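# Note: square_points (s) is incremented on every iteration, so it always equals N here;
# the estimate could equivalently be written as 4.0 * c / N.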
print(monte_carlo_pi_cy(1000000))
###Output
3.141688108444214
###Markdown
Numba exercises Exercise 1 Previously we considered how to approximate π by Monte Carlo. - Use the same idea here, but make the code efficient using Numba. - Compare speed with and without Numba when the sample size is large.
###Code
import random
import numba
from numba import jit
@jit(nopython=True)
def monte_carlo_pi_jit(N):
c=0
pi=0
s = 0
for i in range(N):
x=random.random()
y=random.random()
d=x**2 + y**2
if (d<= 1):
c+=1
s+=1
pi = 4*c/s
return pi
monte_carlo_pi_jit(1000000)
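# The call above also serves as a warm-up: the first invocation of a @jit function
# triggers compilation, so the %timeit of monte_carlo_pi_jit below measures only the
# compiled code path.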
%timeit monte_carlo_pi(100000)
%timeit monte_carlo_pi_jit(100000)
###Output
36 ms ± 3.49 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
997 µs ± 15 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
###Markdown
Exercise 2In the [Introduction to Quantitative Economics](https://python.quantecon.org/intro.html) with Python lecture series you can learn all about finite-state Markov chains.For now, let's just concentrate on simulating a very simple example of such a chain.Suppose that the volatility of returns on an asset can be in one of two regimes — high or low.The transition probabilities across states are as follows For example, let the period length be one day, and suppose the current state is high.We see from the graph that the state tomorrow will be- high with probability 0.8- low with probability 0.2Your task is to simulate a sequence of daily volatility states according to this rule.Set the length of the sequence to `n = 1_000_000` and start in the high state.Implement a pure Python version and a Numba version, and compare speeds.To test your code, evaluate the fraction of time that the chain spends in the low state.If your code is correct, it should be about 2/3.Hints:- Represent the low state as 0 and the high state as 1.- If you want to store integers in a NumPy array and then apply JIT compilation, use `x = np.empty(n, dtype=np.int_)`.
###Code
import random
import numpy as np
low_to_high = 0.1
high_to_low = 0.2
def seq(n):
x = np.empty(n, dtype = np.int_)
y = np.random.uniform(0, 1, size = n)
x[0] = 1
    for i in range(1, n):
        if(x[i-1] == 0):
            x[i] = y[i] < low_to_high
        else:
            x[i] = y[i] > high_to_low
return x
x=seq(1000000)
print(x)
print(np.mean(x == 0))
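# Sanity check: the theoretical stationary probability of the low state for this
# two-state chain is high_to_low / (high_to_low + low_to_high) = 0.2 / 0.3 = 2/3,
# which the empirical fraction printed above should approach.
expected_low_fraction = high_to_low / (high_to_low + low_to_high)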
%timeit seq(1000000)
import numba
import random
import numpy as np
from numba import jit
low_to_high = 0.1
high_to_low = 0.2
@jit(nopython = True)
def seq_jit(n):
x = np.empty(n, dtype = np.int_)
y = np.random.uniform(0, 1, size = n)
x[0] = 1
for i in range(1, n):
if(x[i-1] == 0):
x[i] = y[i] < low_to_high
else:
x[i] = y[i] > high_to_low
return x
%timeit seq_jit(1000000)
###Output
13.6 ms ± 777 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
Python performance exercises Python best practices exercises Exercise 1 Considering the following function for concatenating a list of strings with a delimiter.
###Code
def ft_concatenate(l_strings, d):
"""concatenate list of strings into one string separated by delimeter"""
res = l_strings[0]
for e in l_strings[1:]:
res = res + d + e
return res
ft_concatenate(['no','clue','what','im','doing'],'.')
###Output
_____no_output_____
###Markdown
- Profile the function and identify the bottlenecks. - Improve the speed of the function. *Hint: you may need to look at the string functions in the Python documentation.*
###Code
%load_ext line_profiler
l_strings=['no','clue','what','im','doing']
d='.'
%lprun -f ft_concatenate ft_concatenate(l_strings, d)
'''
The line_profiler extension is already loaded. To reload it, use:
%reload_ext line_profiler
Timer unit: 1e-07 s
Total time: 3.05e-05 s
File: C:\Users\hp\AppData\Local\Temp/ipykernel_4672/3225034489.py
Function: ft_concatenate at line 1
Line # Hits Time Per Hit % Time Line Contents
==============================================================
1 def ft_concatenate(l_strings, d):
2 """concatenate list of strings into one string separated by delimeter"""
3 1 63.0 63.0 20.7 res = l_strings[0]
4 5 109.0 21.8 35.7 for e in l_strings[1:]:
5 4 117.0 29.2 38.4 res = res + d + e
6 1 16.0 16.0 5.2 return res
'''
# bottlenecks: the execution time is dominated by the explicit loop and the repeated string concatenation
def ft_concatenate(l_strings, d):
return d.join(l_strings)
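# str.join builds the result in a single pass, whereas the original
# `res = res + d + e` creates a new, ever larger string on every iteration,
# which can make the loop quadratic in the total output length.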
%load_ext line_profiler
l_strings=['no','clue','what','im','doing']
d='.'
%lprun -f ft_concatenate ft_concatenate(l_strings, d)
###Output
The line_profiler extension is already loaded. To reload it, use:
%reload_ext line_profiler
###Markdown
Exercise 2 In this exercise you will solve the following problem using two methods: a brute-force method and a fast method. **Problem:** You are given a list of n integers, and your task is to calculate the number of distinct values in the list. **Example** - Input: 5, followed by 2 3 2 2 3 - Output: 2 **Implement the following methods:** 1. **brute-force method:** create an empty list and add each item from the given list only if it has not been added before; at the end the result list contains the unique values, so print its length and you are done. 2. **fast method:** think of using the Set data structure. - Time the two methods. What do you think?
###Code
# bruteforce fast method
def distinct(list):
L=[]
for i in list:
if i not in L:
L.append(i)
return len(L)
# write fast method
def distinctopt(list):
lset = set(list)
return len(lset)
# Creat two random lists of numbers for testing
import random
rndml1 = []
rndml2 = []
for i in range(0,5):
rndml1.append(random.randint(1,10))
    rndml2.append(random.randint(1,10))
# time the two methods
print("distinct() liste 1")
%timeit distinct(rndml1)
print("distinctopt() liste 1")
%timeit distinctopt(rndml1)
print("distinct() liste 2")
%timeit distinct(rndml1)
print("distinctopt() liste 2")
%timeit distinctopt(rndml1)
###Output
distinct() liste 1
3.25 µs ± 1.22 µs per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
distinctopt() liste 1
378 ns ± 72.2 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
distinct() liste 2
1.27 µs ± 70.5 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
distinctopt() liste 2
306 ns ± 7.61 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
###Markdown
Cython exercises Exercise 1 1. load the cython extension.
###Code
%load_ext cython
###Output
_____no_output_____
###Markdown
2. Considering the following polynomial function:
###Code
def poly(a,b):
return 10.5 * a + 3 * (b**2)
###Output
_____no_output_____
###Markdown
- Create an equivalent Cython function of `poly` with name `poly_cy` without any cython improvement, just make its cell a cython cell.
###Code
%%cython -a
def poly_cy(float a,float b):
return 10.5 * a + 3 * (b**2)
###Output
_____no_output_____
###Markdown
3. Time the performance of the Python and Cython versions of the function. What is the speed-up factor between the two versions?
###Code
# write your code here
a=1
b=3
print("python")
%timeit poly(a,b)
print("cython")
%timeit poly_cy(a,b)
###Output
_____no_output_____
###Markdown
4. Now let's work on another example using a loop. - Rewrite the function `fib` below, which computes the Fibonacci series, using Cython, but now add types for the variables used inside it, and add the suffix `_cy` to your new Cython function.
###Code
def fib(n):
a, b = 1, 1
for i in range(n):
a, b = a + b, a
return a
# write your code here
%%cython -a
def fib_cy(int n ):
cdef int a =1
cdef int b =1
for i in range(n):
a, b = a + b, a
return a
###Output
_____no_output_____
###Markdown
- Time the two functions for the Fibonacci series with n = 20. What is the speed-up factor now? What do you think?
###Code
# write your code here
n=20
print("python")
%timeit fib(n)
print("cython")
%timeit fib_cy(n)
###Output
_____no_output_____
###Markdown
5. Recursive functions are functions that call themselves during their execution. Another interesting property of the Fibonacci series is that it can be written as a recursive function. That’s because each item depends on the values of other items (namely item n-1 and item n-2)- Rewrite the fib function using recursion. Is it faster than the non-recursive version? Does Cythonizing it give even more of an advantage?
###Code
# write your code here
def recur_fib( n):
if n < 0:
print("use a unsigned integer")
elif n == 0:
return 0
elif n == 1 or n == 2:
return 1
else:
return recur_fib( n-1 )+ recur_fib( n-2)
###Output
_____no_output_____
###Markdown
Exercise 2- Monte Carlo methods are a broad class of computational algorithms that rely on repeated random sampling to obtain numerical results. - One of the basic examples of getting started with the Monte Carlo algorithm is the estimation of Pi.**Estimation of Pi**- The idea is to simulate random (x, y) points in a 2-D plane with domain as a square of side 1 unit. - Imagine a circle inside the same domain with same diameter and inscribed into the square. - We then calculate the ratio of number points that lied inside the circle and total number of generated points. - Refer to the image below:We know that area of the square is 1 unit sq while that of circle is $\pi \ast (\frac{1}{2})^{2} = \frac{\pi}{4}$. Now for a very large number of generated points, The Algorithm1. Initialize cile_points, square_points and interval to 0.2. Generate random point x.3. Generate random point y.4. Calculate d = x*x + y*y.5. If d <= 1, increment circle_points.6. Increment square_points.7. Increment interval.8. If increment < NO_OF_ITERATIONS, repeat from 2.9. Calculate pi = 4*(circle_points/square_points).10. Terminate. **Your mission:** time the function `monte_carlo_pi`, identify the bottlenecks and create a new version using cython functionality to speed up monte carlo simulation for PI, use 100,000 points and compare the speed up factor between python and cython, considering the following optimizations:- add type for variables used.- add type for the function- use c rand function instead of python rand function. *Hint: you can import function from C libraries using the following approach `from libc. cimport `, replace the holders `` with the right identities for the current problem*
###Code
%%cython -a
from libc.stdlib cimport rand
cdef extern from "limits.h":
    int INT_MAX
def monte_carlo_pi_cy(int nsamples):
    cdef int circle_points = 0
    cdef int square_points = nsamples
    cdef int i
    cdef float a,b,x,y,d
    for i in range(nsamples):
        # rand() is the C library generator; on glibc RAND_MAX equals INT_MAX,
        # so dividing by INT_MAX scales the draw into [0, 1]
        a = rand()/float(INT_MAX)
        b = rand()/float(INT_MAX)
        x = -1 + 2 * a
        y = -1 + 2 * b
        d = x**2 + y**2
        if d <= 1:
            circle_points += 1
    return 4.0 * circle_points / square_points
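# Hedged usage sketch for the 100,000-point comparison the exercise asks for
# (assumes a pure-Python monte_carlo_pi such as the one defined earlier in this document):
# %timeit monte_carlo_pi(100000)
# %timeit monte_carlo_pi_cy(100000)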
###Output
_____no_output_____
###Markdown
Numba exercises Exercise 1 Previously we considered how to approximate π by Monte Carlo. - Use the same idea here, but make the code efficient using Numba. - Compare speed with and without Numba when the sample size is large.
###Code
import random
import numba
from numba import jit
@jit(nopython=True)
def monte_carlo_pi_numba(nsamples):
pi = 0.
interval = nsamples
circle_points = 0
square_points = 0
for i in range(interval):
rand_x = random.uniform(-1, 1)
rand_y = random.uniform(-1, 1)
distant = rand_x * rand_x + rand_y * rand_y
if(distant <= 1):
            circle_points += 1
        square_points += 1
pi = 4 * (circle_points / square_points)
return pi
%timeit monte_carlo_pi_numba(100000)
###Output
4.71 ms ± 2.3 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
###Markdown
Exercise 2In the [Introduction to Quantitative Economics](https://python.quantecon.org/intro.html) with Python lecture series you can learn all about finite-state Markov chains.For now, let's just concentrate on simulating a very simple example of such a chain.Suppose that the volatility of returns on an asset can be in one of two regimes — high or low.The transition probabilities across states are as follows For example, let the period length be one day, and suppose the current state is high.We see from the graph that the state tomorrow will be- high with probability 0.8- low with probability 0.2Your task is to simulate a sequence of daily volatility states according to this rule.Set the length of the sequence to `n = 1_000_000` and start in the high state.Implement a pure Python version and a Numba version, and compare speeds.To test your code, evaluate the fraction of time that the chain spends in the low state.If your code is correct, it should be about 2/3.Hints:- Represent the low state as 0 and the high state as 1.- If you want to store integers in a NumPy array and then apply JIT compilation, use `x = np.empty(n, dtype=np.int_)`.
###Code
import random
import numpy as np
low_to_high = 0.1
high_to_low = 0.2
def seq(n):
x = np.empty(n, dtype = np.int_)
y = np.random.uniform(0, 1, size = n)
x[0] = 1
for i in range(1, n):
if(x[i-1] == 0):
x[i] = y[i] < low_to_high
else:
x[i] = y[i] > high_to_low
return x
x = seq(1000000)
print(np.mean(x == 0))
%timeit seq(1000000)
import numba
import random
import numpy as np
from numba import jit
low_to_high = 0.1
high_to_low = 0.2
@jit(nopython = True)
def seq_jit(n):
x = np.empty(n, dtype = np.int_)
y = np.random.uniform(0, 1, size = n)
x[0] = 1
for i in range(1, n):
if(x[i-1] == 0):
x[i] = y[i] < low_to_high
else:
x[i] = y[i] > high_to_low
return x
%timeit seq_jit(1000000)
###Output
18.4 ms ± 181 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)
###Markdown
Python performance exercises Python best practices exercises Exercise 1 Considering the following function for concatenating a list of strings with a delimiter.
###Code
def ft_concatenate(l_strings, d):
"""concatenate list of strings into one string separated by delimeter"""
res = l_strings[0]
for e in l_strings[1:]:
res = res + d + e
return res
###Output
_____no_output_____
###Markdown
- Profile the function and identify the bottlenecks. - Improve the speed of the function. *Hint: you may need to look at the string functions in the Python documentation.*
###Code
%load_ext line_profiler
d = ", "
lstring = ["This","sentence","is","seperated","by","delimeter"]
%lprun -f ft_concatenate ft_concatenate(lstring,d)
"""Timer unit: 1e-07 s
Total time: 2.88e-05 s
File: <ipython-input-1-9d0a5ce84371>
Function: ft_concatenate at line 1
Line # Hits Time Per Hit % Time Line Contents
==============================================================
1 def ft_concatenate(l_strings, d):
2 #concatenate list of strings into one string separated by delimeter
3 1 46.0 46.0 16.0 res = l_strings[0]
4 6 66.0 11.0 22.9 for e in l_strings[1:]:
5 5 63.0 12.6 21.9 res = res + d + e
6 1 113.0 113.0 39.2 return res"""
print('Ordinary case profiling Output')
# Speeding up the function using a built-in function
def ft_concatenate2(l_strings, d):
res = d.join(l_strings)
return res
%lprun -f ft_concatenate2 ft_concatenate2(lstring,d)
"""Timer unit: 1e-07 s
Total time: 9.2e-06 s
File: <ipython-input-9-e4d4cc699381>
Function: ft_concatenate2 at line 2
Line # Hits Time Per Hit % Time Line Contents
==============================================================
2 def ft_concatenate2(l_strings, d):
3 1 69.0 69.0 75.0 res = d.join(l_strings)
4 1 23.0 23.0 25.0 return res"""
print('Speed up case profiling output')
###Output
Speed up case profiling output
###Markdown
Exercise 2 In this exercise you will solve the following problem using two methods: a brute-force method and a fast method. **Problem:** You are given a list of n integers, and your task is to calculate the number of distinct values in the list. **Example** - Input: 5, followed by 2 3 2 2 3 - Output: 2 **Implement the following methods:** 1. **brute-force method:** create an empty list and add each item from the given list only if it has not been added before; at the end the result list contains the unique values, so print its length and you are done. 2. **fast method:** think of using the Set data structure. - Time the two methods. What do you think?
###Code
# bruteforce fast method
def bruteforcemethod(n,list_b):
T = []
for i in range(n):
if list_b[i] not in T:
T.append(list_b[i])
return len(T)
# write fast method
def fastmethod(list_f):
list_f = set(list_f)
    return len(list_f)
# Creat two random lists of numbers for testing
l1 = [1,3,7,9,6,9,3,5,2,1,0,8]
l2 = [0,0,0,0,4,0,4,4,4,0,4,0]
# time the two methods
print("bruteforce method with l1")
%timeit bruteforcemethod(12,l1)
print("fast method with l1")
%timeit fastmethod(l1)
print("bruteforce method with l2")
%timeit bruteforcemethod(12,l2)
print("fast method with l2")
%timeit fastmethod(l2)
###Output
bruteforce method with l1
3.49 µs ± 354 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
fast method with l1
589 ns ± 45.6 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
bruteforce method with l2
1.67 µs ± 272 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
fast method with l2
491 ns ± 49.6 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
###Markdown
Cython exercises Exercise 1 1. load the cython extension.
###Code
%load_ext cython
###Output
_____no_output_____
###Markdown
2. Considering the following polynomial function:
###Code
def poly(a,b):
return 10.5 * a + 3 * (b**2)
###Output
_____no_output_____
###Markdown
- Create an equivalent Cython function of `poly` with name `poly_cy` without any cython improvement, just make its cell a cython cell.
###Code
%%cython -a
def poly_cy(a,b):
return 10.5 * a + 3 * (b**2)
###Output
_____no_output_____
###Markdown
3. Time the performance of the Python and Cython versions of the function. What is the speed-up factor between the two versions?
###Code
# write your code here
###Output
_____no_output_____
###Markdown
4. Now let's work on another example using a loop. - Rewrite the function `fib` below, which computes the Fibonacci series, using Cython, but now add types for the variables used inside it, and add the suffix `_cy` to your new Cython function.
###Code
def fib(n):
a, b = 1, 1
for i in range(n):
a, b = a + b, a
return a
# write your code here
###Output
_____no_output_____
###Markdown
- Time the two functions for the Fibonacci series with n = 20. What is the speed-up factor now? What do you think?
###Code
# write your code here
###Output
_____no_output_____
###Markdown
5. Recursive functions are functions that call themselves during their execution. Another interesting property of the Fibonacci series is that it can be written as a recursive function. That’s because each item depends on the values of other items (namely item n-1 and item n-2)- Rewrite the fib function using recursion. Is it faster than the non-recursive version? Does Cythonizing it give even more of an advantage?
###Code
# write your code here
###Output
_____no_output_____
###Markdown
Exercise 2- Monte Carlo methods are a broad class of computational algorithms that rely on repeated random sampling to obtain numerical results. - One of the basic examples of getting started with the Monte Carlo algorithm is the estimation of Pi.**Estimation of Pi**- The idea is to simulate random (x, y) points in a 2-D plane with domain as a square of side 1 unit. - Imagine a circle inside the same domain with same diameter and inscribed into the square. - We then calculate the ratio of number points that lied inside the circle and total number of generated points. - Refer to the image below:We know that area of the square is 1 unit sq while that of circle is $\pi \ast (\frac{1}{2})^{2} = \frac{\pi}{4}$. Now for a very large number of generated points, The Algorithm1. Initialize cile_points, square_points and interval to 0.2. Generate random point x.3. Generate random point y.4. Calculate d = x*x + y*y.5. If d <= 1, increment circle_points.6. Increment square_points.7. Increment interval.8. If increment < NO_OF_ITERATIONS, repeat from 2.9. Calculate pi = 4*(circle_points/square_points).10. Terminate. **Your mission:** time the function `monte_carlo_pi`, identify the bottlenecks and create a new version using cython functionality to speed up monte carlo simulation for PI, use 100,000 points and compare the speed up factor between python and cython, considering the following optimizations:- add type for variables used.- add type for the function- use c rand function instead of python rand function. *Hint: you can import function from C libraries using the following approach `from libc. cimport `, replace the holders `` with the right identities for the current problem*
###Code
import random
def monte_carlo_pi(nsamples):
circle_pts = 0
square_pts = 0
for i in range(nsamples):
x = -1 + 2 * random.random()
y = -1 + 2 * random.random()
d = x**2 + y**2
if d <= 1:
circle_pts += 1
square_pts += 1
pi = 4 * (circle_pts/square_pts)
return pi
import time
%timeit monte_carlo_pi(1000000)
###Output
901 ms ± 31.5 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
###Markdown
Numba exercises Exercise 1 Previously we considered how to approximate π by Monte Carlo. - Use the same idea here, but make the code efficient using Numba. - Compare speed with and without Numba when the sample size is large.
###Code
from numba import jit
@jit(nopython=True)
def monte_carlo_pi_numba(nsamples):
circle_pts = 0
square_pts = 0
for i in range(nsamples):
x = -1 + 2 * random.random()
y = -1 + 2 * random.random()
d = x**2 + y**2
if d <= 1:
circle_pts += 1
square_pts += 1
pi = 4 * (circle_pts/square_pts)
return pi
print("Pure Python code")
%timeit monte_carlo_pi(1000000)
print("Numba code")
%timeit monte_carlo_pi_numba(1000000)
###Output
Pure Python code
943 ms ± 21.5 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
Numba code
17.8 ms ± 361 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)
###Markdown
Exercise 2In the [Introduction to Quantitative Economics](https://python.quantecon.org/intro.html) with Python lecture series you can learn all about finite-state Markov chains.For now, let's just concentrate on simulating a very simple example of such a chain.Suppose that the volatility of returns on an asset can be in one of two regimes — high or low.The transition probabilities across states are as follows For example, let the period length be one day, and suppose the current state is high.We see from the graph that the state tomorrow will be- high with probability 0.8- low with probability 0.2Your task is to simulate a sequence of daily volatility states according to this rule.Set the length of the sequence to `n = 1_000_000` and start in the high state.Implement a pure Python version and a Numba version, and compare speeds.To test your code, evaluate the fraction of time that the chain spends in the low state.If your code is correct, it should be about 2/3.Hints:- Represent the low state as 0 and the high state as 1.- If you want to store integers in a NumPy array and then apply JIT compilation, use `x = np.empty(n, dtype=np.int_)`.
###Code
import numpy as np
def volatility(n):
# Low state is 0
# High state is 1
x = np.empty(n, dtype=np.int_)
x[0] = 1
state = 1
for i in range(1,n):
if state == 1:
state = np.random.choice([1,0],p=[0.8,0.2])
if state == 1:
x[i] = 1
else:
x[i] = 0
state = 0
else:
state = np.random.choice([0,1],p=[0.9,0.1])
if state == 0:
x[i] = 0
else:
x[i] = 1
state = 1
return (len(x)-np.sum(x))/len(x)
volatility(1000000)
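# np.random.choice with a p= vector is a fairly heavy call to make once per simulated day.
# A hedged sketch of the same chain driven by pre-drawn uniform numbers
# (hypothetical helper, equivalent in distribution but much cheaper per iteration):
def volatility_uniforms(n):
    x = np.empty(n, dtype=np.int_)
    u = np.random.uniform(0, 1, size=n)
    x[0] = 1
    for i in range(1, n):
        if x[i-1] == 1:
            x[i] = 0 if u[i] < 0.2 else 1   # high -> low with probability 0.2
        else:
            x[i] = 1 if u[i] < 0.1 else 0   # low -> high with probability 0.1
    return (len(x) - np.sum(x)) / len(x)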
#Numba Version
@jit
def volatility_numba(n):
# Low state is 0
# High state is 1
x = np.empty(n, dtype=np.int_)
x[0] = 1
state = 1
for i in range(1,n):
if state == 1:
state = np.random.choice([1,0],p=[0.8,0.2])
if state == 1:
x[i] = 1
else:
x[i] = 0
state = 0
else:
state = np.random.choice([0,1],p=[0.9,0.1])
if state == 0:
x[i] = 0
else:
x[i] = 1
state = 1
return (len(x)-np.sum(x))/len(x)
print("Pure Python code")
%timeit volatility(1000000)
print("Numba code")
%timeit volatility_numba(1000000)
# The previous cell isn't executed because it takes more time and memory than
# my humble computer could handle XD (np.random.choice with p= and lists of Python
# strings are likely unsupported in nopython mode, so @jit probably falls back to
# slow object mode here)
###Output
_____no_output_____
###Markdown
Python performance exercises Python best practices exercises Exercise 1 Considering the following function for concatenating a list of strings with a delimiter.
###Code
def ft_concatenate(l_strings, d):
"""concatenate list of strings into one string separated by delimeter"""
res = l_strings[0]
for e in l_strings[1:]:
res = res + d + e
return res
###Output
_____no_output_____
###Markdown
- Profile the function and identify the bottlenecks. - Improve the speed of the function. *Hint: you may need to look at the string functions in the Python documentation.*
###Code
%load_ext line_profiler
d = ", "
lstring = ["Hello","World","I","am","alive"]
%lprun -f ft_concatenate ft_concatenate(lstring,d)
#Timer unit: 1e-06 s
#Total time: 1.6e-05 s
#File: <ipython-input-9-9d0a5ce84371>
#Function: ft_concatenate at line 1
#Line # Hits Time Per Hit % Time Line Contents
#==============================================================
# 1 def ft_concatenate(l_strings, d):
# 2 """concatenate list of strings into one string separated by delimeter"""
# 3 1 2.0 2.0 12.5 res = l_strings[0]
# 4 5 7.0 1.4 43.8 for e in l_strings[1:]:
# 5 4 6.0 1.5 37.5 res = res + d + e
# 6 1 1.0 1.0 6.2 return res
#This result shows that the loop and the concatenation take the majority of the time because we try to reach each
#element in the list, wich means we keep on traversing the elements of the list over and over, and so the total time
#is mostly never less than 1e-05 s
def ft_concatenate2(l_strings, d):
"""concatenate list of strings into one string separated by delimeter"""
res = d.join(l_strings)
return res
%lprun -f ft_concatenate2 ft_concatenate2(lstring,d)
#Timer unit: 1e-06 s
#Total time: 3e-06 s
#File: <ipython-input-52-e8a8b95f7b97>
#Function: ft_concatenate2 at line 1
#Line # Hits Time Per Hit % Time Line Contents
#==============================================================
# 1 def ft_concatenate2(l_strings, d):
# 2 """concatenate list of strings into one string separated by delimeter"""
# 3 1 2.0 2.0 66.7 res = d.join(l_strings)
# 4 1 1.0 1.0 33.3 return res
#Here we used the join function which will reduce the time needed to loop in the list and to concatenate each element
#and that's why the total time is almost never over 7e-06 s
###Output
_____no_output_____
###Markdown
Exercise 2 In this exercise you will solve the following problem using two methods: a brute-force method and a fast method. **Problem:** You are given a list of n integers, and your task is to calculate the number of distinct values in the list. **Example** - Input: 5, followed by 2 3 2 2 3 - Output: 2 **Implement the following methods:** 1. **brute-force method:** create an empty list and add each item from the given list only if it has not been added before; at the end the result list contains the unique values, so print its length and you are done. 2. **fast method:** think of using the Set data structure. - Time the two methods. What do you think?
###Code
# bruteforce method
def bruteforce(n,l):
k = []
for i in range(n):
if l[i] not in k:
k.append(l[i])
return len(k)
# write fast method
def fast(l):
l = set(l)
    return len(l)
# Creat two random lists of numbers for testing
n1 = 10
l1 = [1,2,2,1,2,1,1,2,1,2]
n2 = 9
l2 = [1,2,3,4,5,6,7,8,9]
# time the two methods
print("bruteforce 1")
%timeit bruteforce(n1,l1)
print("fast 1")
%timeit fast(l1)
print("bruteforce 2")
%timeit bruteforce(n2,l2)
print("fast 2")
%timeit fast(l2)
#In both tests on the 2 functions, it is faster to use the set data structure rather than checking each and every
#element of the list if it's already there or not, and the results are obvious, while the bruteforce method is in
#the microseconds (1e-06 s), the fast method is in the nanoseconds (1e-09 s)
###Output
_____no_output_____
###Markdown
Cython exercises Exercise 1 1. load the cython extension.
###Code
%load_ext cython
###Output
_____no_output_____
###Markdown
2. Considering the following polynomial function:
###Code
def poly(a,b):
return 10.5 * a + 3 * (b**2)
###Output
_____no_output_____
###Markdown
- Create an equivalent Cython function of `poly` with name `poly_cy` without any cython improvement, just make its cell a cython cell.
###Code
%%cython -a
def poly_cy(a,b):
return 10.5 * a + 3 * (b**2)
###Output
_____no_output_____
###Markdown
3. Time the performance of the Python and Cython versions of the function. What is the speed-up factor between the two versions?
###Code
# write your code here
print("Python Function")
%timeit poly(10,10)
print("Cython Function")
%timeit poly_cy(10,10)
#There is almost no speedup here, as the yellow lines are still fairly dark, which means that not much has been done
###Output
_____no_output_____
###Markdown
4. Now let's work on another example using a loop. - Rewrite the function `fib` below, which computes the Fibonacci series, using Cython, but now add types for the variables used inside it, and add the suffix `_cy` to your new Cython function.
###Code
def fib(n):
a, b = 1, 1
for i in range(n):
a, b = a + b, a
return a
%%cython -a
def fib_cy(int n):
cdef int a = 1
cdef int b = 1
cdef int i
for i in range(n):
a, b = a + b, a
return a
###Output
_____no_output_____
###Markdown
- Time the two functions for the Fibonacci series with n = 20. What is the speed-up factor now? What do you think?
###Code
# write your code here
print("Python Function")
%timeit fib(20)
print("Cython Function")
%timeit fib_cy(20)
#The difference is very obvious, while the python function is in the microseconds, the cython one is not over 100 ns
###Output
_____no_output_____
###Markdown
5. Recursive functions are functions that call themselves during their execution. Another interesting property of the Fibonacci series is that it can be written as a recursive function. That’s because each item depends on the values of other items (namely item n-1 and item n-2)- Rewrite the fib function using recursion. Is it faster than the non-recursive version? Does Cythonizing it give even more of an advantage?
###Code
# write your code here
def rec_fib(n):
if n <= 1:
return n
else:
return (rec_fib(n-1) + rec_fib(n-2))
%%cython -a
def rec_fib_cy(int n):
if n <= 1:
return n
else:
return (rec_fib_cy(n-1) + rec_fib_cy(n-2))
print("Python Function")
%timeit rec_fib(20)
print("Cython Function")
%timeit rec_fib_cy(20)
#And here is the answer, making it recursive is basically making it slower, and that is because the function calls
#multiple same results, because it doesn't save them, so it gets a number, but then gets the same number in another
#call, and even cythonizing is not enough, because it is a problem with recursion as a whole
###Output
_____no_output_____
###Markdown
Exercise 2- Monte Carlo methods are a broad class of computational algorithms that rely on repeated random sampling to obtain numerical results. - One of the basic examples of getting started with the Monte Carlo algorithm is the estimation of Pi.**Estimation of Pi**- The idea is to simulate random (x, y) points in a 2-D plane with domain as a square of side 1 unit. - Imagine a circle inside the same domain with same diameter and inscribed into the square. - We then calculate the ratio of number points that lied inside the circle and total number of generated points. - Refer to the image below:We know that area of the square is 1 unit sq while that of circle is $\pi \ast (\frac{1}{2})^{2} = \frac{\pi}{4}$. Now for a very large number of generated points, The Algorithm1. Initialize cile_points, square_points and interval to 0.2. Generate random point x.3. Generate random point y.4. Calculate d = x*x + y*y.5. If d <= 1, increment circle_points.6. Increment square_points.7. Increment interval.8. If increment < NO_OF_ITERATIONS, repeat from 2.9. Calculate pi = 4*(circle_points/square_points).10. Terminate. **Your mission:** time the function `monte_carlo_pi`, identify the bottlenecks and create a new version using cython functionality to speed up monte carlo simulation for PI, use 100,000 points and compare the speed up factor between python and cython, considering the following optimizations:- add type for variables used.- add type for the function- use c rand function instead of python rand function. *Hint: you can import function from C libraries using the following approach `from libc. cimport `, replace the holders `` with the right identities for the current problem*
###Code
import random
def monte_carlo_pi(nsamples):
cile_points = 0
square_points = 0
for i in range(nsamples):
x = random.uniform(0,1)
y = random.uniform(0,1)
d = x**2 + y**2
if d <= 1:
cile_points += 1
square_points += 1
pi = 4 * (cile_points/square_points)
return pi
%%cython -a
from libc.stdlib cimport rand, srand, RAND_MAX
from libc.time cimport time
cpdef float monte_carlo_pi_cy(int nsamples):
cdef int cile_points = 0
cdef int square_points = 0
cdef int i
cdef float x, y, d, pi
srand(time(NULL))
for i in range(nsamples):
x = float(rand())/float(RAND_MAX)
y = float(rand())/float(RAND_MAX)
d = x**2 + y**2
if d <= 1:
cile_points += 1
square_points += 1
pi = 4 * (cile_points/square_points)
return pi
print("Python Function")
%timeit monte_carlo_pi(100000)
print("Cython Function")
%timeit monte_carlo_pi_cy(100000)
###Output
Python Function
151 ms ± 8.87 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
Cython Function
5.22 ms ± 23.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
Numba exercises Exercise 1 Previously we considered how to approximate π by Monte Carlo. - Use the same idea here, but make the code efficient using Numba. - Compare speed with and without Numba when the sample size is large.
###Code
# Your code here
import random
def monte_carlo_pi(nsamples):
cile_points = 0
square_points = 0
for i in range(nsamples):
x = random.uniform(0,1)
y = random.uniform(0,1)
d = x**2 + y**2
if d <= 1:
cile_points += 1
square_points += 1
pi = 4 * (cile_points/square_points)
return pi
import random
import numba
from numba import jit
@jit(nopython=True)
def monte_carlo_pi_jit(nsamples):
cile_points = 0
square_points = 0
for i in range(nsamples):
x = random.uniform(0,1)
y = random.uniform(0,1)
d = x**2 + y**2
if d <= 1:
cile_points += 1
square_points += 1
pi = 4 * (cile_points/square_points)
return pi
print("Without Numba")
%timeit monte_carlo_pi(100000)
print("With Numba")
%timeit monte_carlo_pi_jit(100000)
###Output
Without Numba
170 ms ± 32.4 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
With Numba
2.02 ms ± 403 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)
###Markdown
Exercise 2In the [Introduction to Quantitative Economics](https://python.quantecon.org/intro.html) with Python lecture series you can learn all about finite-state Markov chains.For now, let's just concentrate on simulating a very simple example of such a chain.Suppose that the volatility of returns on an asset can be in one of two regimes — high or low.The transition probabilities across states are as follows For example, let the period length be one day, and suppose the current state is high.We see from the graph that the state tomorrow will be- high with probability 0.8- low with probability 0.2Your task is to simulate a sequence of daily volatility states according to this rule.Set the length of the sequence to `n = 1_000_000` and start in the high state.Implement a pure Python version and a Numba version, and compare speeds.To test your code, evaluate the fraction of time that the chain spends in the low state.If your code is correct, it should be about 2/3.Hints:- Represent the low state as 0 and the high state as 1.- If you want to store integers in a NumPy array and then apply JIT compilation, use `x = np.empty(n, dtype=np.int_)`.
###Code
import numpy as np
def volatility(n):
state = ["High","Low"]
transition = [["HH","HL"],["LL","LH"]]
trans_prob = [[0.8,0.2],[0.9,0.1]]
x = np.empty(n, dtype=np.int_)
x[0] = 1
prob = 1
state_now = "High"
for i in range(1,n):
if state_now == "High":
change = np.random.choice(transition[0],replace=True,p=trans_prob[0])
if change == "HH":
prob = prob * 0.8
x[i] = 1
else:
prob = prob * 0.2
x[i] = 0
state_now = "Low"
else:
change = np.random.choice(transition[1],replace=True,p=trans_prob[1])
if change == "LL":
prob = prob * 0.9
x[i] = 0
else:
prob = prob * 0.1
x[i] = 1
state_now = "High"
print((len(x)-np.sum(x))/len(x))
return
import numpy as np
import numba
from numba import jit
@jit
def volatility_jit(n):
state = ["High","Low"]
transition = [["HH","HL"],["LL","LH"]]
trans_prob = [[0.8,0.2],[0.9,0.1]]
x = np.empty(n, dtype=np.int_)
x[0] = 1
prob = 1
state_now = "High"
for i in range(1,n):
if state_now == "High":
change = np.random.choice(transition[0],replace=True,p=trans_prob[0])
if change == "HH":
prob = prob * 0.8
x[i] = 1
else:
prob = prob * 0.2
x[i] = 0
state_now = "Low"
else:
change = np.random.choice(transition[1],replace=True,p=trans_prob[1])
if change == "LL":
prob = prob * 0.9
x[i] = 0
else:
prob = prob * 0.1
x[i] = 1
state_now = "High"
print((len(x)-np.sum(x))/len(x))
return
print("Without Numba")
%timeit volatility(1000000)
print("With Numba")
%timeit volatility_jit(1000000)
###Output
Without Numba
0.667678
0.667091
0.666993
0.669062
0.665264
0.667759
###Markdown
Python performance exercises Python best practices exercises Exercise 1 Considering the following function for concatenating a list of strings with a delimiter.
###Code
def ft_concatenate(l_strings, d):
"""concatenate list of strings into one string separated by delimeter"""
res = l_strings[0]
for e in l_strings[1:]:
res = res + d + e
return res
###Output
_____no_output_____
###Markdown
- Profile the function and identify the bottlenecks. - Improve the speed of the function. *Hint: you may need to look at the string functions in the Python documentation.*
###Code
#profile the function and identify the bottlenecks
%load_ext line_profiler
d = ", "
lstring = ["life","is","good"]
%lprun -f ft_concatenate ft_concatenate(lstring,d)
def ft_concatenate2(l_strings, d):
res = d.join(l_strings)
return res
%lprun -f ft_concatenate2 ft_concatenate2(lstring,d)
###Output
Timer unit: 1e-07 s
Total time: 6.5e-06 s
Could not find file C:\Users\BALADI\AppData\Local\Temp\ipykernel_604\4057333601.py
Are you sure you are running this program from the same directory
that you ran the profiler from?
Continuing without the function's contents.
Line # Hits Time Per Hit % Time Line Contents
==============================================================
1
2 1 54.0 54.0 83.1
3 1 11.0 11.0 16.9
###Markdown
Exercise 2 In this exercise you will solve the following problem using two methods: a brute-force method and a fast method. **Problem:** You are given a list of n integers, and your task is to calculate the number of distinct values in the list. **Example** - Input: 5, followed by 2 3 2 2 3 - Output: 2 **Implement the following methods:** 1. **brute-force method:** create an empty list and add each item from the given list only if it has not been added before; at the end the result list contains the unique values, so print its length and you are done. 2. **fast method:** think of using the Set data structure. - Time the two methods. What do you think?
###Code
#1: bruteforce_methode
def bruteforce(n,l):
k = []
for i in range(n):
if l[i] not in k:
k.append(l[i])
return len(k)
bruteforce(3,[2,2,4,3,4,6])
# write fast method
def fast(l):
l = set(l)
    return len(l)
fast([2,2,4,3,4,6])
n1 = 5
l1 = [3,4,2,4,1]
n2 = 6
l2 = [1,3,2,5,7,8]
# time the two methods
print("slow_method1")
%timeit bruteforce(n1,l1)
print("fast_method1")
%timeit fast(l1)
print("slow_method2")
%timeit bruteforce(n2,l2)
print("fast_method1")
%timeit fast(l2)
###Output
slow_method1
1.3 µs ± 203 ns per loop (mean ± std. dev. of 7 runs, 1,000,000 loops each)
fast_method1
321 ns ± 8.25 ns per loop (mean ± std. dev. of 7 runs, 1,000,000 loops each)
slow_method2
1.69 µs ± 169 ns per loop (mean ± std. dev. of 7 runs, 1,000,000 loops each)
fast_method1
376 ns ± 17.6 ns per loop (mean ± std. dev. of 7 runs, 1,000,000 loops each)
###Markdown
Cython exercises Exercise 1 1. load the cython extension.
###Code
%load_ext cython
###Output
_____no_output_____
###Markdown
2. Considering the following polynomial function:
###Code
def poly(a,b):
return 10.5 * a + 3 * (b**2)
poly(4,6)
###Output
_____no_output_____
###Markdown
- Create an equivalent Cython function of `poly` with name `poly_cy` without any cython improvement, just make its cell a cython cell.
###Code
import cython
def polycy (a:cython.float ,b:cython.float):
return 10.5 * a + 3 * (b**2)
polycy(4,6)
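# Note: with a plain `import cython`, these pure-Python-mode annotations are only hints
# for the Cython compiler; when the cell runs as ordinary Python (no %%cython / cythonize),
# they are ignored at runtime, which is why the timings below stay close to pure Python.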
###Output
_____no_output_____
###Markdown
3. Time the performance of the Python and Cython versions of the function. What is the speed-up factor between the two versions?
###Code
print("Python Function")
%timeit poly(10,10)
print("Cython Function")
%timeit polycy(10,10)
###Output
Python Function
553 ns ± 41.8 ns per loop (mean ± std. dev. of 7 runs, 1,000,000 loops each)
Cython Function
518 ns ± 16 ns per loop (mean ± std. dev. of 7 runs, 1,000,000 loops each)
###Markdown
4. Now let's work on another example using a loop. - Rewrite the function `fib` below, which computes the Fibonacci series, using Cython, but now add types for the variables used inside it, and add the suffix `_cy` to your new Cython function.
###Code
def fib(n):
a, b = 1, 1
for i in range(n):
a, b = a + b, a
return a
def fib_cy(n:cython.int):
a:cython.int =1
b:cython.int =1
i:cython.int
for i in range(n):
a, b = a + b, a
return a
###Output
_____no_output_____
###Markdown
- Time the two functions for the Fibonacci series with n = 20. What is the speed-up factor now? What do you think?
###Code
print("Python Function")
%timeit fib(12)
print("Cython Function")
%timeit fib_cy(12)
###Output
Python Function
1.12 µs ± 30.7 ns per loop (mean ± std. dev. of 7 runs, 1,000,000 loops each)
Cython Function
1.15 µs ± 28.1 ns per loop (mean ± std. dev. of 7 runs, 1,000,000 loops each)
###Markdown
5. Recursive functions are functions that call themselves during their execution. Another interesting property of the Fibonacci series is that it can be written as a recursive function. That’s because each item depends on the values of other items (namely item n-1 and item n-2)- Rewrite the fib function using recursion. Is it faster than the non-recursive version? Does Cythonizing it give even more of an advantage?
###Code
def rec_fib(n):
if n <= 1:
return n
else:
return (rec_fib(n-1) + rec_fib(n-2))
def rec_fib_cy(n:cython.int):
if(n<=1):
return n
else:
        return (rec_fib_cy(n-1) + rec_fib_cy(n-2))
print("Python Function")
%timeit rec_fib(12)
print("Cython Function")
%timeit rec_fib_cy(12)
###Output
Python Function
68.1 µs ± 2.61 µs per loop (mean ± std. dev. of 7 runs, 10,000 loops each)
Cython Function
64.5 µs ± 1.99 µs per loop (mean ± std. dev. of 7 runs, 10,000 loops each)
###Markdown
the difference between recursive and iterative fubo function is huge in term of time and storage and even with cython the function take time in the execution and doesn't really resolve the prob of recursivity Exercise 2- Monte Carlo methods are a broad class of computational algorithms that rely on repeated random sampling to obtain numerical results. - One of the basic examples of getting started with the Monte Carlo algorithm is the estimation of Pi.**Estimation of Pi**- The idea is to simulate random (x, y) points in a 2-D plane with domain as a square of side 1 unit. - Imagine a circle inside the same domain with same diameter and inscribed into the square. - We then calculate the ratio of number points that lied inside the circle and total number of generated points. - Refer to the image below:We know that area of the square is 1 unit sq while that of circle is $\pi \ast (\frac{1}{2})^{2} = \frac{\pi}{4}$. Now for a very large number of generated points, The Algorithm1. Initialize cile_points, square_points and interval to 0.2. Generate random point x.3. Generate random point y.4. Calculate d = x*x + y*y.5. If d <= 1, increment circle_points.6. Increment square_points.7. Increment interval.8. If increment < NO_OF_ITERATIONS, repeat from 2.9. Calculate pi = 4*(circle_points/square_points).10. Terminate. **Your mission:** time the function `monte_carlo_pi`, identify the bottlenecks and create a new version using cython functionality to speed up monte carlo simulation for PI, use 100,000 points and compare the speed up factor between python and cython, considering the following optimizations:- add type for variables used.- add type for the function- use c rand function instead of python rand function. *Hint: you can import function from C libraries using the following approach `from libc. cimport `, replace the holders `` with the right identities for the current problem*
###Code
import random
def monte_carlo_pi(nsamples):
cile_points = 0
square_points = 0
for i in range(nsamples):
x = random.uniform(0,1)
y = random.uniform(0,1)
d = x**2 + y**2
if d <= 1:
cile_points += 1
square_points += 1
pi = 4 * (cile_points/square_points)
return pi
%%cython -a
from libc.stdlib cimport rand, srand, RAND_MAX
from libc.time cimport time
cpdef float monte_carlo_pi_cy(int nsamples):
    cdef int cile_points = 0
    cdef int square_points = 0
    cdef int i
    cdef float x, y, d, pi
    srand(time(NULL))
    for i in range(nsamples):
        x = float(rand())/float(RAND_MAX)
        y = float(rand())/float(RAND_MAX)
        d = x**2 + y**2
        if d <= 1:
            cile_points += 1
        square_points += 1
    pi = 4.0 * cile_points / square_points
    return pi
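# Hedged comparison sketch for the 100,000-point benchmark requested by the exercise,
# using the monte_carlo_pi defined just above:
# %timeit monte_carlo_pi(100000)
# %timeit monte_carlo_pi_cy(100000)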
###Output
_____no_output_____
###Markdown
Numba exercises Exercise 1 Previously we considered how to approximate π by Monte Carlo. - Use the same idea here, but make the code efficient using Numba. - Compare speed with and without Numba when the sample size is large.
###Code
import random
def monte_carlo_pi(nsamples):
cile_points = 0
square_points = 0
for i in range(nsamples):
x = random.uniform(0,1)
y = random.uniform(0,1)
d = x**2 + y**2
if d <= 1:
cile_points += 1
square_points += 1
pi = 4 * (cile_points/square_points)
return pi
import random
import numba
from numba import jit
@jit(nopython=True)
def monte_carlo_pi_jit(nsamples):
cile_points = 0
square_points = 0
for i in range(nsamples):
x = random.uniform(0,1)
y = random.uniform(0,1)
d = x**2 + y**2
if d <= 1:
cile_points += 1
square_points += 1
pi = 4 * (cile_points/square_points)
return pi
print("Without Numba")
%timeit monte_carlo_pi(100000)
print("With Numba")
%timeit monte_carlo_pi_jit(100000)
###Output
Without Numba
142 ms ± 5.82 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
With Numba
2.22 ms ± 124 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)
###Markdown
Python performance exercises Python best practices exercises Exercise 1 Considering the following function for concatenating a list of strings with a delimiter.
###Code
def ft_concatenate(l_strings, d):
"""concatenate list of strings into one string separated by delimeter"""
res = l_strings[0]
for e in l_strings[1:]:
res = res + d + e
return res
###Output
_____no_output_____
###Markdown
- Profile the function and identify the bottlenecks. - Improve the speed of the function. *Hint: you may need to look at the string functions in the Python documentation.*
###Code
%load_ext line_profiler
d = " , "
l_strings = [ "this" ,"exercise" , "is" , "nice" ]
%lprun -f ft_concatenate ft_concatenate(l_strings , d)
#Timer unit: 1e-06 s
#Total time: 1e-05 s
#File: <ipython-input-4-9d0a5ce84371>
#Function: ft_concatenate at line 1
#Line # Hits Time Per Hit % Time Line Contents
#==============================================================
# 1 def ft_concatenate(l_strings, d):
# 2 """concatenate list of strings into one string separated by delimeter"""
# 3 1 2.0 2.0 20.0 res = l_strings[0]
# 4 4 5.0 1.2 50.0 for e in l_strings[1:]:
# 5 3 3.0 1.0 30.0 res = res + d + e
# 6 1 0.0 0.0 0.0 return res
#improve speed up of the function
def ft2_concatenate(l_strings, d):
"""concatenate list of strings into one string separated by delimeter"""
res = d.join(l_strings)
return res
%lprun -f ft2_concatenate ft2_concatenate(l_strings , d)
#Timer unit: 1e-06 s
#Total time: 8e-06 s
#File: <ipython-input-7-edcadd58f56c>
#Function: ft2_concatenate at line 26
#Line # Hits Time Per Hit % Time Line Contents
#==============================================================
# 26 def ft2_concatenate(l_strings, d):
#27 """concatenate list of strings into one string separated by delimeter"""
# 28 1 6.0 6.0 75.0 res = d.join(l_strings)
# 29 1 2.0 2.0 25.0 return res
###Output
The line_profiler extension is already loaded. To reload it, use:
%reload_ext line_profiler
###Markdown
Exercise 2 In this exercise you will solve the following problem using two methods: a brute-force method and a fast method. **Problem:** You are given a list of n integers, and your task is to calculate the number of distinct values in the list. **Example** - Input: 5, followed by 2 3 2 2 3 - Output: 2 **Implement the following methods:** 1. **brute-force method:** create an empty list and add each item from the given list only if it has not been added before; at the end the result list contains the unique values, so print its length and you are done. 2. **fast method:** think of using the Set data structure. - Time the two methods. What do you think?
###Code
# bruteforce fast method
def bruteforce_method(l):
M=[]
n = len(l)
for i in range(n):
if l[i] not in M:
M.append(l[i])
return len(M)
# write fast methoddef fast_method(l):
def fast_method(l):
l = set(l)
return len(l)
import timeit
# Creat two random lists of numbers for testing
l=[5, 2, 3 ,2, 2, 3]
bruteforce_method(l)
fast_method(l)
# time the two methods
%timeit bruteforce_method(l)
%timeit fast_method(l)
###Output
953 ns ± 9.75 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
293 ns ± 12.5 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
###Markdown
Cython exercises Exercise 1 1. load the cython extension.
###Code
%load_ext cython
###Output
_____no_output_____
###Markdown
2. Considering the following polynomial function:
###Code
def poly(a,b):
return 10.5 * a + 3 * (b**2)
###Output
_____no_output_____
###Markdown
- Create an equivalent Cython function of `poly` with the name `poly_cy`, without any Cython improvement; just make its cell a Cython cell. 3. Time the performance of the Python and Cython versions of the function. What is the speed-up factor between the two versions?
###Code
%%cython -a
def poly_cy(a,b):
return 10.5 * a + 3 * (b**2)
###Output
_____no_output_____
###Markdown
4. Now let's work on another example using a loop. - Rewrite the function `fib` below, which computes the Fibonacci series, using Cython, but now add types for the variables used inside it, and add the suffix `_cy` to your new Cython function.
###Code
import timeit
%timeit poly(5,9)
%timeit poly_cy(5,9)
def fib(n):
a, b = 1, 1
for i in range(n):
a, b = a + b, a
return a
%%cython -a
def fib_cy(n):
cdef int a = 1
cdef int b = 1
cdef int i
for i in range(n):
a, b = a + b, a
return a
###Output
_____no_output_____
###Markdown
- Time the two functions for the Fibonacci series with n = 20. What is the speed-up factor now? What do you think?
###Code
%timeit fib(20)
%timeit fib_cy(20)
###Output
_____no_output_____
###Markdown
5. Recursive functions are functions that call themselves during their execution. Another interesting property of the Fibonacci series is that it can be written as a recursive function. That’s because each item depends on the values of other items (namely item n-1 and item n-2)- Rewrite the fib function using recursion. Is it faster than the non-recursive version? Does Cythonizing it give even more of an advantage?
###Code
def rec_fib(n):
if n<=1:
return n
else:
return (rec_fib(n-1) + rec_fib(n-2))
%load_ext cython
%%cython -a
cpdef int rec_fib_cy(int n):
if n <= 1:
return n
else:
return (rec_fib_cy(n-1) + rec_fib_cy(n-2))
###Output
_____no_output_____
###Markdown
Exercise 2- Monte Carlo methods are a broad class of computational algorithms that rely on repeated random sampling to obtain numerical results. - One of the basic examples of getting started with the Monte Carlo algorithm is the estimation of Pi.**Estimation of Pi**- The idea is to simulate random (x, y) points in a 2-D plane with domain as a square of side 1 unit. - Imagine a circle inside the same domain with same diameter and inscribed into the square. - We then calculate the ratio of number points that lied inside the circle and total number of generated points. - Refer to the image below:We know that area of the square is 1 unit sq while that of circle is $\pi \ast (\frac{1}{2})^{2} = \frac{\pi}{4}$. Now for a very large number of generated points, The Algorithm1. Initialize cile_points, square_points and interval to 0.2. Generate random point x.3. Generate random point y.4. Calculate d = x*x + y*y.5. If d <= 1, increment circle_points.6. Increment square_points.7. Increment interval.8. If increment < NO_OF_ITERATIONS, repeat from 2.9. Calculate pi = 4*(circle_points/square_points).10. Terminate. **Your mission:** time the function `monte_carlo_pi`, identify the bottlenecks and create a new version using cython functionality to speed up monte carlo simulation for PI, use 100,000 points and compare the speed up factor between python and cython, considering the following optimizations:- add type for variables used.- add type for the function- use c rand function instead of python rand function. *Hint: you can import function from C libraries using the following approach `from libc. cimport `, replace the holders `` with the right identities for the current problem*
###Code
import random
def monte_carlo_pi(nsamples):
pi = 0
cile_points = 0
square_points = 0
for i in range(nsamples):
x = random.uniform(0,1)
y = random.uniform(0,1)
d = x**2 + y**2
if d<=1:
cile_points+=1
square_points+=1
pi = 4*(cile_points /square_points)
return pi
%load_ext cython
%%cython -a
from libc.stdlib cimport rand, srand, RAND_MAX
from libc.time cimport time
def monte_carlo_pi(nsamples):
cdef int cile_points = 0
cdef int square_points = 0
cdef int i
cdef float x, y, d, pi
srand(time(NULL))
for i in range(nsamples):
x = float(rand())/float(RAND_MAX)
y = float(rand())/float(RAND_MAX)
d = x**2 + y**2
if d <= 1:
cile_points += 1
square_points += 1
pi = 4 * (cile_points/square_points)
return pi
###Output
_____no_output_____
###Markdown
Numba exercises Exercise 1 Previously we considered how to approximate $\pi$ by Monte Carlo. - Use the same idea here, but make the code efficient using Numba. - Compare speed with and without Numba when the sample size is large.
###Code
import random
def monte_carlo_pi(nsamples):
cile_points = 0
square_points = 0
for i in range(nsamples):
x = random.uniform(0,1)
y = random.uniform(0,1)
d = x**2 + y**2
if d <= 1:
cile_points += 1
square_points += 1
pi = 4 * (cile_points/square_points)
return pi
import random
import numba
from numba import jit
@jit(nopython=True)
def monte_carlo_pi_jit(nsamples):
cile_points = 0
square_points = 0
for i in range(nsamples):
x = random.uniform(0,1)
y = random.uniform(0,1)
d = x**2 + y**2
if d <= 1:
cile_points += 1
square_points += 1
pi = 4 * (cile_points/square_points)
return pi
print("without using Numba")
%timeit monte_carlo_pi(100)
print("using Numba")
%timeit monte_carlo_pi_jit(100)
###Output
_____no_output_____
###Markdown
Exercise 2In the [Introduction to Quantitative Economics](https://python.quantecon.org/intro.html) with Python lecture series you can learn all about finite-state Markov chains.For now, let's just concentrate on simulating a very simple example of such a chain.Suppose that the volatility of returns on an asset can be in one of two regimes — high or low.The transition probabilities across states are as follows For example, let the period length be one day, and suppose the current state is high.We see from the graph that the state tomorrow will be- high with probability 0.8- low with probability 0.2Your task is to simulate a sequence of daily volatility states according to this rule.Set the length of the sequence to `n = 1_000_000` and start in the high state.Implement a pure Python version and a Numba version, and compare speeds.To test your code, evaluate the fraction of time that the chain spends in the low state.If your code is correct, it should be about 2/3.Hints:- Represent the low state as 0 and the high state as 1.- If you want to store integers in a NumPy array and then apply JIT compilation, use `x = np.empty(n, dtype=np.int_)`.
###Code
import numpy as np
def volatility(n):
HL = ["high" , "low"]
transition = [["hh" , "hl"] , ["ll" , "lh"]]
L = [[0.8,0.2] , [0.9,0.1]]
x = np.empty(n , dtype = np.int_)
x[0] = 1
prob = 1
H = "high"
for i in range(1,n):
if H == "high":
change = np.random.choice(transition[0],replace=True,p=L[0])
if change == "hh":
prob = prob * 0.8
x[i] = 1
else:
prob = prob * 0.2
x[i] = 0
H = "low"
else:
change = np.random.choice(transition[1] , replace = True , p=L[1])
if change == "ll":
prob = prob * 0.9
x[i] = 0
else:
prob = prob * 0.1
x[i] = 1
H = "high"
print((len(x) -np.sum(x))/len(x))
return
import numpy as np
import numba
from numba import jit
@jit
def volatility_jit(n):
HL = ["high" , "low"]
transition = [["hh" , "hl"] , ["ll" , "lh"]]
L = [[0.8,0.2] , [0.9,0.1]]
x = np.empty(n , dtype = np.int_)
x[0] = 1
prob = 1
H = "high"
for i in range(1,n):
if H == "high":
change = np.random.choice(transition[0],replace=True,p=L[0])
if change == "hh":
prob = prob * 0.8
x[i] = 1
else:
prob = prob * 0.2
x[i] = 0
H = "low"
else:
change = np.random.choice(transition[1] , replace = True , p=L[1])
if change == "ll":
prob = prob * 0.9
x[i] = 0
else:
prob = prob * 0.1
x[i] = 1
H = "high"
print((len(x) -np.sum(x))/len(x))
return
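# --- An additional nopython-friendly sketch (not part of the original solution). ---
# np.random.choice on lists of strings typically forces numba out of nopython mode,
# so this variant encodes the states as integers (0 = low, 1 = high), as the hints
# suggest. The name volatility_jit_int is made up for illustration.
from numba import njit

@njit
def volatility_jit_int(n):
    x = np.empty(n, dtype=np.int_)
    x[0] = 1  # start in the high state
    for i in range(1, n):
        u = np.random.random()
        if x[i - 1] == 1:
            x[i] = 1 if u < 0.8 else 0  # high -> high with probability 0.8
        else:
            x[i] = 0 if u < 0.9 else 1  # low -> low with probability 0.9
    return x

# example: states = volatility_jit_int(1_000_000); (states == 0).mean() should be close to 2/3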
###Output
_____no_output_____
###Markdown
Python performance exercises Python best practices exercises Exercise 1 Considering the following function for concatenating a list of strings with a delimiter.
###Code
import time
def ft_concatenate(l_strings, d):
"""concatenate list of strings into one string separated by delimeter"""
res = l_strings[0]
for e in l_strings[1:]:
res = res + d + e
return res
###Output
_____no_output_____
###Markdown
- profile the function and identify the bottlenecks.- improve speed up of the function*Hint: you may need to look to the string functions in python documentation*
###Code
# write your code here
def ft_concatenate_opt(l_strings, d):
return d.join(l_strings)
start1 = time.time()
print(ft_concatenate("tytybety"," "))
end1 = time.time()
start2 = time.time()
print(ft_concatenate_opt("tytybety"," "))
end2 = time.time()
print("execution time for ft_concatenate is :",end1 -start1 , "seconds")
print("execution time for ft_concatenate_opt is :",end2 -start2 , "seconds")
###Output
t y t y b e t y
t y t y b e t y
execution time for ft_concatenate is : 0.0005350112915039062 seconds
execution time for ft_concatenate_opt is : 0.00012183189392089844 seconds
###Markdown
Exercise 2In this exercise you will solve the following problem using two methods bruteforce mehode, and fast method.**Problem:** You are given a list of n integers, and your task is to calculate the number of distinct values in the list.**Example**- Input:52 3 2 2 3- Output:2**Implement the following methodes:**1. **bruteforce mehode:** create an empty list and start adding items for the given list without adding the previous item add, at the end the result list will contain unique values, print lenght of the list and you are done. 2. **fast method** think of using Set data structure.- time the two methods, what do you think?
###Code
# bruteforce fast method
def num_dist(L):
l = []
for i in range(len(L)):
if L[i] not in l:
l.append(L[i])
return len(l)
L =[5,2,3,2,2,3]
num_dist(L)
# write fast method
def num_dist_fast(L):
a = set(L)
return len(a)
L =[3,3,4,9,0,0,5,4]
num_dist_fast(L)
# Creat two random lists of numbers for testing
L2 =[3,3,4,9,0,0,5,4]
import time
# time the two methods
start1 = time.time()
print(num_dist(L2))
end1 = time.time()
start2 = time.time()
print(num_dist_fast(L2))
end2 = time.time()
print("execution time for num_dist is :",end1 -start1 , "seconds")
print("execution time for num_dist_fast is :",end2 -start2 , "seconds")
# even on this small list, the set-based version is several times faster than the list-based one
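# A rough additional sketch (assumption: random is available; the lists above are tiny,
# so the timings mostly measure call overhead). With a larger input the gap widens,
# since the list version is O(n^2) while the set version is O(n).
import random
big_list = [random.randrange(1000) for _ in range(20000)]
%timeit num_dist(big_list)
%timeit num_dist_fast(big_list)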
###Output
5
5
execution time for num_dist is : 0.0005159378051757812 seconds
execution time for num_dist_fast is : 7.510185241699219e-05 seconds
###Markdown
Cython exercises Exercise 1 1. load the cython extension.
###Code
%load_ext Cython
###Output
The Cython extension is already loaded. To reload it, use:
%reload_ext Cython
###Markdown
2. Considering the following polynomial function:
###Code
def poly(a,b):
return 10.5 * a + 3 * (b**2)
###Output
_____no_output_____
###Markdown
- Create an equivalent Cython function of `poly` with name `poly_cy` without any cython improvement, just make its cell a cython cell.
###Code
%%cython
def poly_cy(a,b):
return 10.5 * a + 3 * (b**2)
###Output
_____no_output_____
###Markdown
3. Time the performance of the Python and Cython versions of the function. What is the speed-up factor between the two versions?
###Code
# write your code here
import time
a = 1
b = 2
start1 = time.time()
poly(a,b)
end1 = time.time()
start2 = time.time()
poly_cy(a,b)
end2 = time.time()
print(" time performence of poly: ",end1 - start1, "seconds")
print(" time performence of poly_cy: ",end2 - start2, "seconds")
###Output
time performence of poly: 7.390975952148438e-05 seconds
time performence of poly_cy: 6.890296936035156e-05 seconds
###Markdown
factor of speed is : 4. Now lets work on another examples using loop. - rewrite the same function below fib that calculate fibonacci series using cython, but now try to add type for the variables used inside it, add a prefix `_cy` to your new cython function.
###Code
def fib(n):
a, b = 1, 1
for i in range(n):
a, b = a + b,a
return a
%load_ext Cython
%%cython
cpdef int fib_cy(int n):
cdef int a = 1, b = 1  # a and b must be initialised (they were left undefined in the original)
cdef int i
for i in range(n):
a,b=a+b,a
return a
###Output
_____no_output_____
###Markdown
- time the two function for fibonacci series, with n = 20, what is the factor of speed now, What do you think?
###Code
# write your code here
%timeit fib_cy(20)
%timeit fib(20)
###Output
65.4 ns ± 2.27 ns per loop (mean ± std. dev. of 7 runs, 10,000,000 loops each)
1.22 µs ± 25.4 ns per loop (mean ± std. dev. of 7 runs, 1,000,000 loops each)
###Markdown
5. Recursive functions are functions that call themselves during their execution. Another interesting property of the Fibonacci series is that it can be written as a recursive function. That’s because each item depends on the values of other items (namely item n-1 and item n-2)- Rewrite the fib function using recursion. Is it faster than the non-recursive version? Does Cythonizing it give even more of an advantage?
###Code
%load_ext Cython
# write your code here
def fibo_rec(n):
if n <= 1:
return n
return(fibo_rec(n-1) + fibo_rec(n-2))
%%cython
cpdef int fibo_rec_opt(int n):
if n <= 1:
return n
return(fibo_rec_opt(n-1) + fibo_rec_opt(n-2))
%timeit fibo_rec(5)
%timeit fibo_rec_opt(5)
###Output
1.65 µs ± 11.8 ns per loop (mean ± std. dev. of 7 runs, 1,000,000 loops each)
75.3 ns ± 0.354 ns per loop (mean ± std. dev. of 7 runs, 10,000,000 loops each)
###Markdown
Exercise 2- Monte Carlo methods are a broad class of computational algorithms that rely on repeated random sampling to obtain numerical results. - One of the basic examples of getting started with the Monte Carlo algorithm is the estimation of Pi.**Estimation of Pi**- The idea is to simulate random (x, y) points in a 2-D plane with domain as a square of side 1 unit. - Imagine a circle inside the same domain with same diameter and inscribed into the square. - We then calculate the ratio of number points that lied inside the circle and total number of generated points. - Refer to the image below:We know that area of the square is 1 unit sq while that of circle is $\pi \ast (\frac{1}{2})^{2} = \frac{\pi}{4}$. Now for a very large number of generated points, The Algorithm1. Initialize cile_points, square_points and interval to 0.2. Generate random point x.3. Generate random point y.4. Calculate d = x*x + y*y.5. If d <= 1, increment circle_points.6. Increment square_points.7. Increment interval.8. If increment < NO_OF_ITERATIONS, repeat from 2.9. Calculate pi = 4*(circle_points/square_points).10. Terminate. **Your mission:** time the function `monte_carlo_pi`, identify the bottlenecks and create a new version using cython functionality to speed up monte carlo simulation for PI, use 100,000 points and compare the speed up factor between python and cython, considering the following optimizations:- add type for variables used.- add type for the function- use c rand function instead of python rand function. *Hint: you can import function from C libraries using the following approach `from libc. cimport `, replace the holders `` with the right identities for the current problem*
###Code
%load_ext Cython
import random
def monte_carlo_pi(nsamples):
pi = 0
INTERVAL= 1000
circle_points= 0
square_points= 0
for i in range(INTERVAL ** 2):
#generation of random x,y
x= random.uniform(-1,1)
y= random.uniform(-1,1)
d = x*x + y*y
if d <= 1:
circle_points +=1
square_points +=1
INTERVAL += 1
pi = 4* circle_points / square_points
return pi
monte_carlo_pi(1000)
%%cython
import random
cpdef float monte_carlo_pi_opt(int nsamples):
cdef int circle_points = 0  # initialise both counters (only square_points was initialised before)
cdef int square_points = 0
cdef int INTERVAL = 1000
cdef float x
cdef float y
cdef float d
cdef float pi
for i in range(INTERVAL ** 2):
#generation of random x,y
x = random.uniform(-1,1)
y = random.uniform(-1,1)
d = x*x + y*y
if d <= 1:
circle_points +=1
square_points +=1
INTERVAL += 1
pi = 4* circle_points / square_points
return pi
print(monte_carlo_pi_opt(1000))
%timeit monte_carlo_pi_opt(1000)
%timeit monte_carlo_pi(1000)
###Output
465 ms ± 31.4 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
912 ms ± 135 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
###Markdown
Numba exercises Exercise 1 Previously we considered how to approximate $\pi$ by Monte Carlo. - Use the same idea here, but make the code efficient using Numba. - Compare speed with and without Numba when the sample size is large.
###Code
# Your code here
import random
from numba import jit
@jit
def monte_carlo_pi_nb(nsamples):
pi = 0
INTERVAL= 1000
circle_points= 0
square_points= 0
for i in range(INTERVAL ** 2):
#generation of random x,y
x= random.uniform(-1,1)
y= random.uniform(-1,1)
d = x*x + y*y
if d <= 1:
circle_points +=1
square_points +=1
INTERVAL += 1
pi = 4* circle_points / square_points
return pi
monte_carlo_pi(1000)
%timeit monte_carlo_pi_nb(3000)
%timeit monte_carlo_pi_opt(3000)
###Output
16.6 ms ± 660 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
521 ms ± 9.65 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
###Markdown
Python performance exercises Python best practices exercises Exercise 1 Considering the following function for concatenating a list of strings with a delimiter.
###Code
def ft_concatenate(l_strings, d):
"""concatenate list of strings into one string separated by delimeter"""
res = l_strings[0]
for e in l_strings[1:]:
res = res + d + e
return res
###Output
_____no_output_____
###Markdown
- profile the function and identify the bottlenecks.- improve speed up of the function*Hint: you may need to look to the string functions in python documentation*
###Code
%load_ext line_profiler
%lprun -f ft_concatenate ft_concatenate(['hi', 'there','wassup'],' ')
#output: the program spend most the time calling an item in the list
'''Timer unit: 1e-06 s
Total time: 8e-06 s
File: /var/folders/cw/0t2jzw0d1dg1rj3qj57z9_qh0000gn/T/ipykernel_1330/45768699.py
Function: ft_concatenate at line 1
Line # Hits Time Per Hit % Time Line Contents
==============================================================
1 def ft_concatenate(l_strings, d):
2 """concatenate list of strings into one string separated by delimeter"""
3 1 2.0 2.0 25.0 res = l_strings[0]
4 3 3.0 1.0 37.5 for e in l_strings[1:]:
5 2 2.0 1.0 25.0 res = res + d + e
6 1 1.0 1.0 12.5 return res'''
def ft_concatenate2(l_strings, d):
return d.join(l_strings)
%lprun -f ft_concatenate2 ft_concatenate2(['hi', 'there','wassup'],' ')
'''Timer unit: 1e-06 s
Total time: 3e-06 s
File: /var/folders/cw/0t2jzw0d1dg1rj3qj57z9_qh0000gn/T/ipykernel_1330/3455553713.py
Function: ft_concatenate2 at line 2
Line # Hits Time Per Hit % Time Line Contents
==============================================================
2 def ft_concatenate2(l_strings, d):
3 1 3.0 3.0 100.0 return d.join(l_strings)'''
###Output
_____no_output_____
###Markdown
Exercise 2In this exercise you will solve the following problem using two methods bruteforce mehode, and fast method.**Problem:** You are given a list of n integers, and your task is to calculate the number of distinct values in the list.**Example**- Input:52 3 2 2 3- Output:2**Implement the following methodes:**1. **bruteforce mehode:** create an empty list and start adding items for the given list without adding the previous item add, at the end the result list will contain unique values, print lenght of the list and you are done. 2. **fast method** think of using Set data structure.- time the two methods, what do you think?
###Code
# bruteforce fast method
def bruteforce(L):
R=[]
for item in L:
if item not in R :
R.append(item)
return len(R)
# write fast method
def fast(L):
return len(set(L))
# Creat two random lists of numbers for test
import numpy as np
L=np.random.rand(10000)
# time the two methods
# note: timeit() called with no arguments does not return a timestamp (it times a dummy
# 'pass' statement), so subtracting two such calls gives meaningless, even negative, values;
# time.perf_counter() is used here instead
from time import perf_counter
print('the bruteforce function')
startB = perf_counter()
bruteforce(L)
endB = perf_counter()
print(endB - startB)
print('the fast function')
startF = perf_counter()
fast(L)
endF = perf_counter()
print(endF - startF)
#the fast method is pretty much the optimal one
###Output
the bruteforce function
-0.013194874999953754
###Markdown
Cython exercises Exercise 1 1. load the cython extension.
###Code
%load_ext Cython
###Output
_____no_output_____
###Markdown
2. Considering the following polynomial function:
###Code
def poly(a,b):
return 10.5 * a + 3 * (b**2)
###Output
_____no_output_____
###Markdown
- Create an equivalent Cython function of `poly` with name `poly_cy` without any cython improvement, just make its cell a cython cell.
###Code
%%cython -a
def poly_cy(float a, float b):
cdef float p
p= 10.5 * a + 3 * (b**2)
return p
###Output
_____no_output_____
###Markdown
3. Time the performance of the Python and Cython versions of the function. What is the speed-up factor between the two versions?
###Code
print('python func :')
startP=time.time()
poly(200,244)
endP=time.time()
print(endP-startP)
print('cython func :')
startC=time.time()
poly_cy(200,244)
endC=time.time()
print(endC-startC)
###Output
python func :
5.5789947509765625e-05
cython func :
4.696846008300781e-05
###Markdown
4. Now lets work on another examples using loop. - rewrite the same function below fib that calculate fibonacci series using cython, but now try to add type for the variables used inside it, add a prefix `_cy` to your new cython function.
###Code
def fib(n):
a, b = 1, 1
for i in range(n):
a, b = a + b, a
return a
%%cython -a
def fib_cy(int n ):
cdef int a =1
cdef int b =1
for i in range(n):
a, b = a + b, a
return a
###Output
_____no_output_____
###Markdown
- time the two function for fibonacci series, with n = 20, what is the factor of speed now, What do you think?
###Code
# write your code here
from time import time
print(fib(20))
print(fib_cy(20))
print('python func :')
startP2=time()
fib(20)
endP2=time()
print(endP2-startP2)
print('cython func :')
startC2=time()
fib_cy(20)
endC2=time()
print(endC2-startC2)
###Output
17711
17711
cython func :
5.1975250244140625e-05
cython func :
4.887580871582031e-05
###Markdown
5. Recursive functions are functions that call themselves during their execution. Another interesting property of the Fibonacci series is that it can be written as a recursive function. That’s because each item depends on the values of other items (namely item n-1 and item n-2)- Rewrite the fib function using recursion. Is it faster than the non-recursive version? Does Cythonizing it give even more of an advantage?
###Code
# write your code here
import time
def recur_fib( n):
if n < 0:
print("use a unsigned integer")
elif n == 0:
return 0
elif n == 1 or n == 2:
return 1
else:
return recur_fib( n-1 )+ recur_fib( n-2)
s =time.time()
recur_fib(20)
e =time.time()
print(e-s)
%%cython -a
from time import time
def recur_fib_cy( int n):
if n < 0:
print("use a unsigned integer")
elif n == 0:
return 0
elif n == 1 or n == 2:
return 1
else:
return recur_fib_cy( n-1 )+ recur_fib_cy( n-2)
s = time()
recur_fib_cy(20)
e = time()
print('timing for cython : ',e-s)
#cythonizing a python function gives a further speed advantage
###Output
timing for cython : 0.0009207725524902344
###Markdown
Exercise 2- Monte Carlo methods are a broad class of computational algorithms that rely on repeated random sampling to obtain numerical results. - One of the basic examples of getting started with the Monte Carlo algorithm is the estimation of Pi.**Estimation of Pi**- The idea is to simulate random (x, y) points in a 2-D plane with domain as a square of side 1 unit. - Imagine a circle inside the same domain with same diameter and inscribed into the square. - We then calculate the ratio of number points that lied inside the circle and total number of generated points. - Refer to the image below:We know that area of the square is 1 unit sq while that of circle is $\pi \ast (\frac{1}{2})^{2} = \frac{\pi}{4}$. Now for a very large number of generated points, The Algorithm1. Initialize cile_points, square_points and interval to 0.2. Generate random point x.3. Generate random point y.4. Calculate d = x*x + y*y.5. If d <= 1, increment circle_points.6. Increment square_points.7. Increment interval.8. If increment < NO_OF_ITERATIONS, repeat from 2.9. Calculate pi = 4*(circle_points/square_points).10. Terminate.
###Code
import random
from time import time
def monte_carlo_pi(nsamples):
circle_points = 0
square_points = nsamples
for i in range(nsamples ):
a = random.random()
b = random.random()
x = -1 + 2 * a
y = -1 + 2 * b
d = x**2 + y**2
if d <= 1:
circle_points+=1
else:
pass
return 4*(circle_points/square_points)
s = time()
pi = monte_carlo_pi(100000)
e = time()
print('timing in python', e-s)
print(pi)
###Output
timing in python 0.05779719352722168
3.14744
###Markdown
**Your mission:** time the function `monte_carlo_pi`, identify the bottlenecks and create a new version using cython functionality to speed up monte carlo simulation for PI, use 100,000 points and compare the speed up factor between python and cython, considering the following optimizations:- add type for variables used.- add type for the function- use c rand function instead of python rand function. *Hint: you can import function from C libraries using the following approach `from libc. cimport `, replace the holders `` with the right identities for the current problem*
###Code
%%cython
from time import time
from libc.stdlib cimport rand
cdef extern from "limits.h":
int INT_MAX
def monte_carlo_pi_cy(int nsamples):
cdef int circle_points = 0
cdef int square_points = nsamples
cdef float a,b,x,y,d
for i in range(nsamples ):
a = rand()/float(INT_MAX)
b = rand()/float(INT_MAX)
x = -1 + 2 * a
y = -1 + 2 * b
d = x**2 + y**2
if d <= 1:
circle_points+=1
else:
pass
return 4*(circle_points/square_points)
s = time()
pi=monte_carlo_pi_cy(100000)
e = time()
print(pi)
print('timing : ',e-s)
###Output
/Users/grace/.ipython/cython/_cython_magic_6e8ecebf94ce72e3dd71c2ced63b003d.c:1367:7: warning: code will never be executed [-Wunreachable-code]
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
^~~~~~~~~~~~~~~
/Users/grace/.ipython/cython/_cython_magic_6e8ecebf94ce72e3dd71c2ced63b003d.c:1353:7: warning: code will never be executed [-Wunreachable-code]
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
^~~~~~~~~~~~~~~
2 warnings generated.
###Markdown
Numba exercises Exercise 1 Previously we considered how to approximate $\pi$ by Monte Carlo. - Use the same idea here, but make the code efficient using Numba. - Compare speed with and without Numba when the sample size is large.
###Code
import random
import time
from numba import jit
@jit(nopython=True)
def monte_carlo_pi(nsamples):
circle_points = 0
square_points = nsamples
for i in range(nsamples ):
a = random.random()
b = random.random()
x = -1 + 2 * a
y = -1 + 2 * b
d = x**2 + y**2
if d <= 1:
circle_points+=1
else:
pass
return 4*(circle_points/square_points)
startP=time.time()
pi = monte_carlo_pi(100000)
endP=time.time()
print('timing using numba', endP-startP)
print(pi)
###Output
_____no_output_____
###Markdown
Python performance exercises Python best practices exercises Exercise 1 Considering the following function for concatenating a list of strings with a delimiter.
###Code
def ft_concatenate(l_strings, d):
"""concatenate list of strings into one string separated by delimeter"""
res = l_strings[0]
for e in l_strings[1:]:
res = res + d + e
return res
###Output
_____no_output_____
###Markdown
- profile the function and identify the bottlenecks.- improve speed up of the function*Hint: you may need to look to the string functions in python documentation*
###Code
l_strings = ["Hey","How","Are","You"]
d=", "
print(ft_concatenate(l_strings,d))
%reload_ext line_profiler
%lprun -f ft_concatenate ft_concatenate(l_strings,d)
#Bottleneck: the for loop, which rebuilds the string on every iteration
def imp_concatenate(l_strings,d):
res = d.join(l_strings)
return res
print(imp_concatenate(l_strings,d))
%lprun -f imp_concatenate imp_concatenate(l_strings,d)
###Output
Hey, How, Are, You
Hey, How, Are, You
###Markdown
Exercise 2In this exercise you will solve the following problem using two methods bruteforce mehode, and fast method.**Problem:** You are given a list of n integers, and your task is to calculate the number of distinct values in the list.**Example**- Input:52 3 2 2 3- Output:3**Implement the following methodes:**1. **bruteforce mehode:** create an empty list and start adding items for the given list without adding the previous item add, at the end the result list will contain unique values, print lenght of the list and you are done. 2. **fast method** think of using Set data structure.- time the two methods, what do you think?
###Code
# bruteforce method
def brute_unique(l):
temp = []
for i in l:
if i not in temp:
temp.append(i)
return(len(temp))
l = [5,2,3,2,2,3]
print(brute_unique(l))
# write fast method
def fast_unique(l):
return(len(set(l)))
fast_unique(l)
# Creat two random lists of numbers for testing
t=[1,4,3,4,6,8,8,9,0,3,1,1,4,6]
# time the two methods
%timeit brute_unique(l)
%timeit fast_unique(l)
%timeit brute_unique(t)
%timeit fast_unique(t)
#We can save a lot of time by optimising a program; this becomes especially noticeable with large amounts of data.
###Output
1.71 µs ± 50.1 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
684 ns ± 9.92 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
3.51 µs ± 126 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
957 ns ± 13.7 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
###Markdown
Cython exercises Exercise 1 1. load the cython extension.
###Code
%reload_ext cython
###Output
_____no_output_____
###Markdown
2. Considering the following polynomial function:
###Code
from time import time
def poly(a,b):
return 10.5 * a + 3 * (b**2)
###Output
_____no_output_____
###Markdown
- Create an equivalent Cython function of `poly` with name `poly_cy` without any Cython improvement, just make its cell a Cython cell. 3. Time the performance of the Python and Cython versions of the function. What is the speed-up factor between the two versions?
###Code
%%cython -a
def poly_cy(a,b):
return 10.5 * a + 3 * (b**2)
start1=time.time()
poly(250,300)
end1=time.time()
print(end1-start1)
start2=time.time()
poly_cy(200,244)
end2=time.time()
print(end2-start2)
###Output
Error compiling Cython file:
------------------------------------------------------------
...
startC=time.time()
#poly_cy(200,244)
endC=time.time()
print(endC-startC)
""""""
^
------------------------------------------------------------
C:\Users\exe\.ipython\cython\_cython_magic_9483644b4811d9fa368db9d2315a274b.pyx:17:0: Unexpected token None:'' in string literal
###Markdown
4. Now lets work on another examples using loop. - rewrite the same function below fib that calculate fibonacci series using cython, but now try to add type for the variables used inside it, add a prefix `_cy` to your new cython function.
###Code
def fib(n):
a, b = 1, 1
for i in range(n):
a, b = a + b, a
return a
%%cython -a
def fib_cy(int n):
cdef int a=1
cdef int b=1
for i in range(n):
a, b = a + b, a
return a
###Output
_____no_output_____
###Markdown
- time the two function for fibonacci series, with n = 20, what is the factor of speed now, What do you think?
###Code
print(fib(20))
print(fib_cy(20))
start1=time()
fib(20)
end1=time()
print(end1-start1)
start2=time()
fib_cy(20)
end2=time()
print(end2-start2)
###Output
_____no_output_____
###Markdown
5. Recursive functions are functions that call themselves during their execution. Another interesting property of the Fibonacci series is that it can be written as a recursive function. That’s because each item depends on the values of other items (namely item n-1 and item n-2)- Rewrite the fib function using recursion. Is it faster than the non-recursive version? Does Cythonizing it give even more of an advantage?
###Code
def recur_fib(n):
if n < 0:
print("L argument doit etre positif")
elif n == 0:
return 0
elif n == 1 or n == 2:
return 1
else:
return recur_fib(n-1)+recur_fib(n-2)
start = time()
recur_fib(20)
end = time()
print(end - start)
%%cython -a
def recur_fib_cy( int n):
if n < 0:
print("L argument doit etre positif")
elif n == 0:
return 0
elif n == 1 or n == 2:
return 1
else:
return recur_fib_cy(n-1)+recur_fib_cy(n-2)
start = time()
recur_fib_cy(20)
end = time()
print(end - start)
###Output
_____no_output_____
###Markdown
Exercise 2- Monte Carlo methods are a broad class of computational algorithms that rely on repeated random sampling to obtain numerical results. - One of the basic examples of getting started with the Monte Carlo algorithm is the estimation of Pi.**Estimation of Pi**- The idea is to simulate random (x, y) points in a 2-D plane with domain as a square of side 1 unit. - Imagine a circle inside the same domain with same diameter and inscribed into the square. - We then calculate the ratio of number points that lied inside the circle and total number of generated points. - Refer to the image below:We know that area of the square is 1 unit sq while that of circle is $\pi \ast (\frac{1}{2})^{2} = \frac{\pi}{4}$. Now for a very large number of generated points, The Algorithm1. Initialize cile_points, square_points and interval to 0.2. Generate random point x.3. Generate random point y.4. Calculate d = x*x + y*y.5. If d <= 1, increment circle_points.6. Increment square_points.7. Increment interval.8. If increment < NO_OF_ITERATIONS, repeat from 2.9. Calculate pi = 4*(circle_points/square_points).10. Terminate. **Your mission:** time the function `monte_carlo_pi`, identify the bottlenecks and create a new version using cython functionality to speed up monte carlo simulation for PI, use 100,000 points and compare the speed up factor between python and cython, considering the following optimizations:- add type for variables used.- add type for the function- use c rand function instead of python rand function. *Hint: you can import function from C libraries using the following approach `from libc. cimport `, replace the holders `` with the right identities for the current problem*
###Code
import random
def monte_carlo_pi(nsamples):
pi = 0.
circle_points = 0
square_points = nsamples
for i in range(nsamples ):
a = random.random()
b = random.random()
x = -1 + 2 * a
y = -1 + 2 * b
d = x**2 + y**2  # use a separate variable for the squared distance instead of reusing pi
if d <= 1:
circle_points+=1
pi = 4*(circle_points/square_points)
return pi
start = time()
pi = monte_carlo_pi(1000000)
end = time()
print(end-start)
%%cython
from time import time
from libc.stdlib cimport rand
cdef extern from "limits.h":
int INT_MAX
def monte_carlo_pi_cy(int nsamples):
cdef float pi = 0.  # pi must be a float: declaring it int truncated the squared distance below
cdef int circle_points = 0
cdef int square_points = nsamples
cdef float a,b,x,y,d
cdef int i
for i in range(nsamples):
a = rand()/float(INT_MAX)
b = rand()/float(INT_MAX)
x = -1 + 2 * a
y = -1 + 2 * b
d = x**2 + y**2  # squared distance from the origin
if d <= 1:
circle_points+=1
pi = 4.0 * circle_points / square_points
return pi
start = time()
pi = monte_carlo_pi_cy(1000000)
end = time()
print(end-start)
###Output
_____no_output_____
###Markdown
Numba exercises Exercise 1 Previously we considered how to approximate $\pi$ by Monte Carlo. - Use the same idea here, but make the code efficient using Numba. - Compare speed with and without Numba when the sample size is large.
###Code
# Your code here
###Output
_____no_output_____
###Markdown
Python performance exercises Python best practices exercises Exercise 1 Considering the following function for concatenating a list of strings with a delimiter.
###Code
def ft_concatenate(l_strings, d):
"""concatenate list of strings into one string separated by delimeter"""
res = l_strings[0]
for e in l_strings[1:]:
res = res + d + e
return res
###Output
_____no_output_____
###Markdown
- profile the function and identify the bottlenecks.- improve speed up of the function*Hint: you may need to look to the string functions in python documentation* Profiling full script
###Code
x=10**70000
x=str(x)
%prun ft_concatenate(x,"1")
###Output
_____no_output_____
###Markdown
Line by line profiling
###Code
%load_ext line_profiler
x=10**70000
x=str(x)
%lprun -f ft_concatenate ft_concatenate(x,"1")
###Output
_____no_output_____
###Markdown
The new version of the function
###Code
def ft_concatenate_optim(l_strings, d):
"""concatenate list of strings into one string separated by delimeter"""
char=list(l_strings)
res=d.join(char)
return res
###Output
_____no_output_____
###Markdown
comparing the two functions
###Code
%timeit ft_concatenate(x,"1")
%timeit ft_concatenate_optim(x,"d")
###Output
_____no_output_____
###Markdown
Exercise 2In this exercise you will solve the following problem using two methods bruteforce mehode, and fast method.**Problem:** You are given a list of n integers, and your task is to calculate the number of distinct values in the list.**Example**- Input:52 3 2 2 3- Output:2**Implement the following methodes:**1. **bruteforce mehode:** create an empty list and start adding items for the given list without adding the previous item add, at the end the result list will contain unique values, print lenght of the list and you are done. 2. **fast method** think of using Set data structure.- time the two methods, what do you think?
###Code
# bruteforce fast method
def distinctValues(L):
res=[]
for i in L:
if i not in res:
res.append(i)
return len(res)
# write fast method
def distinctValues2(L):
res=set(L)
return len(res)
# Creat two random lists of numbers for testing
import random
L = [random.randrange(1, 1000, 1) for i in range(1000000)]
# time the two methods
%timeit distinctValues(L)
%timeit distinctValues2(L)
###Output
_____no_output_____
###Markdown
Cython exercises Exercise 1 1. load the cython extension.
###Code
%load_ext cython
###Output
_____no_output_____
###Markdown
2. Considering the following polynomial function:
###Code
def poly(a,b):
return 10.5 * a + 3 * (b**2)
###Output
_____no_output_____
###Markdown
- Create an equivalent Cython function of `poly` with name `poly_cy` without any Cython improvement, just make its cell a Cython cell. 3. Time the performance of the Python and Cython versions of the function. What is the speed-up factor between the two versions?
###Code
%%cython -a
def poly_cy(a,b):
return 10.5 * a + 3 * (b**2)
###Output
_____no_output_____
###Markdown
Let's time the performance of each function
###Code
%timeit poly(10**200,10**100)
%timeit poly_cy(10**200,10**100)  # same arguments as the Python call above, for a fair comparison
###Output
_____no_output_____
###Markdown
4. Now lets work on another examples using loop. - rewrite the same function below fib that calculate fibonacci series using cython, but now try to add type for the variables used inside it, add a prefix `_cy` to your new cython function.
###Code
def fib(n):
a, b = 1, 1
for i in range(n-1):
a, b = a + b, a
return a
%%cython -a
cpdef int fib_cy(int n):
cdef int i,a,b
a, b = 1, 1
for i in range(n-1):
a, b = a + b, a
return a
###Output
_____no_output_____
###Markdown
- time the two function for fibonacci series, with n = 20, what is the factor of speed now, What do you think?
###Code
# write your code here
%timeit fib(20)
%timeit fib_cy(20)
###Output
_____no_output_____
###Markdown
5. Recursive functions are functions that call themselves during their execution. Another interesting property of the Fibonacci series is that it can be written as a recursive function. That’s because each item depends on the values of other items (namely item n-1 and item n-2)- Rewrite the fib function using recursion. Is it faster than the non-recursive version? Does Cythonizing it give even more of an advantage?
###Code
# write your code here
def fib_reccur(n):
if n==0 or n==1:
return 1
if n==2:
return 2
return fib_reccur(n-1)+fib_reccur(n-2)
%%cython -a
cpdef int fib_reccur_cy(int n):
if n==0 or n==1:
return 1
if n==2:
return 2
return fib_reccur_cy(n-1)+fib_reccur_cy(n-2)
%timeit fib_reccur(20)
%timeit fib_reccur_cy(20)
###Output
_____no_output_____
###Markdown
We see that the non-recursive method is much faster than the recursive method, but when we cythonize both of them they get even faster. Exercise 2- Monte Carlo methods are a broad class of computational algorithms that rely on repeated random sampling to obtain numerical results. - One of the basic examples for getting started with the Monte Carlo algorithm is the estimation of Pi.**Estimation of Pi**- The idea is to simulate random (x, y) points in a 2-D plane whose domain is a square of side 1 unit. - Imagine a circle with the same diameter inscribed into the square. - We then calculate the ratio of the number of points that lie inside the circle to the total number of generated points. - Refer to the image below: We know that the area of the square is 1 unit sq while that of the circle is $\pi \ast (\frac{1}{2})^{2} = \frac{\pi}{4}$, so for a very large number of generated points the ratio of the counts approaches $\frac{\pi}{4}$. The Algorithm: 1. Initialize circle_points, square_points and interval to 0. 2. Generate random point x. 3. Generate random point y. 4. Calculate d = x*x + y*y. 5. If d <= 1, increment circle_points. 6. Increment square_points. 7. Increment interval. 8. If interval < NO_OF_ITERATIONS, repeat from 2. 9. Calculate pi = 4*(circle_points/square_points). 10. Terminate. **Your mission:** time the function `monte_carlo_pi`, identify the bottlenecks and create a new version using Cython functionality to speed up the Monte Carlo simulation for PI; use 100,000 points and compare the speed-up factor between Python and Cython, considering the following optimizations: - add types for the variables used, - add a type for the function, - use the C rand function instead of the Python rand function. *Hint: you can import functions from C libraries with `from libc.<library> cimport <function>`, replacing the placeholders with the right identities for the current problem.*
###Code
import random
def monte_carlo_pi(nsamples):
pi = 0.
cile_points= 0
square_points= 0
interval=0
for i in range(nsamples):
x=random.uniform(-1, 1)
y=random.uniform(-1, 1)
d=x**2 + y**2 # square of distance between M(x,y) and o(0,0)
if d<= 1: # if the point M(x,y) is inside the circle
cile_points+= 1
square_points+= 1
interval+=1
pi=4*(cile_points/square_points)
return pi
###Output
_____no_output_____
###Markdown
profile the function and identify the bottlenecks.
###Code
%prun monte_carlo_pi(1000000)
###Output
_____no_output_____
###Markdown
speed up the function
###Code
%%cython
from libc.stdlib cimport rand, srand, RAND_MAX
from libc.time cimport time
cpdef float monte_carlo_pi_cy(int nsamples):
cdef int i,cile_points=0
cdef float x,y,d,pi
srand(time(NULL))
for i in range(nsamples):
x=rand()/float(RAND_MAX)  # cast so the division is done in floating point
y=rand()/float(RAND_MAX)
d=x**2 + y**2 # square of distance between M(x,y) and o(0,0)
if d<= 1: # if the point M(x,y) is inside the circle
cile_points+= 1
#print((cile_points))
pi=4.0*cile_points/nsamples  # float arithmetic, so the ratio is not truncated
return pi
monte_carlo_pi_cy(100000)
###Output
_____no_output_____
###Markdown
Comparing the speed up factor between the two versions
###Code
%timeit monte_carlo_pi(1000000)
%timeit monte_carlo_pi_cy(1000000)
###Output
1.2 s ± 23.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
88.3 ms ± 381 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
###Markdown
Numba exercises Exercise 1Previously we considered how to approximateby Monte Carlo.- Use the same idea here, but make the code efficient using Numba.- Compare speed with and without Numba when the sample size is large.
###Code
# Your code here
###Output
_____no_output_____ |
Tutorial2/.ipynb_checkpoints/Tutorial 2 - Policy iteration-checkpoint.ipynb | ###Markdown
Example of Policy iteration We will work out an example of the policy iteration algorithm on the Frozen Lake gym env https://gym.openai.com/envs/FrozenLake-v0/ 1) Environment breakdown
###Code
import numpy as np
import gym
from gym.envs.registration import register
# make a non-slippery version of Frozen lake
register(
id='FrozenLakeNotSlippery-v0',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name' : '4x4', 'is_slippery': False},
max_episode_steps=100,
reward_threshold=0.78, # optimum = .8196
)
# create environment
env = gym.make("FrozenLakeNotSlippery-v0")
# make sure it is the correct format
if type(env) == gym.wrappers.time_limit.TimeLimit:
env = env.unwrapped
env.nS # Get the number of states
env.nA # Get the number of actions
print('Environment has %d states and %d actions.' % (env.nS, env.nA))
# display env
env.render()
# The actions referrence
LEFT = 0
DOWN = 1
RIGHT = 2
UP = 3
# The environment dynamics, represented as a dictionary of dictionaries
# getting the info for P(.|s,a)
state = 0
action = LEFT
env.P[state][action] # returns the [(P(s'|s, a), s', reward, is_terminal), ... ]
###Output
_____no_output_____
###Markdown
2) What we need to do Find a policy $\pi(a|s)$, which is a conditional probability distribution that describes how we should act in the environment
###Code
# Example: Uniform random policy
policy = np.ones([env.nS, env.nA]) / env.nA
policy
###Output
_____no_output_____
###Markdown
Demonstration of running this policy in the environment
###Code
def run_policy(env, policy):
'''
Function for running a policy on an environment.
Its just a way to visualize what you policy is doing in the env
'''
# initialize the env
state = env.reset()
env.render()
print("\n ------ \n")
total_reward = 0
num_steps = 0
# iterate through the environment until termination
while True:
action = np.random.choice(env.nA, 1, p = policy[state])[0] # sample an action from your policy
next_state, reward, is_terminal, _ = env.step(action) # use that action on the env and get info back
env.render()
print("\n ------ \n")
state = next_state
total_reward += reward
num_steps += 1
if is_terminal:
break
print("The episode ended after {0} steps with a total reward of {1}".format(num_steps, total_reward))
run_policy(env, policy)
###Output
[41mS[0mFFF
FHFH
FFFH
HFFG
------
(Left)
[41mS[0mFFF
FHFH
FFFH
HFFG
------
(Left)
[41mS[0mFFF
FHFH
FFFH
HFFG
------
(Up)
[41mS[0mFFF
FHFH
FFFH
HFFG
------
(Down)
SFFF
[41mF[0mHFH
FFFH
HFFG
------
(Down)
SFFF
FHFH
[41mF[0mFFH
HFFG
------
(Down)
SFFF
FHFH
FFFH
[41mH[0mFFG
------
The episode ended after 6 steps with a total reward of 0.0
###Markdown
3) Policy Evaluation
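For reference, the update implemented by `policy_evaluation` below is the iterative Bellman expectation backup $V(s) \leftarrow \sum_{a} \pi(a|s) \sum_{s'} P(s'|s,a)\,\big[r(s,a,s') + \gamma V(s')\big]$, swept over all states until the largest change $\delta$ falls below the tolerance $\epsilon$.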
###Code
def policy_evaluation(env, policy, discount = 1.0, epsilon = 10 ** -8):
V = np.zeros(env.nS) # initialize the states to zero
while(True):
delta = 0
for state in range(env.nS):
v_old = V[state]
sum = 0
for action, action_prob in enumerate(policy[state]):
for state_prob, next_state, reward, end in env.P[state][action]:
sum += action_prob * state_prob * (reward + discount * V[next_state])
V[state] = sum
delta = max(delta, np.abs(V[state] - v_old))
if delta < epsilon: return V
# random policy evaluation
V = policy_evaluation(env, policy, discount = 0.9)
V
###Output
_____no_output_____
###Markdown
4) Policy Improvement First, let's talk about action values
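The action value computed by `get_q_values` below is $Q(s,a) = \sum_{s'} P(s'|s,a)\,\big[r(s,a,s') + \gamma V(s')\big]$, i.e. the expected return of taking action $a$ in state $s$ and then following the current value estimate.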
###Code
def get_q_values(env, state, V, discount = 1):
'''
Function to return the all the action values of the form Q(s,a) for a given state s.
'''
action_values = np.zeros(env.nA)
for action in range(env.nA):
for prob, next_state, reward, done in env.P[state][action]:
action_values[action] += prob * (reward + discount * V[next_state])
return action_values
###Output
_____no_output_____
###Markdown
Policy improvement is just being greedy
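Concretely, the improvement step below replaces the policy in every state with the greedy choice $\pi'(s) = \arg\max_{a} Q(s,a)$.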
###Code
def policy_improvement(env, policy, V, discount = 1.0):
for state in range(env.nS):
action_values = get_q_values(env, state, V, discount = discount) # get the q values associated to state s
best_action = np.argmax(action_values) # choose best action
policy[state] = np.eye(env.nA)[best_action] # greedy update of policy
return policy
###Output
_____no_output_____
###Markdown
5) Policy iteration
###Code
def policy_iteration(env, discount = 1.0):
policy = np.ones([env.nS, env.nA]) / env.nA # create a random uniform policy
while True:
old_policy = policy
V = policy_evaluation(env, policy, discount = discount) # evaluate the policy
policy = policy_improvement(env, policy, V, discount = discount) # improve it
# stop when the policy doesn't change anymore; at that point it is the optimal policy
if np.array_equal(old_policy, policy):
break
return policy
env.reset()
opt_policy = policy_iteration(env, discount = 0.9)
# Recall
LEFT = 0
DOWN = 1
RIGHT = 2
UP = 3
opt_policy
env.render()
# run the optimal policy
run_policy(env, opt_policy)
###Output
[41mS[0mFFF
FHFH
FFFH
HFFG
------
(Down)
SFFF
[41mF[0mHFH
FFFH
HFFG
------
(Down)
SFFF
FHFH
[41mF[0mFFH
HFFG
------
(Right)
SFFF
FHFH
F[41mF[0mFH
HFFG
------
(Down)
SFFF
FHFH
FFFH
H[41mF[0mFG
------
(Right)
SFFF
FHFH
FFFH
HF[41mF[0mG
------
(Right)
SFFF
FHFH
FFFH
HFF[41mG[0m
------
The episode ended after 6 steps with a total reward of 1.0
|
V253 Absorption von Alpha-, Beta- und Gamma-Strahlen/V253 Absorption von alpha-, beta- und gamma-Strahlung.ipynb | ###Markdown
Experiment 253: Absorption of $\alpha$, $\beta$ and $\gamma$ Radiation. Analysis of the measurement results
###Code
#Benötigte Pakete
import numpy as np
from scipy.optimize import curve_fit
import scipy.integrate as integrate
from scipy.stats import chi2
import matplotlib.pyplot as plt
%matplotlib inline
#Plot settings
plt.style.use('classic')
plt.rcParams["font.family"]='serif'
plt.rcParams["figure.figsize"][0] = 10
plt.rcParams["figure.figsize"][1] = 7
plt.rcParams['errorbar.capsize']=2
###Output
_____no_output_____
###Markdown
Part 1: Measurement of the background rate
###Code
#Betriebsspannung
U1=520 #V
U1_err=5 #V
#Zerfälle in 5 Minuten = 300s
n0=140
n0_err=np.sqrt(140)
#Normierung auf Zerfälle/Sekunde
n0_s=140/300
n0_s_err=n0_err/300
###Output
_____no_output_____
###Markdown
Part 2: Absorption of $\beta$ radiation in aluminium
###Code
#Kennnummer des Präparats: GS 527
#Anzahl Zerfälle
n_beta=np.array([1574,967,678,443,315,181,125,307,166,122,75,67,359])
n_beta_err=np.sqrt(n_beta)
#Torzeit
T=np.array([30,30,30,30,30,30,30,120,120,120,120,120,300])
#Absorberdicke in mm
x1=np.array([0,0.3,0.6,0.9,1.2,1.5,1.8,2.1,2.4,2.7,3.0,3.3,4.3])
#aus letztem Messwert wird n0_beta bestimmt, n0_beta in Zerfälle/s
n0_beta=n_beta[12]/300
n0_beta_err=n_beta_err[12]/300
#Abzug der Nullmessung unter Berücksichtigung der jeweiligen Torzeit, angegeben in Zerfälle/s
n_beta_korr=n_beta/T-n0_beta
n_beta_korr_err=np.sqrt((n_beta_err/T)**2+n0_beta_err**2)
#Plot der Messdaten
plt.errorbar(x1,n_beta_korr, yerr=n_beta_korr_err,linewidth=1,linestyle='',marker='.',color='darkblue',label='Messwerte mit Fehler')
plt.plot(2*[2.7],[1e-2,1e2],color='darkred',label='geschätzte Maximalreichweite mit Fehler')
plt.plot(2*[2.6],[1e-2,1e2],color='darkred',linestyle='--')
plt.plot(2*[2.8],[1e-2,1e2],color='darkred',linestyle='--')
plt.xlabel(r'Absorberdicke $ x$ [mm]', size=20)
plt.ylabel(r'Anzahl der Zerfälle [$\frac{1}{s}$]', size=20)
plt.yscale('log')
plt.legend(frameon=True,fontsize='medium',loc='best', borderpad=1.2, borderaxespad=0.8)
plt.title(r'Diagramm 1a: Absorption von $\beta$-Strahlung in Aluminium', size=20)
plt.grid(ls='dotted')
#Abspeichern des Bildes im Format "PDF"
plt.tight_layout()
plt.savefig('Diagramme/V253Diagramm1a.pdf', format='PDF')
#Plot der Messdaten nochmal nicht logarithmisch, da einige Messwerte eigentlich nur Untergrund waren
#und deshalb jetzt teilweise negativ sind
plt.errorbar(x1,n_beta_korr, yerr=n_beta_korr_err,linewidth=1,linestyle='',marker='.',color='darkblue',label='Messwerte mit Fehler')
plt.plot(2*[2.7],[-20,1e2],color='darkred',label='geschätzte Maximalreichweite mit Fehler')
plt.plot(2*[2.6],[-20,1e2],color='darkred',linestyle='--')
plt.plot(2*[2.8],[-20,1e2],color='darkred',linestyle='--')
plt.xlabel(r'Absorberdicke $ x$ [mm]', size=20)
plt.ylabel(r'Anzahl der Zerfälle [$\frac{1}{s}$]', size=20)
plt.legend(frameon=True,fontsize='large',loc='best', borderpad=1.5, borderaxespad=1.5)
plt.title(r'Diagramm 1b: Absorption von $\beta$-Strahlung in Aluminium', size=20)
plt.grid(ls='dotted')
#Abspeichern des Bildes im Format "PDF"
plt.tight_layout()
plt.savefig('Diagramme/V253Diagramm1b.pdf', format='PDF')
#Maximalreichweite x_max aus Plots
x_max=2.7e-1 #cm
x_max_err=0.1e-1
rho_alu=2.6989 #g/cm^3
R_beta_ES=0.130 #g/cm^2
#Flächendichte R_beta ergibt sich aus R_beta=rho_alu*x_max+R_beta_ES
R_beta=rho_alu*x_max+R_beta_ES
R_beta_err=rho_alu*x_max_err
print('Die Flächendichte R_beta berechnet sich zu:')
print('R_beta = (',R_beta,' +/- ',R_beta_err,') g/cm^2')
#Mit Diagramm 8 entspricht dies einer Energie von (1.8+/-0.2)MeV (Fehler aus Diagramm abgeschätzt)
E_max=1.8 #MeV
E_max_err=0.2
print('Dies entspricht einer Maximalenergie von E_max = (',E_max,'+/-',E_max_err,') MeV')
print()
#Vergleich mit Literaturwert (aus Skript) liefert:
E_max_lit=2.274 #MeV
diff=np.abs(E_max-E_max_lit)
diff_err=E_max_err
print('Der Vergleich mit dem Literaturwert ('+str(E_max_lit)+' MeV) liefert:')
print('∆E_max =(' + str(diff) + ' +/- ' + str(diff_err)+ ') MeV')
print('Damit folgt Sigma =',round(diff/diff_err,2))
###Output
Die Flächendichte R_beta berechnet sich zu:
R_beta = ( 0.8587030000000001 +/- 0.026989000000000003 ) g/cm^2
Dies entspricht einer Maximalenergie von E_max = ( 1.8 +/- 0.2 ) MeV
Der Vergleich mit dem Literaturwert (2.274 MeV) liefert:
∆E_max =(0.474 +/- 0.2) MeV
Damit folgt Sigma = 2.37
###Markdown
Part 3: Absorption of $\gamma$ radiation in lead
###Code
#Kennnummer des Präparats: A AE 8603
#Anzahl Zerfälle
n_gamma=np.array([3950,2623,2052,1566,1094,861,621,513,377,306,225])
n_gamma_err=np.sqrt(n_gamma)
#Torzeit immer 60s
#Absorberdicke in mm
x2=10*np.array([0,0.5,1.0,1.5,2.0,2.5,3.0,3.5,4.0,4.5,5.0])
#Abzug der Nullmessung unter Berücksichtigung der jeweiligen Torzeit
n_gamma_korr=1/60*(n_gamma-(n0_s*60)*np.ones(11)) #in Zerfälle/s
n_gamma_korr_err=1/60*np.sqrt(n_gamma_err**2+(60*n0_s_err*np.ones(11))**2)
#linearer Fit
def exp_fit(x,a,b):
return a*np.exp(-b*x)
popt,pcov=curve_fit(exp_fit,x2,n_gamma_korr,sigma=n_gamma_korr_err,p0=[62,0.05])
#Plot der Messdaten
plt.errorbar(x2,n_gamma_korr, yerr=n_gamma_korr_err,linewidth=1,linestyle='',marker='.',color='darkblue',label='Messwerte mit Fehler')
plt.plot(np.linspace(-3,53,2),exp_fit(np.linspace(-3,53,2),*popt),color='darkred',label='Linearer Fit', linewidth=0.8)
plt.xlabel(r'Absorberdicke $ x$ [mm]', size=20)
plt.ylabel(r'Anzahl der Zerfälle [$\frac{1}{s}$]', size=20)
plt.yscale('log')
plt.legend(frameon=True,fontsize='large',loc='best', borderpad=1.5, borderaxespad=1.5)
plt.title(r'Diagramm 2: Absorption von $\gamma$-Strahlung in Blei', size=20)
plt.grid(ls='dotted')
#Abspeichern des Bildes im Format "PDF"
plt.tight_layout()
plt.savefig('Diagramme/V253Diagramm2.pdf', format='PDF')
#Güte des Fits
chi_squared2=np.sum((exp_fit(x2,*popt)-n_gamma_korr)**2/n_gamma_korr_err**2)
#Freiheitsgrade
dof2=len(n_gamma_korr)-2
chi_squared2_red=chi_squared2/dof2
print('Wir erhalten die nachfolgenden Werte für die Güte des Fits:')
print('chi_squared= ' + str(chi_squared2))
print('chi_squared_red= ' + str(chi_squared2_red))
print()
#Fitwahrscheinlichkeit
prob2=round(1-chi2.cdf(chi_squared2,dof2),2)*100
print('Die Fitwahrscheinlichkeit beträgt: ' + str(prob2) + ' %')
#Schwächungskoeffizient aus Geradensteigung
mu=10*popt[1] #1/cm
mu_err=10*(pcov[1,1])**0.5
print('Aus dem linearen Fit folgt für den Schwächungskoeffizienten:')
print('mu = (' + str(mu) + ' +/- ' + str(mu_err) + ') 1/cm' )
#Massenschwächungskoeffizient
rho_pb=11.342 #g/cm^3
mu_rho=mu/rho_pb #cm^2/g
mu_rho_err=mu_rho*(mu_err/mu)
print()
print('Für den materialunabhängigen Massenschwächungskoeffizienten mu/rho folgt:')
print('mu/rho = (' + str(mu_rho) + ' +/- ' + str(mu_rho_err) + ') cm^2/g')
###Output
Aus dem linearen Fit folgt für den Schwächungskoeffizienten:
mu = (0.598944335731 +/- 0.0128840856391) 1/cm
Für den materialunabhängigen Massenschwächungskoeffizienten mu/rho folgt:
mu/rho = (0.0528076473048 +/- 0.00113596240866) cm^2/g
###Markdown
Reading off the corresponding energy from Diagram 9 in the manual gives: $E_\gamma= 1.4 \pm 0.1 \ MeV$. According to the script, the two possible transitions have the energies $E_1= 1.173\ MeV$ and $E_2= 1.333\ MeV$. The measured value is compatible with the literature values within the 3$\sigma$ and 1$\sigma$ intervals, respectively. Part 4: Activity of the $\gamma$ source at hand
###Code
#Abstand Präparat und Zählrohr
d=np.array([0.05,0.10,0.20]) #m
d_err=3e-3*np.ones(3)
#registrierte Zerfälle, direkt in Zerfälle/s
N=1/60*np.array([39504,11939,3164])
N_err=1/60*np.sqrt(60*N)
#Radius des Zählrohrs
r=7e-3 #m
#Raumwinkel
Omega=np.pi*r**2/d**2
Omega_err=Omega*2*d_err/d
#Ansprechwahrscheinlichkeit Zählrohr für gamma-Strahlung
epsilon= 0.04
epsilon_err=epsilon*0.03
#Aktivität für gesamte Kugelfläche
A=0.5*4*N/epsilon*d**2/r**2 #Faktor 0.5, da 2 gamma-Quanten pro Zerfall
A_err=A*np.sqrt((N_err/N)**2+(d_err/d)**2+(epsilon_err/epsilon)**2)
print('Für die Aktivität A des Gamma-Strahlers folgt:')
print('A = ' + str(A*1e-3)+' kBq')
print('+/- ' + str(A_err*1e-3)+' kBq')
#Herstellerangabe: 3700kBq am 2.2.2015
A0=3700e3 #Bq
#Versuchsdatum: 5.3.2018; Differenz:365+366+365+28+3=1127 Tage = 3.086 Jahre
del_t=3.086 #Jahre
#Halbwertszeit nach Skript T12=5.27 Jahre
T12=5.27 #Jahre
#Aktivität am Versuchstag
A_lit=A0*np.exp(-np.log(2)*del_t/T12)
print('Für die Aktivität A_lit des Gamma-Strahlers am Versuchstag folgt:')
print('A_lit = ' + str(A_lit*1e-3)+' kBq')
#Vergleich der Messwerte mit Literaturwert
diff_A=np.abs(A-A_lit)
diff_A_err=A_err
print('Der Vergleich mit dem Literaturwert liefert:')
print('diff = ' + str(diff_A*1e-3)+' kBq')
print(' +/- ' + str(diff_A_err*1e-3)+' kBq')
print('Damit folgt Sigma =',(diff_A/diff_A_err).round(2))
###Output
Der Vergleich mit dem Literaturwert liefert:
diff = [ 786.02141874 435.17107861 313.2323031 ] kBq
+/- [ 112.98690428 88.12584794 81.7070466 ] kBq
Damit folgt Sigma = [ 6.96 4.94 3.83]
###Markdown
4.1: Solid-angle correction
###Code
#Länge l des Zählrohrs: l=4cm
l=4e-2 #m
#Korrekturfaktor k1=(1+0.5*l/d)**2
k1=(1+0.5*l/d)**2
k1_err=2*(1+0.5*l/d)*0.5*l/d**2*d_err
print('Der Korrekturfaktor k1 ergibt sich zu:')
print('k1 = ' + str(k1))
print(' +/- ' + str(k1_err))
print()
#korrigierte Aktivität A_korr=A*k1
A_korr=A*k1
A_korr_err=A_korr*np.sqrt((A_err/A)**2+(k1_err/k1)**2)
print('Die korrigierte Aktivität A_korr ergibt sich zu:')
print('A_korr = ' + str(A_korr*1e-3)+' kBq')
print(' +/- ' + str(A_korr_err*1e-3)+' kBq')
#Vergleich der Messwerte mit Literaturwert
diff_A_korr=np.abs(A_korr-A_lit)
diff_A_korr_err=A_korr_err
print('Der Vergleich mit dem Literaturwert liefert:')
print('diff = ' + str(diff_A_korr*1e-3)+' kBq')
print(' +/- ' + str(diff_A_korr_err*1e-3)+' kBq')
print('Damit folgt Sigma =',(diff_A_korr/diff_A_korr_err).round(2))
###Output
Der Vergleich mit dem Literaturwert liefert:
diff = [ 826.38674452 458.22347922 138.7676969 ] kBq
+/- [ 248.55851575 130.22596525 99.12034548] kBq
Damit folgt Sigma = [ 3.32 3.52 1.4 ]
###Markdown
4.2: Absorption correction
###Code
#Präparatkapseldaten
d=0.14 #cm
rho_kapsel=7.9 #g/cm^3
#mu_rho aus vorherigem Aufgabenteil
mu_kapsel=mu_rho*rho_kapsel #1/cm
mu_kapsel_err=mu_kapsel*mu_rho_err/mu_rho
#Korrekturfaktor k2=exp(-mu*x)
k2=np.exp(-mu_kapsel*d)
k2_err=k2*d*mu_kapsel_err
print('Der Korrekturfaktor k2 ergibt sich zu:')
print('k2 = ' + str(k2)+' +/- ' + str(k2_err))
print()
#korrigierte Aktivität A_korr2=A_korr*k2
A_korr2=A_korr*k2
A_korr2_err=A_korr2*np.sqrt((A_korr_err/A_korr)**2+(k2_err/k2)**2)
print('Die korrigierte Aktivität A_korr2 ergibt sich zu:')
print('A_korr2 = ' + str(A_korr2*1e-3)+' kBq')
print(' +/- ' + str(A_korr2_err*1e-3)+' kBq')
#Vergleich der Messwerte mit Literaturwert
diff_A_korr2=np.abs(A_korr2-A_lit)
diff_A_korr2_err=A_korr2_err
print('Der Vergleich mit dem Literaturwert liefert:')
print('diff = ' + str(diff_A_korr2*1e-3)+' kBq')
print(' +/- ' + str(diff_A_korr2_err*1e-3)+' kBq')
print('Damit folgt Sigma =',(diff_A_korr2/diff_A_korr2_err).round(2))
###Output
Der Vergleich mit dem Literaturwert liefert:
diff = [ 639.62369459 292.3472137 8.98507644] kBq
+/- [ 234.4896521 122.88679547 93.54794041] kBq
Damit folgt Sigma = [ 2.73 2.38 0.1 ]
###Markdown
Part 5: Absorption measurement and energy determination of $\alpha$ radiation
###Code
#Anzahl Zerfälle
n_alpha=np.array([13387,13206,12998,12743,12029,9735,6682,3737,1094,423,253,222,243,246,241])
n_alpha_err=np.sqrt(n_alpha)
#Torzeit immer 60s
#Druck in mbar
p=np.array([22,123,220,319,353,390,420,448,488,517,567,619,717,818,916])
#Plot der Messdaten
plt.errorbar(p,n_alpha, yerr=n_alpha_err,linewidth=1,linestyle='',marker='.',color='darkblue',label='Messwerte mit Fehler')
plt.xlabel(r'Druck $ p$ [mbar]', size=20)
plt.ylabel(r'Anzahl der Zerfälle [$\frac{1}{60s}$]', size=20)
plt.plot([0,1e3],2*[0.5*n_alpha[0]],color='darkred',label='halbe Maximalzählrate')
#plt.yscale('log')
plt.legend(frameon=True,fontsize='large',loc='best', borderpad=1.2, borderaxespad=0.8)
plt.title(r'Diagramm 3: Absorption von $\alpha$-Strahlung in Luft', size=20)
plt.grid(ls='dotted')
#Abspeichern des Bildes im Format "PDF"
plt.tight_layout()
plt.savefig('Diagramme/V253Diagramm3.pdf', format='PDF')
#Druck p1, bei dem die Zählrate auf die Hälfte abgefallen ist
p1=420 #mbar
p1_err=10
#Standard pressure
p0=1013 #mbar
#Distance between source and counter tube
s0=4.20 #cm
s0_err=0.05
#Range at p1
s1=p1/p0*s0
s1_err=s1*np.sqrt((p1_err/p1)**2+(s0_err/s0)**2)
#Corrections
#Areal density of the mica window
rho_Gl=2.25 #mg/cm^2
s2=rho_Gl/1.43 #cm
#Protective gold layer
s3=0.68 #cm
s_ges=s1+s2+s3
s_ges_err=s1_err
print('Die Gesamtreichweite ergibt sich zu:')
print('s_ges = (' + str(s_ges)+' +/- ' + str(s_ges_err)+') cm')
#From diagram 8 the energy comes out to E_alpha= 5.6 +/- 0.2 MeV
E_alpha=5.6 #MeV
E_alpha_err=0.2
#Comparison with the literature value (from the lab script) gives:
E_alpha_lit=5.48 #MeV
diff2=np.abs(E_alpha-E_alpha_lit)
diff2_err=E_alpha_err
print('Der Vergleich mit dem Literaturwert ('+str(E_alpha_lit)+' MeV) liefert:')
print('∆E_alpha =(' + str(diff2) + ' +/- ' + str(diff2_err)+ ') MeV')
print('Damit folgt Sigma =',round(diff2/diff2_err,2))
###Output
Der Vergleich mit dem Literaturwert (5.48 MeV) liefert:
∆E_alpha =(0.12 +/- 0.2) MeV
Damit folgt Sigma = 0.6
|
tutoriels/sqlalchemy-orm-vs-core.ipynb | ###Markdown
Import of the modules needed throughout the notebook
###Code
import pandas as pd
from sqlalchemy import create_engine
###Output
_____no_output_____
###Markdown
1. Reading the data and thinking about the model We import data into `pandas`.
###Code
# Force the dtype to str so nothing gets converted to int or float
df = pd.read_csv('sirene-dummy.csv', dtype=str)
df
###Output
_____no_output_____
###Markdown
Imagine this is the "file of the day" from the SIRENE database, and we want to keep it in a local database (to build a history). We can simply do:
###Code
# Create a dummy sqlite database in the PC's RAM, just for the exercise
engine = create_engine('sqlite://')
connection = engine.connect()
# Write the table (which creates it in the process).
df.to_sql('stock-sirene', connection, if_exists='replace', index=False)
# Let's check
pd.read_sql_table('stock-sirene', connection)
connection.close()
###Output
_____no_output_____
###Markdown
This is, however, **not very satisfactory**: - some data is needlessly repeated (the company name, the city); - we make no effort to describe the structure of the data. 2. A new hope: the data model In reality we have (at least) **three kinds of objects** involved: - a company, - an establishment (branch), - a city. Let's describe them. Companies
###Code
# We need the SIREN and the company name
dft = df[['SIREN', 'NOM ENTREPRISE']].drop_duplicates()
dft = dft.rename(columns={
'SIREN': 'id',
'NOM ENTREPRISE': 'name'
})
dft
# And send it to SQL
with engine.connect() as connection:
dft.to_sql('entreprises', connection, if_exists='replace')
###Output
_____no_output_____
###Markdown
Establishments
###Code
# We need the SIRET, the establishment name and the city
dft = df[['SIREN', 'NIC', 'NOM ETABLISSEMENT', 'VILLE']]
dft['SIRET'] = dft['SIREN'] + dft['NIC']
dft = dft.drop(columns=['NIC'])
dft = dft.rename(columns={
'NOM ETABLISSEMENT': 'name',
'VILLE': 'cityname',
'SIRET': 'id',
'SIREN': 'companyid'
})
dft
# And send it to SQL
with engine.connect() as connection:
dft.to_sql('etablissements', connection, if_exists='replace')
###Output
_____no_output_____
###Markdown
**Note:** We could also create a table for the cities, so that the companies and establishments tables would only store, for example, the city's INSEE code (see the sketch below). 3. So why an ORM? We have seen that it is very easy and effective to use `SQLAlchemy`'s core tools directly to export to a SQL database. On the query side, however, the ORM is very convenient. It also forces us to structure our model even further. We need two or three extra things
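Before adding those, here is a minimal sketch (hypothetical, not used in the rest of this tutorial) of what such a dedicated city table could look like:
###Code
# Hypothetical sketch of a dedicated city table keyed by the INSEE code,
# so the other tables would only need to store that code.
from sqlalchemy import Column, String
from sqlalchemy.ext.declarative import declarative_base
SketchBase = declarative_base()
class City(SketchBase):
    __tablename__ = "villes"
    insee = Column(String(5), primary_key=True)  # INSEE city code
    name = Column(String)
###Output
_____no_output_____
###Markdown
Back to the ORM setup: the extra imports and model definitions.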
###Code
from sqlalchemy import Column, create_engine, ForeignKey, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
Base = declarative_base()
class Company(Base):
    # the table name in the database
__tablename__ = "entreprises"
    # the column definitions (with a bit more detail)
id = Column(String(9), primary_key=True) # SIREN
name = Column(String)
    # relationship: only useful on the Python side, it does not change the table.
branches = relationship("Branch", back_populates="company")
    # and this is just to display "Company" objects nicely
def __repr__(self):
return "<Company(id={}, name={})>".format(self.id, self.name)
class Branch(Base):
    # the table name in the database
__tablename__ = "etablissements"
    # the column definitions (with a bit more detail)
id = Column(String(14), primary_key=True) # SIRET
companyid = Column(String(9), ForeignKey('entreprises.id')) # SIREN
name = Column(String)
cityname = Column(String)
    # relationship: only useful on the Python side, it does not change the table.
company = relationship("Company", back_populates="branches")
    # and this is just to display "Branch" objects nicely
def __repr__(self):
return "<Branch(id={}, name={})>".format(self.id, self.name)
###Output
_____no_output_____
###Markdown
We run this once to create the schema in the database and bind it to the Python model.
###Code
Base.metadata.create_all(bind=engine)
###Output
_____no_output_____
###Markdown
Now we can query
###Code
Session = sessionmaker(bind=engine)
session = Session()
session.query(Branch).all()
###Output
_____no_output_____
###Markdown
Note that what we get back is a real Python object, not just a list of rows:
###Code
b = session.query(Branch).first()
print(type(b))
session.close()
###Output
_____no_output_____
###Markdown
And run more complex/nicer queries For more, see: http://docs.sqlalchemy.org/en/latest/orm/tutorial.html#querying
###Code
session = Session()
c = session.query(Company) \
.filter_by(name='FNAC') \
.first()
c
###Output
_____no_output_____
###Markdown
And what are the attributes created with the `relationship` function for?
###Code
print(c.branches)
###Output
[<Branch(id=12345678900001, name=FNAC ANGERS)>, <Branch(id=12345678900002, name=FNAC ROUEN)>, <Branch(id=12345678900003, name=FNAC PARIS)>]
###Markdown
We can then easily loop over them, etc. The syntax is quite pleasant.
###Code
for branch in c.branches:
print(branch.name)
###Output
FNAC ANGERS
FNAC ROUEN
FNAC PARIS
###Markdown
For `join` and the like, see: http://docs.sqlalchemy.org/en/latest/orm/tutorial.html#querying-with-joins
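A small join sketch using the models defined above (a hedged example; 'PARIS' is assumed to be one of the city values in the dummy data):
###Code
# Sketch: companies that have at least one branch in a given city.
# The ForeignKey between the two tables lets SQLAlchemy infer the join condition.
for company in session.query(Company).join(Branch).filter(Branch.cityname == 'PARIS').distinct():
    print(company.name)
###Output
_____no_output_____
###Markdown
We then close the session: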
###Code
session.close()
###Output
_____no_output_____
###Markdown
Export to a DataFrame
###Code
session = Session()
pd.read_sql_query(session.query(Branch).statement, session.bind)
session.close()
###Output
_____no_output_____ |
exploratory/Sean's Notebook/GetDataCopy.ipynb | ###Markdown
Simple OLS
###Code
def get_XY(features, delta_y, look_back_y, y_generator, fips=get_fips(), moving_window=True):
df = df_jhu[features]
df = df[df.fips % 1000 != 0] # remove aggregate states
df = df[df.State != 'PR'] # peurto rico has some weird data...
df = df[df.POP_ESTIMATE_2018 > 1000] # restrict to large counties since getting lots of data is difficult
# fill out missing data
df.at['02158', 'Area in square miles - Land area'] = 19673
df.at['02158', 'Density per square mile of land area - Population'] = 0.44
df.at['46102', 'Area in square miles - Land area'] = 2097
df.at['46102', 'Density per square mile of land area - Population'] = 6.5
df = df.iloc[np.where(df['fips'].map(lambda f : f in fips))]
n, d = df.shape
col_names = []
for i in range(look_back_y):
col_name = "y at t = -%d" %i
col_names.append(col_name)
df[col_name] = np.zeros(n)
X = []
Y = []
for fips in df.index:
base = df.loc[fips].values
try:
t, ys = y_generator.getY(int(fips))
except KeyError:
continue
if len(ys) == 0:
X.append(base)
Y.append(0)
continue
for j in range(-1, -len(ys), -1):
base = df.loc[fips].values
Y.append(ys[j])
for i in range(look_back_y):
if j - delta_y - i >= -len(ys):
base[-look_back_y + i] = ys[j - delta_y - i]
X.append(base)
if not moving_window:
break
df_new = pd.DataFrame(X, columns = df.columns)
df_new['target'] = Y
return df_new
def Rsquare(pred, actual):
return np.corrcoef(pred, actual)[0,1]**2
df_new = get_XY(features, 1, 5, delta_death_counter)
X = df_new.iloc[:,2:-1]
y = df_new['target'].values
plt.hist(y,bins=25)
print(np.sum(y > 10))
train_num = int(y.shape[0] * 0.66)
X_test = X.iloc[train_num:]
y_test = y[train_num:]
X_train = X.iloc[:train_num]
y_train = y[:train_num]
model = sm.OLS(y_train,X_train)
result = model.fit()
print(result.summary())
def Rsquare(pred, actual):
return np.corrcoef(pred, actual)[0,1]**2
print("R squared (out sample) is %f" % (Rsquare(result.predict(X_test), y_test)))
plt.scatter(result.predict(X_test), y_test)
plt.xlabel("Predicted Deaths")
plt.ylabel("Actual Deaths")
###Output
_____no_output_____
###Markdown
LightGBM
###Code
gbm = lgb.LGBMRegressor(num_leaves=5,
learning_rate=0.05,
n_estimators=200);
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric='l2',
early_stopping_rounds=10);
pred_train = gbm.predict(X_train, num_iteration=gbm.best_iteration_)
print("R squared (in sample) is %f" % Rsquare(pred_train, y_train))
pred_test = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
print("R squared (out sample) is %f" % Rsquare(pred_test, y_test))
plt.scatter(pred_test, y_test)
###Output
[1] valid_0's l2: 0.913215
Training until validation scores don't improve for 10 rounds
[2] valid_0's l2: 0.899906
[3] valid_0's l2: 0.890017
[4] valid_0's l2: 0.875959
[5] valid_0's l2: 0.865137
[6] valid_0's l2: 0.854416
[7] valid_0's l2: 0.84565
[8] valid_0's l2: 0.838149
[9] valid_0's l2: 0.833085
[10] valid_0's l2: 0.827167
[11] valid_0's l2: 0.822566
[12] valid_0's l2: 0.821444
[13] valid_0's l2: 0.819189
[14] valid_0's l2: 0.819564
[15] valid_0's l2: 0.816141
[16] valid_0's l2: 0.810641
[17] valid_0's l2: 0.810937
[18] valid_0's l2: 0.809621
[19] valid_0's l2: 0.809253
[20] valid_0's l2: 0.806446
[21] valid_0's l2: 0.803607
[22] valid_0's l2: 0.802451
[23] valid_0's l2: 0.80022
[24] valid_0's l2: 0.798681
[25] valid_0's l2: 0.797296
[26] valid_0's l2: 0.794282
[27] valid_0's l2: 0.794326
[28] valid_0's l2: 0.795491
[29] valid_0's l2: 0.797951
[30] valid_0's l2: 0.79608
[31] valid_0's l2: 0.803117
[32] valid_0's l2: 0.804089
[33] valid_0's l2: 0.802887
[34] valid_0's l2: 0.810012
[35] valid_0's l2: 0.812133
[36] valid_0's l2: 0.812082
Early stopping, best iteration is:
[26] valid_0's l2: 0.794282
R squared (in sample) is 0.599798
R squared (out sample) is 0.111430
###Markdown
Augmented Dickey Fuller Test for Stationary Time Series
###Code
X, y = delta_death_counter.getY(17001)
adfTest = ts.adfuller(y, autolag='AIC')
dfResults = pd.Series(adfTest[0:4], index=['ADF Test Statistic','P-Value','# Lags Used','# Observations Used'])
#Add Critical Values
for key,value in adfTest[4].items():
dfResults['Critical Value (%s)'%key] = value
print('Augmented Dickey-Fuller Test Results:')
print(dfResults)
###Output
Augmented Dickey-Fuller Test Results:
ADF Test Statistic NaN
P-Value NaN
# Lags Used 0.000000
# Observations Used 33.000000
Critical Value (1%) -3.646135
Critical Value (5%) -2.954127
Critical Value (10%) -2.615968
dtype: float64
###Markdown
The ADF statistic is NaN for this county (likely because the series is nearly constant over the window), so this single-county test is inconclusive; the loop below runs the test across all counties instead.
###Code
fips_list = get_fips()
stationary = 0
non_stationary = 0
for fips in fips_list:
X, y = delta_death_ratio_counter.getY(fips)
if len(y) < 6:
continue
adfTest = ts.adfuller(y, maxlag=4)
if adfTest[1] > 0.05:
non_stationary += 1
else:
stationary += 1
print ("stationary %d, non-stat %d" % (stationary, non_stationary))
###Output
_____no_output_____
###Markdown
ARIMA Model
###Code
# some counties: King (53033), NYC (36061), Chicago (17031), LA (6037), Wayne, MI (26163), Queens(36059), King (nyc) (36047)
X, y = get_cum_deaths(53033)
plt.plot(X,y)
autocorrelation_plot(y)
plt.plot(pacf(y))
model = ARIMA(y, order=(5,2,1))
model_fit = model.fit(disp=0)
print(model_fit.summary())
residuals = pd.DataFrame(model_fit.resid)
residuals.plot()
plt.show()
residuals.plot(kind='kde')
plt.show()
print(residuals.describe())
train_sz = int(len(y) * .75)
train_y, test_y = y[:-5], y[-5:]
plt.plot(train_y)
model= ARIMA(train_y, order=(5,2,1))
model_fit = model.fit(disp=0)
pred = model_fit.forecast(5)[0]
plt.plot(test_y)
plt.plot(pred, color='red')
plt.show()
###Output
_____no_output_____
###Markdown
Doing Shit, But on States
###Code
class CumDeathCounterStates:
def __init__(self):
self.df = pd.read_csv(f"{homedir}/data/us/covid/nyt_us_states.csv")
states = self.df.state.unique()
self.cache = {}
for state in states:
self.cache[state] = self.get_cum_deaths(state)
def get_cum_deaths(self, state_name):
df_state = self.df[self.df.state == state_name]
X = df_state.date.map(lambda day: (get_date(day) - get_date('2020-01-01')).days).values
y_deaths = df_state.deaths.values
return (X, y_deaths)
def getY(self, state):
return self.cache[state]
class CumCaseCounterStates:
def __init__(self):
self.df = pd.read_csv(f"{homedir}/data/us/covid/nyt_us_states.csv")
states = self.df.state.unique()
self.cache = {}
for state in states:
self.cache[state] = self.get_cum_cases(state)
def get_cum_cases(self, state_name):
df_state = self.df[self.df.state == state_name]
X = df_state.date.map(lambda day: (get_date(day) - get_date('2020-01-01')).days).values
y_cases = df_state.cases.values
return (X, y_cases)
def getY(self, state):
return self.cache[state]
class DeltaCounterStates:
def __init__(self, cumDeathCounterStates):
self.cumDeathCounterStates = cumDeathCounterStates
def getY(self, state):
X, y = self.cumDeathCounterStates.getY(state)
y_true = [y[0]]
for i in range(1, len(y)):
y_true.append(y[i] - y[i-1])
return X, y_true
class RatioCounterStates:
def __init__(self, counter):
self.counter = counter
def getY(self, state, avg_period=5):
X_raw, y_raw = self.counter.getY(state)
y = []
running_sum = 0.0
running_time = 0
for i in range(len(X_raw)):
if y_raw[i] == 0:
y.append(0)
elif running_sum == 0:
y.append(1) # if this is the first case we define the signal as 1
else:
avg = running_sum/running_time
y.append(y_raw[i]/avg)
if running_time == avg_period:
running_sum = running_sum + y_raw[i] - y_raw[i - avg_period]
else:
running_sum = running_sum + y_raw[i]
running_time = running_time + 1
if running_sum == 0:
running_time = 1
offset = 0
while y[offset] == 0:
offset += 1
if offset == len(y):
return (X_raw, np.array(y))
return X_raw[offset:], y[offset:]
cumDeathCounterStates = CumDeathCounterStates()
cumCaseCounterStates = CumCaseCounterStates()
deltaDeathCounterStates = DeltaCounterStates(cumDeathCounterStates)
deltaCaseCounterStates = DeltaCounterStates(cumCaseCounterStates)
ratioCounterStates = RatioCounterStates(deltaDeathCounterStates)
###Output
_____no_output_____
###Markdown
Are State Daily Death Ratios Stationary (ADF, Again)
###Code
states_list = pd.read_csv(f"{homedir}/data/us/covid/nyt_us_states.csv").state.unique()
stationary = 0
non_stationary = 0
for state in states_list:
X, y = ratioCounterStates.getY(state,1)
if len(y) < 6:
continue
adfTest = ts.adfuller(y, maxlag=4)
if adfTest[1] > 0.05:
non_stationary += 1
print(state)
else:
stationary += 1
print ("stationary %d, non-stat %d" % (stationary, non_stationary))
X,y = ratioCounterStates.getY("California",1)
plt.plot(X,y)
autocorrelation_plot(y)
plt.plot(pacf(y))
train_sz = int(.66*len(y))
y_train = y[:train_sz]
y_test = y[train_sz:]
model= ARIMA(y_train, order=(7,0,3))
model_fit = model.fit(disp=1)
pred = model_fit.forecast(len(y) - train_sz)[0]
plt.plot(y_test)
plt.plot(pred, color='red')
plt.show()
###Output
/home/sean/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/tsatools.py:674: RuntimeWarning: invalid value encountered in double_scalars
tmp[kiter] = (macoefs[kiter]-b *macoefs[j-kiter-1])/(1-b**2)
/home/sean/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/tsatools.py:676: RuntimeWarning: divide by zero encountered in true_divide
invmacoefs = -np.log((1-macoefs)/(1+macoefs))
/home/sean/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/tsatools.py:650: RuntimeWarning: invalid value encountered in true_divide
newparams = ((1-np.exp(-params))/(1+np.exp(-params))).copy()
/home/sean/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/tsatools.py:651: RuntimeWarning: invalid value encountered in true_divide
tmp = ((1-np.exp(-params))/(1+np.exp(-params))).copy()
/home/sean/anaconda3/lib/python3.7/site-packages/statsmodels/tools/numdiff.py:243: RuntimeWarning: invalid value encountered in add
**kwargs)).imag/2./hess[i, j]
/home/sean/anaconda3/lib/python3.7/site-packages/statsmodels/tools/numdiff.py:243: RuntimeWarning: invalid value encountered in multiply
**kwargs)).imag/2./hess[i, j]
/home/sean/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/tsatools.py:650: RuntimeWarning: invalid value encountered in exp
newparams = ((1-np.exp(-params))/(1+np.exp(-params))).copy()
/home/sean/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/tsatools.py:651: RuntimeWarning: invalid value encountered in exp
tmp = ((1-np.exp(-params))/(1+np.exp(-params))).copy()
/home/sean/anaconda3/lib/python3.7/site-packages/statsmodels/base/model.py:488: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available
'available', HessianInversionWarning)
###Markdown
State OLS
###Code
df = df_jhu[features]
df = df[df.fips % 1000 == 0] # keep only aggregate (state-level) rows
df.head()
def get_XY_states(features, delta_y, look_back_y, y_generator, moving_window=True):
df = df_jhu[features]
df = df[df.fips % 1000 == 0] # only consider aggregate states
    df = df[df.State != 'PR'] # Puerto Rico has some weird data...
n, d = df.shape
col_names = []
for i in range(look_back_y):
col_name = "y at t = -%d" %i
col_names.append(col_name)
df[col_name] = np.zeros(n)
X = []
Y = []
for fips in df.index:
base = df.loc[fips].values
try:
t, ys = y_generator.getY(df.loc[fips]['Area_Name'])
except KeyError:
print('Key Err')
print(df.loc[fips]['Area_Name'])
continue
if len(ys) == 0:
X.append(base)
Y.append(0)
continue
for j in range(-1, -len(ys), -1):
base = df.loc[fips].values
Y.append(ys[j])
for i in range(look_back_y):
if j - delta_y - i >= -len(ys):
base[-look_back_y + i] = ys[j - delta_y - i]
X.append(base)
if not moving_window:
break
df_new = pd.DataFrame(X, columns = df.columns)
df_new['target'] = Y
return df_new
features_states= ['State', 'Area_Name','POP_ESTIMATE_2018', 'Area in square miles - Land area', 'Density per square mile of land area - Population'
, 'Active Physicians per 100000 Population 2018 (AAMC)', 'Active General Surgeons per 100000 Population 2018 (AAMC)',
'Non-profit hospital beds per 1000 people (2019)', 'Employed_2018', 'Unemployment_rate_2018'
, 'Total hospital beds per 1000 people (2019)', 'Total nurse practitioners (2019)',
'Total Hospitals (2019)','fips']
df_new = get_XY_states(features_states, 2, 5, deltaDeathCounterStates)
X = df_new.iloc[:,2:-1]
y = df_new['target'].values
plt.hist(y,bins=25);
train_num = int(y.shape[0] * 0.66)
X_test = X.iloc[train_num:]
y_test = y[train_num:]
X_train = X.iloc[:train_num]
y_train = y[:train_num]
model = sm.OLS(y_train,X_train)
result = model.fit()
print(result.summary())
print("R squared (out sample) is %f" % (Rsquare(result.predict(X_test).values, y_test)))
print("R squared (in sample) is %f" % (Rsquare(result.predict(X_train).values, y_train)))
gbm = lgb.LGBMRegressor(num_leaves=100,
learning_rate=0.05,
n_estimators=200,
reg_lambda=10);
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric='l2',
early_stopping_rounds=10);
pred_train = gbm.predict(X_train, num_iteration=gbm.best_iteration_)
print("R squared (in sample) is %f" % Rsquare(pred_train, y_train))
pred_test = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
print("R squared (out sample) is %f" % Rsquare(pred_test, y_test))
plt.scatter(pred_test, y_test)
###Output
[1] valid_0's l2: 483.262
Training until validation scores don't improve for 10 rounds
[2] valid_0's l2: 441.382
[3] valid_0's l2: 405.175
[4] valid_0's l2: 374.335
[5] valid_0's l2: 345.734
[6] valid_0's l2: 319.766
[7] valid_0's l2: 297.466
[8] valid_0's l2: 277.307
[9] valid_0's l2: 260.557
[10] valid_0's l2: 244.654
[11] valid_0's l2: 226.22
[12] valid_0's l2: 213.699
[13] valid_0's l2: 206.284
[14] valid_0's l2: 198.328
[15] valid_0's l2: 188.619
[16] valid_0's l2: 181.345
[17] valid_0's l2: 174.222
[18] valid_0's l2: 169.213
[19] valid_0's l2: 163.41
[20] valid_0's l2: 161.403
[21] valid_0's l2: 156.874
[22] valid_0's l2: 158.255
[23] valid_0's l2: 155.307
[24] valid_0's l2: 153.095
[25] valid_0's l2: 151.223
[26] valid_0's l2: 149.293
[27] valid_0's l2: 146.939
[28] valid_0's l2: 145.416
[29] valid_0's l2: 144.235
[30] valid_0's l2: 143.033
[31] valid_0's l2: 142.286
[32] valid_0's l2: 142.263
[33] valid_0's l2: 141.9
[34] valid_0's l2: 141.361
[35] valid_0's l2: 141.201
[36] valid_0's l2: 141.391
[37] valid_0's l2: 143.008
[38] valid_0's l2: 143.23
[39] valid_0's l2: 143.066
[40] valid_0's l2: 143.718
[41] valid_0's l2: 144.048
[42] valid_0's l2: 144.505
[43] valid_0's l2: 145.167
[44] valid_0's l2: 145.433
[45] valid_0's l2: 146.287
Early stopping, best iteration is:
[35] valid_0's l2: 141.201
R squared (in sample) is 0.936661
R squared (out sample) is 0.670257
###Markdown
Checkpoint 1
###Code
large_fips, small_fips = split_fips(cum_death_counter, min_thresh=20)
print("%d large fips (FIPS with at least 20 deaths)" % len(large_fips))
print("%d small fips" % len(small_fips))
###Output
197 large fips (FIPS with at least 20 deaths)
2949 small fips
###Markdown
Large-FIPS: Directly use OLS
###Code
# Get a list of OLS trained models
def train_models(fips_set, min_look_ahead=1, max_look_ahead=14):
models = []
for delta_y in range(min_look_ahead, max_look_ahead + 1):
df_new = get_XY(features, delta_y, 8, delta_death_counter, fips=fips_set)
X = df_new.iloc[:,2:-1]
y = df_new['target'].values
train_num = int(y.shape[0] * 0.66)
X_test = X.iloc[train_num:]
y_test = y[train_num:]
X_train = X.iloc[:train_num]
y_train = y[:train_num]
model = sm.OLS(y_train,X_train)
print("Training model to predict %d days in the future" % delta_y)
result = model.fit()
print("R squared (out sample) is %f" % (Rsquare(result.predict(X_test), y_test)))
models.append(result)
return models
model_list = train_models(large_fips)
def get_param_info(models, min_look_ahead=1, max_look_ahead=14):
params_mu = {}
params_stderr = {}
for i in range(len(models)):
model = models[i]
params_mu[i + min_look_ahead] = model.params
temp = model.bse
# some specific stuff for getting rid of extreme std errs in counties
for j in range(8):
temp[j] = 0
temp[11] = 0
temp[16] = 0
params_stderr[i + min_look_ahead] = temp
return params_mu, params_stderr
param_mu, param_stderr = get_param_info(model_list)
df_true = get_XY(features, 0, 8, delta_death_counter, fips=large_fips,moving_window=False) # "True X"
X_true = df_true.iloc[:,2:-1]
def gen_percentiles(X, param_mu, param_stderr, min_look_ahead=1, max_look_ahead=14):
fips_list = X.fips.values
output = {}
for i in range(min_look_ahead, max_look_ahead + 1):
n_samples = 400
fips_to_samples = {}
fips_to_percentiles = {}
for fips in fips_list:
fips_to_samples[fips] = []
fips_to_percentiles[fips] = {}
for j in range(n_samples):
param_sample = np.random.normal(param_mu[i], param_stderr[i])
res = np.matmul(X.values, param_sample)
for k in range(len(fips_list)):
fips_to_samples[fips_list[k]].append(res[k])
for fips in fips_list:
for percentile in range(10, 100, 10):
fips_to_percentiles[fips][percentile] = np.percentile(fips_to_samples[fips], percentile, axis=0)
output[i] = fips_to_percentiles
return output
percentiles = gen_percentiles(X_true, param_mu, param_stderr, 1, 14)
# Example: object indexed by num_days_in_future, FIPS, percentile
percentiles[14][36061][10] # 10th percentile of the predicted deaths in New York County (36061) 14 days from now
###Output
_____no_output_____
###Markdown
Small FIPS: Use OLS on states and extrapolate
###Code
# Get a list of OLS trained models
def train_models_states(min_look_ahead=1, max_look_ahead=14):
models = []
for delta_y in range(min_look_ahead, max_look_ahead + 1):
df_new = get_XY_states(features_states, delta_y, 8, deltaDeathCounterStates, moving_window=True)
X = df_new.iloc[:,2:-1]
y = df_new['target'].values
train_num = int(y.shape[0] * 0.66)
X_test = X.iloc[train_num:]
y_test = y[train_num:]
X_train = X.iloc[:train_num]
y_train = y[:train_num]
model = sm.OLS(y_train,X_train)
print("Training model to predict %d days in the future" % delta_y)
result = model.fit()
print("R squared (out sample) is %f" % (Rsquare(result.predict(X_test), y_test)))
models.append(result)
return models
model_list_states = train_models_states()
def get_param_info(models, min_look_ahead=1, max_look_ahead=14):
params_mu = {}
params_stderr = {}
for i in range(len(models)):
model = models[i]
params_mu[i + min_look_ahead] = model.params
temp = model.bse
# some specific stuff for getting rid of extreme std errs in counties
for j in range(2):
temp[j] = 0
temp[11] = 0
temp[6] = 0
params_stderr[i + min_look_ahead] = temp
return params_mu, params_stderr
param_mu_states, params_stderr_states = get_param_info(model_list_states)
df_true_states = get_XY_states(features_states, 0, 8, deltaDeathCounterStates, moving_window=False)
X_true_states = df_true_states.iloc[:,2:-1]
def gen_percentiles_states(X, param_mu, param_stderr, state_list, min_look_ahead=1, max_look_ahead=14):
fips_list = state_list
output = {}
for i in range(min_look_ahead, max_look_ahead + 1):
n_samples = 400
fips_to_samples = {}
fips_to_percentiles = {}
for fips in fips_list:
fips_to_samples[fips] = []
fips_to_percentiles[fips] = {}
for j in range(n_samples):
param_sample = np.random.normal(param_mu[i].values, param_stderr[i].values)
res = np.matmul(X.values, param_sample)
for k in range(len(fips_list)):
fips_to_samples[fips_list[k]].append(res[k])
for fips in fips_list:
for percentile in range(10, 100, 10):
fips_to_percentiles[fips][percentile] = np.percentile(fips_to_samples[fips], percentile, axis=0)
output[i] = fips_to_percentiles
return output
percentiles_states = gen_percentiles_states(X_true_states, param_mu_states, params_stderr_states, df_true_states.State.values, 1, 14)
def FIPS_to_pop(fips):
return df_jhu[df_jhu.fips==fips].POP_ESTIMATE_2018.values[0]
def State_to_pop(state):
    df = df_jhu[df_jhu.fips % 1000 == 0]
    return df[df.State == state].POP_ESTIMATE_2018.values[0]
def FIPS_proportion_state_pop(fips):
    state = FIPS_to_state(fips)
    return FIPS_to_pop(fips)/State_to_pop(state)
def FIPS_to_state(fips):
return df_jhu[df_jhu.fips==fips].State.values[0]
class FIPS_info:
def __init__(self, fips_list):
self.fip_to_state = {}
self.fip_to_pop = {}
self.fip_to_proportion = {}
for fips in fips_list:
self.fip_to_state[fips] = FIPS_to_state(fips)
self.fip_to_proportion[fips] = FIPS_proportion_state_pop(fips)
def FIPS_to_state(self, fips):
return self.fip_to_state[fips]
def FIPS_proportion_state_pop(self, fips):
return self.fip_to_proportion[fips]
# Remove FIPS codes whose state population share cannot be computed or looks inconsistent.
small_fips_filtered = []
for fips in small_fips:
try:
if(FIPS_proportion_state_pop(fips) >= 1):
print("what %d" % fips)
else:
small_fips_filtered.append(fips)
except:
print(fips)
fips_info = FIPS_info(small_fips_filtered)
def merge_percentiles(percentiles_county, percentiles_states, clean_small_fips, min_look_ahead=1, max_look_ahead=14):
for delta_y in range(min_look_ahead, max_look_ahead+1):
print(delta_y)
for fips in clean_small_fips:
percentiles_county[delta_y][fips] = {}
for percentile in range(10, 100, 10):
percentiles_county[delta_y][fips][percentile] = fips_info.FIPS_proportion_state_pop(fips) * percentiles_states[delta_y][fips_info.FIPS_to_state(fips)][percentile]
return percentiles_county
percentile_total = merge_percentiles(percentiles, percentiles_states, small_fips_filtered)
df_sample_sub = pd.read_csv(f"{homedir}/sample_submission.csv")
df_sample_sub['fips'] = df_sample_sub['id'].map(lambda i : int(i[11:]))
output_fips = df_sample_sub.fips.unique()
import datetime
import csv
def truncate(dec):
return int(100*dec)/100
with open('submission.csv', mode='w') as file:
writer = csv.writer(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(['id', '10', '20', '30', '40', '50', '60', '70', '80', '90'])
today = datetime.date.today() + datetime.timedelta(days=-1);
for i in range(-23, 0):
target_day = today + datetime.timedelta(days=i)
string_pre = target_day.strftime('%Y-%m-%d-')
for fips in output_fips:
print_lst = [string_pre + str(fips)]
try:
X, y = delta_death_counter.getY(fips)
for j in range(9):
print_lst.append(y[i])
except:
for j in range(9):
print_lst.append(0)
writer.writerow(print_lst)
for delta_y in range(1, 69):
target_day = today + datetime.timedelta(days=delta_y-1)
string_pre = target_day.strftime('%Y-%m-%d-')
if delta_y > 14:
for fips in output_fips:
l = [string_pre + str(fips)]
writer.writerow(l + [0] * 9)
continue
for fips in output_fips:
print_lst = [string_pre + str(fips)]
if fips in percentiles[delta_y].keys():
for percentile in range(10,100,10):
datum = percentiles[delta_y][fips][percentile]
if datum < 0:
datum = 0
print_lst.append(truncate(datum))
else:
for i in range(9):
print_lst.append(0)
writer.writerow(print_lst)
your_submission = pd.read_csv('submission.csv').set_index('id').sort_index()
sample_submission = pd.read_csv(f"{homedir}/sample_submission.csv").set_index('id').sort_index()
assert(your_submission.index.equals(sample_submission.index))
###Output
_____no_output_____ |
teaching/stat_775_2021_fall/activities/activity-2021-09-10.ipynb | ###Markdown
Introduction to Python part VII (And a discussion of the covariance and the multivariate Gaussian) Activity 1: Discussion of multiple random variables * What is the covariance / correlation between two random variables and how are they related? * What do we refer to when we discuss the "background" covariance matrix? What special properties do we know about this matrix? * What is the anomaly matrix? How is this related to the ensemble matrix? * What is the relationship between the anomaly matrix and the empirical covariance? What is the relationship to the background covariance? * What is the implication of the multivariate central limit theorem for the ensemble mean? Activity 2: Looping in Python In the episode about visualizing data, we wrote Python code that plots values of interest from our first inflammation dataset (inflammation-01.csv), which revealed some suspicious features in it.We have a dozen data sets right now, though, and more on the way. We want to create plots for all of our data sets with a single statement. To do that, we’ll have to teach the computer how to repeat things.An example task that we might want to repeat is accessing numbers in a list, which we will do by printing each number on a line of its own. This is done with the `for` loop with syntax as follows:
###Code
odds = [1, 3, 5, 7, 9, 11]
for num in odds:
print(num)
###Output
_____no_output_____
###Markdown
This follows the convention in Python where we can give the loop variable for a list or any other iterable object an arbitrary name and iterate over the iterable in order. Note that Python requires the `:` to end the loop header and an indentation of four spaces to start the loop body; the first line that breaks the indentation automatically ends the loop. Exercise 1: As a quick exercise, see if the following loops work:
###Code
for num in odds
print(num)
for num in odds:
print(num)
for banana in odds:
print(banana)
###Output
_____no_output_____
###Markdown
Exercise 2:Python has a built-in function called `range` that generates a sequence of numbers. `range` can accept 1, 2, or 3 parameters. * If one parameter is given, range generates a sequence of that length, starting at zero and incrementing by 1. For example, `range(3)` produces the numbers 0, 1, 2. * If two parameters are given, range starts at the first and ends just before the second, incrementing by one. For example, `range(2, 5)` produces 2, 3, 4. * If range is given 3 parameters, it starts at the first one, ends just before the second one, and increments by the third one. For example, `range(3, 10, 2)` produces 3, 5, 7, 9.Using `range`, write a loop that uses range to print the first 3 natural numbers: Exercise 3:Given the following loop:```word = 'oxygen'for char in word: print(char)```How many times is the body of the loop executed? Exercise 4:Exponentiation is built into Python:
###Code
2**3
###Output
_____no_output_____
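###Markdown
A quick aside for Exercise 2: wrapping `range` in `list` shows exactly what it produces (an illustrative sketch, not the exercise solution).
###Code
print(list(range(3)))        # [0, 1, 2]
print(list(range(2, 5)))     # [2, 3, 4]
print(list(range(3, 10, 2))) # [3, 5, 7, 9]
###Output
_____no_output_____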
###Markdown
Write a loop that calculates the same result as 2 ** 3 using multiplication (and without exponentiation). Exercise 5:The built-in function `enumerate` takes a sequence (e.g. a list) and generates a new sequence of the same length. Each element of the new sequence is a pair composed of the index `(0, 1, 2,…)` and the value from the original sequence:
###Code
for idx, val in enumerate(a_list):
# Do something using idx and val
###Output
_____no_output_____
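###Markdown
A concrete illustration of `enumerate` (a small sketch with a made-up list):
###Code
letters = ['a', 'b', 'c']
for idx, val in enumerate(letters):
    print(idx, val)  # prints 0 a, then 1 b, then 2 c
###Output
_____no_output_____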
###Markdown
The code above loops through a_list, assigning the index to idx and the value to val.Suppose you have encoded a polynomial as a list of coefficients in the following way: the first element is the constant term, the second element is the coefficient of the linear term, the third is the coefficient of the quadratic term, etc.
###Code
x = 5
coefs = [2, 4, 3]
y = coefs[0] * x**0 + coefs[1] * x**1 + coefs[2] * x**2
print(y)
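# One possible sketch for the enumerate exercise (an assumption about the intended
# approach, not an official solution): evaluate the same polynomial with a loop.
y_loop = 0
for power, coef in enumerate(coefs):
    y_loop = y_loop + coef * x ** power
print(y_loop)  # same value as y above (97)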
###Output
_____no_output_____ |
owid_subsetting.ipynb | ###Markdown
###Code
import pandas as pd
data = pd.read_csv('owid-covid-data.csv')
# telling panda the data types
data.date = pd.to_datetime(data.date)
data.location = data.location.astype('category')
# keep only the listed columns
subset_data = data[['continent', 'location', 'date', 'new_cases', 'new_tests']]
# choose country here:
subset_data = subset_data[subset_data.location == 'Australia']
subset_data.new_cases = subset_data.new_cases.astype('int64')
subset_data.new_tests = subset_data.new_tests.fillna(0)
subset_data.new_tests = subset_data.new_tests.astype('int64')
subset_data.index = subset_data.date
# adds all the days of each month
subset_data.groupby(pd.Grouper(freq='m')).sum()
df = pd.DataFrame(subset_data, columns= ['continent', 'location', 'date', 'new_cases', 'new_tests'])
# change filename
df.to_csv ('owid_subset_data.csv', index = False, header=True)
def data_subset(target_country, output_filename):
data = pd.read_csv('owid-covid-data.csv')
data.date = pd.to_datetime(data.date)
data.location = data.location.astype('category')
subset_data = data[['continent', 'location', 'date', 'new_cases', 'new_tests']]
subset_data = subset_data[subset_data.location == target_country]
subset_data.new_cases = subset_data.new_cases.astype('int64')
subset_data.new_tests = subset_data.new_tests.fillna(0)
subset_data.new_tests = subset_data.new_tests.astype('int64')
subset_data.index = subset_data.date
subset_data.groupby(pd.Grouper(freq='m')).sum()
df = pd.DataFrame(subset_data, columns= ['continent', 'location', 'date', 'new_cases', 'new_tests'])
df.to_csv (output_filename, index = False, header=True)
data_subset('United Kingdom', 'owid_UK.csv')
subset_data = subset_data.dropna(subset=['new_cases'])
###Output
_____no_output_____ |
DIP(5)_Concat.ipynb | ###Markdown
Data concatenation basics Concatenating DataFrames
###Code
import pandas as pd
df1 = pd.read_csv('DoitPandas_Resource/data/concat_1.csv')
df2 = pd.read_csv('DoitPandas_Resource/data/concat_2.csv')
df3 = pd.read_csv('DoitPandas_Resource/data/concat_3.csv')
row_concat = pd.concat([df1, df2, df3])
row_concat
row_concat.iloc[3, ]
###Output
_____no_output_____
###Markdown
Concatenating a Series to a DataFrame
###Code
new_row_series = pd.Series(['n1','n2','n3','n4'])
pd.concat([df1, new_row_series])
new_row_df = pd.DataFrame([['n1','n2','n3','n4']], columns = ['A','B','C','D'])
new_row_df
pd.concat([df2, new_row_df])
###Output
_____no_output_____
###Markdown
A Series can be thought of as a DataFrame with a single row. The column names have to be specified separately. append
###Code
df1.append(new_row_df)
###Output
_____no_output_____
###Markdown
The concat method can join two or more DataFrames at once. If there is only one DataFrame to attach, append works as well. Appending a Series to a DataFrame using a dict and ignore_index = True
###Code
new_series = {'A': 'm1', 'B': 'm2', 'C': 'm3', 'D': 'm4'}
df1.append(new_series, ignore_index = True)
###Output
_____no_output_____
###Markdown
Using ignore_index
###Code
row_concat = pd.concat([df1, df2, df3], ignore_index = True)
row_concat
###Output
_____no_output_____
###Markdown
axis = 1
###Code
col_concat = pd.concat([df1, df2], axis = 1)
col_concat
col_concat['A']
###Output
_____no_output_____
###Markdown
Adding a new column
###Code
new_col = ['A', 'B', 'C', 'D']
col_concat['New_Col'] = new_col
col_concat
###Output
_____no_output_____
###Markdown
Adding new columns while assigning sequential, non-duplicated indexes
###Code
col_concat_ignore_index = pd.concat([df1, df2, df3], axis=1, ignore_index = True)
col_concat_ignore_index
###Output
_____no_output_____
###Markdown
Concatenating only the common columns and common indexes
###Code
df1.columns = ['A', 'B', 'C', 'D']
df2.columns = ['E', 'F', 'G', 'H']
df3.columns = ['A', 'C', 'F', 'H']
df2
###Output
_____no_output_____
###Markdown
Rename each DataFrame's columns so they do not overlap, then try concat
###Code
pd.concat([df1, df2, df3])
###Output
_____no_output_____
###Markdown
Too many missing values are produced join = 'inner'
###Code
print(pd.concat([df1,df2,df3], join = 'inner'))
pd.concat([df1,df3], join = 'inner')
###Output
_____no_output_____
###Markdown
Combining DataFrames along the row index without missing values
###Code
df1.index = [0,1,2,3]
df2.index = [4,5,6,7]
df3.index = [0,2,5,7]
pd.concat([df1, df2, df3], axis = 1)
pd.concat([df1,df3], axis = 1, join = 'inner')
###Output
_____no_output_____
###Markdown
Wrapping up data concatenation The merge method
###Code
person = pd.read_csv('DoitPandas_Resource/data/survey_person.csv')
site = pd.read_csv('DoitPandas_Resource/data/survey_site.csv')
survey = pd.read_csv('DoitPandas_Resource/data/survey_survey.csv')
visited = pd.read_csv('DoitPandas_Resource/data/survey_visited.csv')
person
site
visited
survey
visited_subset = visited.loc[[0,2,6], ]
visited_subset
site.merge(visited_subset, left_on = 'name', right_on = 'site')
site.merge(visited, left_on = 'name', right_on = 'site')
###Output
_____no_output_____ |
nycflights.ipynb | ###Markdown
Simple questions How many years are in the df, and how many flights were there in total?
###Code
pd.unique(flights['year'])
###Output
_____no_output_____
###Markdown
Now, for obvious reasons, we know all the flights are from that year, but if there were more years, the way to find out would be to group and then count. Here, since each row corresponds to one flight, we can simply count the size. In other cases, each row may correspond to more than one observation, or more than one row may correspond to a single observation.
###Code
vuelos_por_año = flights.groupby('year').size()
vuelos_por_año.head()
###Output
_____no_output_____
###Markdown
Unique can be used on anything, for example the destinations or the departures.
###Code
flights['dest'].unique()
###Output
_____no_output_____
###Markdown
We have a ton of destinations; in fact I want to try something with all of them. But we will keep asking small questions.
###Code
## Let's filter a destination x
flights_to_MIA = flights[flights['dest']=='MIA']
flights_to_MIA.head()
###Output
_____no_output_____
###Markdown
How many flights did we have to Miami?
###Code
len(flights_to_MIA.index)
###Output
_____no_output_____
###Markdown
Of all the destinations, which one had the most flights?
###Code
vuelos_por_destino = flights.groupby('dest').size()
vuelos_por_destino.sort_values(ascending=False, inplace = True) ## so the chart doesn't come out so ugly
vuelos_por_destino.head()
destinos = pd.unique(flights['dest'])
plt.bar(vuelos_por_destino.index, vuelos_por_destino.values)  # use the sorted index so each bar's label matches its count
plt.ylabel('Número de vuelos')
plt.xlabel('Destinos')
plt.show()
###Output
_____no_output_____
###Markdown
Edit: at this point I got fed up with matplotlib and found a simpler solution in sns.
###Code
import seaborn as sns
f, ax = plt.subplots(figsize=(6, 15))
sns.set_color_codes("pastel")
sns.barplot(vuelos_por_destino.values,vuelos_por_destino.index,
label="Total", color="b")
## Meh, I could do this much better in R, but that's how you learn, right?
###Output
_____no_output_____
###Markdown
Question... Each destination had an origin, and I want to know the most frequent destination of each origin. Evidently I will need a loop; first I need a strategy to get the most frequent match for a single case, and we will use Miami because we already have it.
###Code
flights_to_MIA.groupby(flights_to_MIA['origin']).size().reset_index(name='counts')
###Output
_____no_output_____
###Markdown
What we have is the number of flights that had Miami as their destination, broken down by origin... I'll use lambdas and a groupby instead of a loop; it seems much more elegant to me. From here we could go on with more questions, for example which origin appears the most and the least, but that feels like a rehash of what's above.
###Code
s = flights.groupby('dest').agg(most_common_origin=('origin',lambda x : x.mode()[0]),
max_count=('origin',lambda x : x.value_counts().sort_values().iloc[-1]),
least_common_origin=('origin',lambda x : x.value_counts().sort_values().iloc[[0]].index),
min_count=('origin',lambda x : x.value_counts().sort_values().iloc[0]))
s
###Output
_____no_output_____
###Markdown
We keep answering some questions... I'm interested in working with the dates, for example at what time we have the most flights or in which months. From the information above I know they're not in the right format, so the next step is to change that.
###Code
flights['time_hour'] = pd.to_datetime(flights['time_hour'])
flights.info()
flights.head()
## When I was starting to program in R, one exercise was to plot the total flight time per month
## I want to see if I can replicate it, although now that I look at it, it's trivial
var = ['tailnum','distance', 'month']
df1 = flights[var]
df1.head()
df2 = df1.groupby(['tailnum', 'month'])['distance'].sum()
df2.head()
###Output
_____no_output_____ |
sessions/Session_23_Interpretable_Machine_Learning.ipynb | ###Markdown
What-If Tool in colab and jupyter notebooksThis notebook shows use of the [What-If Tool](https://pair-code.github.io/what-if-tool) inside of a colab or jupyter notebook.If running in colab, you can use this notebook out-of-the-box.If running in jupyter, you must run the What-If Tool [widget installation instructions](https://github.com/tensorflow/tensorboard/tree/master/tensorboard/plugins/interactive_inferencehow-do-i-use-it-in-a-jupyter-notebook) before using this notebook.This notebook trains a linear classifier on the [UCI census problem](https://archive.ics.uci.edu/ml/datasets/census+income) (predicting whether a person earns more than $50K from their census information).It then visualizes the results of the trained classifier on test data using the What-If Tool.
###Code
#@title Install the What-If Tool widget if running in Colab {display-mode: "form"}
# If running in Colab then pip install, otherwise no need.
try:
import google.colab
!pip install --upgrade witwidget
except Exception:
pass
#@title Define helper functions {display-mode: "form"}
import pandas as pd
import numpy as np
import tensorflow as tf
import functools
# Creates a tf feature spec from the dataframe and columns specified.
def create_feature_spec(df, columns=None):
feature_spec = {}
if columns == None:
columns = df.columns.values.tolist()
for f in columns:
if df[f].dtype is np.dtype(np.int64):
feature_spec[f] = tf.io.FixedLenFeature(shape=(), dtype=tf.int64)
elif df[f].dtype is np.dtype(np.float64):
feature_spec[f] = tf.io.FixedLenFeature(shape=(), dtype=tf.float32)
else:
feature_spec[f] = tf.io.FixedLenFeature(shape=(), dtype=tf.string)
return feature_spec
# Creates simple numeric and categorical feature columns from a feature spec and a
# list of columns from that spec to use.
#
# NOTE: Models might perform better with some feature engineering such as bucketed
# numeric columns and hash-bucket/embedding columns for categorical features.
def create_feature_columns(columns, feature_spec):
ret = []
for col in columns:
if feature_spec[col].dtype is tf.int64 or feature_spec[col].dtype is tf.float32:
ret.append(tf.feature_column.numeric_column(col))
else:
ret.append(tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(col, list(df[col].unique()))))
return ret
# An input function for providing input to a model from tf.Examples
def tfexamples_input_fn(examples, feature_spec, label, mode=tf.estimator.ModeKeys.EVAL,
num_epochs=None,
batch_size=64):
def ex_generator():
for i in range(len(examples)):
yield examples[i].SerializeToString()
dataset = tf.data.Dataset.from_generator(
ex_generator, tf.dtypes.string, tf.TensorShape([]))
if mode == tf.estimator.ModeKeys.TRAIN:
dataset = dataset.shuffle(buffer_size=2 * batch_size + 1)
dataset = dataset.batch(batch_size)
dataset = dataset.map(lambda tf_example: parse_tf_example(tf_example, label, feature_spec))
dataset = dataset.repeat(num_epochs)
return dataset
# Parses Tf.Example protos into features for the input function.
def parse_tf_example(example_proto, label, feature_spec):
parsed_features = tf.io.parse_example(serialized=example_proto, features=feature_spec)
target = parsed_features.pop(label)
return parsed_features, target
# Converts a dataframe into a list of tf.Example protos.
def df_to_examples(df, columns=None):
examples = []
if columns == None:
columns = df.columns.values.tolist()
for index, row in df.iterrows():
example = tf.train.Example()
for col in columns:
if df[col].dtype is np.dtype(np.int64):
example.features.feature[col].int64_list.value.append(int(row[col]))
elif df[col].dtype is np.dtype(np.float64):
example.features.feature[col].float_list.value.append(row[col])
elif row[col] == row[col]:
example.features.feature[col].bytes_list.value.append(row[col].encode('utf-8'))
examples.append(example)
return examples
# Converts a dataframe column into a column of 0's and 1's based on the provided test.
# Used to force label columns to be numeric for binary classification using a TF estimator.
def make_label_column_numeric(df, label_column, test):
df[label_column] = np.where(test(df[label_column]), 1, 0)
#@title Read training dataset from CSV {display-mode: "form"}
import pandas as pd
# Set the path to the CSV containing the dataset to train on.
csv_path = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'
# Set the column names for the columns in the CSV. If the CSV's first line is a header line containing
# the column names, then set this to None.
csv_columns = [
"Age", "Workclass", "fnlwgt", "Education", "Education-Num", "Marital-Status",
"Occupation", "Relationship", "Race", "Sex", "Capital-Gain", "Capital-Loss",
"Hours-per-week", "Country", "Over-50K"]
# Read the dataset from the provided CSV and print out information about it.
df = pd.read_csv(csv_path, names=csv_columns, skipinitialspace=True)
df
#@title Specify input columns and column to predict {display-mode: "form"}
import numpy as np
# Set the column in the dataset you wish for the model to predict
label_column = 'Over-50K'
# Make the label column numeric (0 and 1), for use in our model.
# In this case, examples with a target value of '>50K' are considered to be in
# the '1' (positive) class and all other examples are considered to be in the
# '0' (negative) class.
make_label_column_numeric(df, label_column, lambda val: val == '>50K')
# Set list of all columns from the dataset we will use for model input.
input_features = [
'Age', 'Workclass', 'Education', 'Marital-Status', 'Occupation',
'Relationship', 'Race', 'Sex', 'Capital-Gain', 'Capital-Loss',
'Hours-per-week', 'Country']
# Create a list containing all input features and the label column
features_and_labels = input_features + [label_column]
#@title Convert dataset to tf.Example protos {display-mode: "form"}
examples = df_to_examples(df)
#@title Create and train the classifier {display-mode: "form"}
num_steps = 5000 #@param {type: "number"}
# Create a feature spec for the classifier
feature_spec = create_feature_spec(df, features_and_labels)
# Define and train the classifier
train_inpf = functools.partial(tfexamples_input_fn, examples, feature_spec, label_column)
classifier = tf.estimator.LinearClassifier(
feature_columns=create_feature_columns(input_features, feature_spec))
classifier.train(train_inpf, steps=num_steps)
#@title Invoke What-If Tool for test data and the trained model {display-mode: "form"}
num_datapoints = 2000 #@param {type: "number"}
tool_height_in_px = 1000 #@param {type: "number"}
from witwidget.notebook.visualization import WitConfigBuilder
from witwidget.notebook.visualization import WitWidget
# Load up the test dataset
test_csv_path = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test'
test_df = pd.read_csv(test_csv_path, names=csv_columns, skipinitialspace=True,
skiprows=1)
make_label_column_numeric(test_df, label_column, lambda val: val == '>50K.')
test_examples = df_to_examples(test_df[0:num_datapoints])
# Setup the tool with the test examples and the trained classifier
config_builder = WitConfigBuilder(test_examples).set_estimator_and_feature_spec(
classifier, feature_spec).set_label_vocab(['Under 50K', 'Over 50K'])
WitWidget(config_builder, height=tool_height_in_px)
###Output
_____no_output_____ |
nbs/datasets/datasets.beibei.ipynb | ###Markdown
BeiBei Dataset> This data is collected from Beibei, one of the largest infant-product online retailing sites in China. It involves three types of user-item interactions, including page view, add-to-cart and purchase.
###Code
#hide
from nbdev.showdoc import *
from fastcore.nb_imports import *
from fastcore.test import *
#export
import numpy as np
import scipy.sparse as sp
import pickle
from recohut.datasets.bases.common import Dataset
from recohut.utils.common_utils import *
#export
class BeibeiDataset(Dataset):
def __init__(self, data_dir):
self.behs = ['pv', 'cart', 'buy']
super().__init__(data_dir)
self._process()
@property
def raw_file_names(self):
return ['trn_buy','trn_cart','trn_pv','tst_int']
@property
def processed_file_names(self):
return 'data.zip'
def download(self):
urls = ['https://github.com/RecoHut-Datasets/beibei/raw/v1/trn_buy',
'https://github.com/RecoHut-Datasets/beibei/raw/v1/trn_cart',
'https://github.com/RecoHut-Datasets/beibei/raw/v1/trn_pv',
'https://github.com/RecoHut-Datasets/beibei/raw/v1/tst_int']
for url in urls:
_ = download_url(url, self.raw_dir)
def process(self):
trnMats = list()
for path in self.raw_paths[:3]:
with open(path, 'rb') as fs:
mat = (pickle.load(fs) != 0).astype(np.float32)
trnMats.append(mat)
# test set
path = self.raw_paths[-1]
with open(path, 'rb') as fs:
tstInt = np.array(pickle.load(fs))
tstStat = (tstInt != None)
tstUsrs = np.reshape(np.argwhere(tstStat != False), [-1])
self.trnMats = trnMats
self.tstInt = tstInt
self.tstUsrs = tstUsrs
self.user, self.item = self.trnMats[0].shape
self.behNum = len(self.behs)
adj = 0
for i in range(self.behNum):
adj = adj + self.trnMats[i]
adj = (adj != 0).astype(np.float32)
self.labelP = np.squeeze(np.array(np.sum(adj, axis=0)))
tpadj = self.transpose(adj)
adjNorm = np.reshape(np.array(np.sum(adj, axis=1)), [-1])
tpadjNorm = np.reshape(np.array(np.sum(tpadj, axis=1)), [-1])
for i in range(adj.shape[0]):
for j in range(adj.indptr[i], adj.indptr[i+1]):
adj.data[j] /= adjNorm[i]
for i in range(tpadj.shape[0]):
for j in range(tpadj.indptr[i], tpadj.indptr[i+1]):
tpadj.data[j] /= tpadjNorm[i]
self.adj = adj
self.tpadj = tpadj
@staticmethod
def transpose(mat):
coomat = sp.coo_matrix(mat)
return sp.csr_matrix(coomat.transpose())
@staticmethod
def make_mask(nodes, size):
mask = np.ones(size)
if not nodes is None:
mask[nodes] = 0.0
return mask
@staticmethod
def update_bdgt(adj, nodes):
if nodes is None:
return 0
tembat = 1000
ret = 0
for i in range(int(np.ceil(len(nodes) / tembat))):
st = tembat * i
ed = min((i+1) * tembat, len(nodes))
temNodes = nodes[st: ed]
ret += np.sum(adj[temNodes], axis=0)
return ret
@staticmethod
def sample(budget, mask, sampNum):
score = (mask * np.reshape(np.array(budget), [-1])) ** 2
norm = np.sum(score)
if norm == 0:
return np.random.choice(len(score), 1), sampNum - 1
score = list(score / norm)
arrScore = np.array(score)
posNum = np.sum(np.array(score)!=0)
if posNum < sampNum:
pckNodes1 = np.squeeze(np.argwhere(arrScore!=0))
# pckNodes2 = np.random.choice(np.squeeze(np.argwhere(arrScore==0.0)), min(len(score) - posNum, sampNum - posNum), replace=False)
# pckNodes = np.concatenate([pckNodes1, pckNodes2], axis=0)
pckNodes = pckNodes1
else:
pckNodes = np.random.choice(len(score), sampNum, p=score, replace=False)
return pckNodes, max(sampNum - posNum, 0)
@staticmethod
def transToLsts(mat, mask=False, norm=False):
shape = [mat.shape[0], mat.shape[1]]
coomat = sp.coo_matrix(mat)
indices = np.array(list(map(list, zip(coomat.row, coomat.col))), dtype=np.int32)
data = coomat.data.astype(np.float32)
if norm:
rowD = np.squeeze(np.array(1 / (np.sqrt(np.sum(mat, axis=1) + 1e-8) + 1e-8)))
colD = np.squeeze(np.array(1 / (np.sqrt(np.sum(mat, axis=0) + 1e-8) + 1e-8)))
for i in range(len(data)):
row = indices[i, 0]
col = indices[i, 1]
data[i] = data[i] * rowD[row] * colD[col]
# half mask
if mask:
spMask = (np.random.uniform(size=data.shape) > 0.5) * 1.0
data = data * spMask
if indices.shape[0] == 0:
indices = np.array([[0, 0]], dtype=np.int32)
data = np.array([0.0], np.float32)
return indices, data, shape
def construct_data(self, adjs, usrs, itms):
pckAdjs = []
pckTpAdjs = []
for i in range(len(adjs)):
pckU = adjs[i][usrs]
tpPckI = self.transpose(pckU)[itms]
pckTpAdjs.append(tpPckI)
pckAdjs.append(self.transpose(tpPckI))
return pckAdjs, pckTpAdjs, usrs, itms
def sample_large_graph(self, pckUsrs, pckItms=None, sampDepth=2, sampNum=1e3, preSamp=False):
adj = self.adj
tpadj = self.tpadj
usrMask = self.make_mask(pckUsrs, adj.shape[0])
itmMask = self.make_mask(pckItms, adj.shape[1])
itmBdgt = self.update_bdgt(adj, pckUsrs)
if pckItms is None:
pckItms, _ = self.sample(itmBdgt, itmMask, len(pckUsrs))
itmMask = itmMask * self.make_mask(pckItms, adj.shape[1])
usrBdgt = self.update_bdgt(tpadj, pckItms)
uSampRes = 0
iSampRes = 0
for i in range(sampDepth + 1):
uSamp = uSampRes + (sampNum if i < sampDepth else 0)
iSamp = iSampRes + (sampNum if i < sampDepth else 0)
newUsrs, uSampRes = self.sample(usrBdgt, usrMask, uSamp)
usrMask = usrMask * self.make_mask(newUsrs, adj.shape[0])
newItms, iSampRes = self.sample(itmBdgt, itmMask, iSamp)
itmMask = itmMask * self.make_mask(newItms, adj.shape[1])
if i == sampDepth or i == sampDepth and uSampRes == 0 and iSampRes == 0:
break
usrBdgt += self.update_bdgt(tpadj, newItms)
itmBdgt += self.update_bdgt(adj, newUsrs)
usrs = np.reshape(np.argwhere(usrMask==0), [-1])
itms = np.reshape(np.argwhere(itmMask==0), [-1])
        # pass the per-behaviour training matrices explicitly; construct_data expects (adjs, usrs, itms)
        return self.construct_data(self.trnMats, usrs, itms)
###Output
_____no_output_____
###Markdown
Example
###Code
ds = BeibeiDataset(data_dir='/content/data')
ds.trnMats[0]
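# Usage sketch (an assumed, typical downstream call): convert the first behaviour
# matrix into the (indices, values, shape) lists, with normalization applied.
indices, values, shape = BeibeiDataset.transToLsts(ds.trnMats[0], norm=True)
print(shape)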
###Output
_____no_output_____ |
Applied Text Mining in Python/Assignment+4.ipynb | ###Markdown
---_You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-text-mining/resources/d9pwm) course resource._--- Assignment 4 - Document Similarity & Topic Modelling Part 1 - Document SimilarityFor the first part of this assignment, you will complete the functions `doc_to_synsets` and `similarity_score` which will be used by `document_path_similarity` to find the path similarity between two documents.The following functions are provided:* **`convert_tag:`** converts the tag given by `nltk.pos_tag` to a tag used by `wordnet.synsets`. You will need to use this function in `doc_to_synsets`.* **`document_path_similarity:`** computes the symmetrical path similarity between two documents by finding the synsets in each document using `doc_to_synsets`, then computing similarities using `similarity_score`.You will need to finish writing the following functions:* **`doc_to_synsets:`** returns a list of synsets in document. This function should first tokenize and part of speech tag the document using `nltk.word_tokenize` and `nltk.pos_tag`. Then it should find each tokens corresponding synset using `wn.synsets(token, wordnet_tag)`. The first synset match should be used. If there is no match, that token is skipped.* **`similarity_score:`** returns the normalized similarity score of a list of synsets (s1) onto a second list of synsets (s2). For each synset in s1, find the synset in s2 with the largest similarity value. Sum all of the largest similarity values together and normalize this value by dividing it by the number of largest similarity values found. Be careful with data types, which should be floats. Missing values should be ignored.Once `doc_to_synsets` and `similarity_score` have been completed, submit to the autograder which will run `test_document_path_similarity` to test that these functions are running correctly. *Do not modify the functions `convert_tag`, `document_path_similarity`, and `test_document_path_similarity`.*
###Code
import numpy as np
import nltk
from nltk.corpus import wordnet as wn
import pandas as pd
def convert_tag(tag):
"""Convert the tag given by nltk.pos_tag to the tag used by wordnet.synsets"""
tag_dict = {'N': 'n', 'J': 'a', 'R': 'r', 'V': 'v'}
try:
return tag_dict[tag[0]]
except KeyError:
return None
def doc_to_synsets(doc):
"""
Returns a list of synsets in document.
Tokenizes and tags the words in the document doc.
Then finds the first synset for each word/tag combination.
If a synset is not found for that combination it is skipped.
Args:
doc: string to be converted
Returns:
list of synsets
Example:
doc_to_synsets('Fish are nvqjp friends.')
Out: [Synset('fish.n.01'), Synset('be.v.01'), Synset('friend.n.01')]
"""
tokens = nltk.word_tokenize(doc)
pos = nltk.pos_tag(tokens)
tags = [tag[1] for tag in pos]
wntag = [convert_tag(tag) for tag in tags]
ans = list(zip(tokens,wntag))
sets = [wn.synsets(x,y) for x,y in ans]
final = [val[0] for val in sets if len(val) > 0]
return final
def similarity_score(s1, s2):
"""
Calculate the normalized similarity score of s1 onto s2
For each synset in s1, finds the synset in s2 with the largest similarity value.
Sum of all of the largest similarity values and normalize this value by dividing it by the
number of largest similarity values found.
Args:
s1, s2: list of synsets from doc_to_synsets
Returns:
normalized similarity score of s1 onto s2
Example:
synsets1 = doc_to_synsets('I like cats')
synsets2 = doc_to_synsets('I like dogs')
similarity_score(synsets1, synsets2)
Out: 0.73333333333333339
"""
    s = []
    for i1 in s1:
        # keep, for each synset in s1, the best path similarity against any synset in s2
        scores = [x for x in [i1.path_similarity(i2) for i2 in s2] if x is not None]
        if scores:
            s.append(max(scores))
    return sum(s) / len(s)
def document_path_similarity(doc1, doc2):
"""Finds the symmetrical similarity between doc1 and doc2"""
synsets1 = doc_to_synsets(doc1)
synsets2 = doc_to_synsets(doc2)
return (similarity_score(synsets1, synsets2) + similarity_score(synsets2, synsets1)) / 2
###Output
_____no_output_____
###Markdown
test_document_path_similarityUse this function to check if doc_to_synsets and similarity_score are correct.*This function should return the similarity score as a float.*
###Code
def test_document_path_similarity():
doc1 = 'This is a function to test document_path_similarity.'
doc2 = 'Use this function to see if your code in doc_to_synsets \
and similarity_score is correct!'
return document_path_similarity(doc1, doc2)
###Output
_____no_output_____
###Markdown
___`paraphrases` is a DataFrame which contains the following columns: `Quality`, `D1`, and `D2`.`Quality` is an indicator variable which indicates if the two documents `D1` and `D2` are paraphrases of one another (1 for paraphrase, 0 for not paraphrase).
###Code
# Use this dataframe for questions most_similar_docs and label_accuracy
paraphrases = pd.read_csv('paraphrases.csv')
paraphrases.head()
###Output
_____no_output_____
###Markdown
___ most_similar_docsUsing `document_path_similarity`, find the pair of documents in paraphrases which has the maximum similarity score.*This function should return a tuple `(D1, D2, similarity_score)`*
###Code
def most_similar_docs():
similarities = [(paraphrase['D1'], paraphrase['D2'], document_path_similarity(paraphrase['D1'], paraphrase['D2']))
for index, paraphrase in paraphrases.iterrows()]
similarity = max(similarities, key=lambda item:item[2])
return similarity
###Output
_____no_output_____
###Markdown
label_accuracyProvide labels for the twenty pairs of documents by computing the similarity for each pair using `document_path_similarity`. Let the classifier rule be that if the score is greater than 0.75, label is paraphrase (1), else label is not paraphrase (0). Report accuracy of the classifier using scikit-learn's accuracy_score.*This function should return a float.*
###Code
def label_accuracy():
    from sklearn.metrics import accuracy_score
    # label each pair as paraphrase (1) if its similarity is above 0.75
    df = paraphrases.copy()
    df['paraphrase'] = [1 if document_path_similarity(d1, d2) > 0.75 else 0
                        for d1, d2 in zip(df['D1'], df['D2'])]
    score = accuracy_score(df['Quality'].tolist(), df['paraphrase'].tolist())
    return score
###Output
_____no_output_____
###Markdown
Part 2 - Topic ModellingFor the second part of this assignment, you will use Gensim's LDA (Latent Dirichlet Allocation) model to model topics in `newsgroup_data`. You will first need to finish the code in the cell below by using gensim.models.ldamodel.LdaModel constructor to estimate LDA model parameters on the corpus, and save to the variable `ldamodel`. Extract 10 topics using `corpus` and `id_map`, and with `passes=25` and `random_state=34`.
###Code
import pickle
import gensim
from sklearn.feature_extraction.text import CountVectorizer
# Load the list of documents
with open('newsgroups', 'rb') as f:
newsgroup_data = pickle.load(f)
# Use CountVectorizor to find three letter tokens, remove stop_words,
# remove tokens that don't appear in at least 20 documents,
# remove tokens that appear in more than 20% of the documents
vect = CountVectorizer(min_df=20, max_df=0.2, stop_words='english',
token_pattern='(?u)\\b\\w\\w\\w+\\b')
# Fit and transform
X = vect.fit_transform(newsgroup_data)
# Convert sparse matrix to gensim corpus.
corpus = gensim.matutils.Sparse2Corpus(X, documents_columns=False)
# Mapping from word IDs to words (To be used in LdaModel's id2word parameter)
id_map = dict((v, k) for k, v in vect.vocabulary_.items())
ldamodel = gensim.models.ldamodel.LdaModel(corpus, id2word=id_map, num_topics=10, passes=25, random_state=34)
###Output
_____no_output_____
###Markdown
lda_topicsUsing `ldamodel`, find a list of the 10 topics and the most significant 10 words in each topic. This should be structured as a list of 10 tuples where each tuple takes on the form:`(9, '0.068*"space" + 0.036*"nasa" + 0.021*"science" + 0.020*"edu" + 0.019*"data" + 0.017*"shuttle" + 0.015*"launch" + 0.015*"available" + 0.014*"center" + 0.014*"sci"')`for example.*This function should return a list of tuples.*
###Code
def lda_topics():
topics_words = ldamodel.print_topics(num_topics=10, num_words=10)
return topics_words
###Output
_____no_output_____
###Markdown
topic_distributionFor the new document `new_doc`, find the topic distribution. Remember to use vect.transform on the the new doc, and Sparse2Corpus to convert the sparse matrix to gensim corpus.*This function should return a list of tuples, where each tuple is `(topic, probability)`*
###Code
new_doc = ["\n\nIt's my understanding that the freezing will start to occur because \
of the\ngrowing distance of Pluto and Charon from the Sun, due to it's\nelliptical orbit. \
It is not due to shadowing effects. \n\n\nPluto can shadow Charon, and vice-versa.\n\nGeorge \
Krumins\n-- "]
def topic_distribution():
new_doc_transformed = vect.transform(new_doc)
corpus = gensim.matutils.Sparse2Corpus(new_doc_transformed, documents_columns=False)
doc_topics = ldamodel.get_document_topics(corpus)
topic_dist = []
for val in list(doc_topics):
for v in val:
topic_dist.append(v)
return topic_dist
topic_distribution()
###Output
_____no_output_____
###Markdown
topic_namesFrom the list of the following given topics, assign topic names to the topics you found. If none of these names best matches the topics you found, create a new 1-3 word "title" for the topic.Topics: Health, Science, Automobiles, Politics, Government, Travel, Computers & IT, Sports, Business, Society & Lifestyle, Religion, Education.*This function should return a list of 10 strings.*
###Code
def topic_names():
return ["Education", "Automobiles", "Computers & IT", "Religion", "Automobiles", "Sports", "Health", "Religion", "Computers & IT", "Science"]
###Output
_____no_output_____
###Markdown
---_You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-text-mining/resources/d9pwm) course resource._--- Assignment 4 - Document Similarity & Topic Modelling Part 1 - Document SimilarityFor the first part of this assignment, you will complete the functions `doc_to_synsets` and `similarity_score` which will be used by `document_path_similarity` to find the path similarity between two documents.The following functions are provided:* **`convert_tag:`** converts the tag given by `nltk.pos_tag` to a tag used by `wordnet.synsets`. You will need to use this function in `doc_to_synsets`.* **`document_path_similarity:`** computes the symmetrical path similarity between two documents by finding the synsets in each document using `doc_to_synsets`, then computing similarities using `similarity_score`.You will need to finish writing the following functions:* **`doc_to_synsets:`** returns a list of synsets in document. This function should first tokenize and part of speech tag the document using `nltk.word_tokenize` and `nltk.pos_tag`. Then it should find each tokens corresponding synset using `wn.synsets(token, wordnet_tag)`. The first synset match should be used. If there is no match, that token is skipped.* **`similarity_score:`** returns the normalized similarity score of a list of synsets (s1) onto a second list of synsets (s2). For each synset in s1, find the synset in s2 with the largest similarity value. Sum all of the largest similarity values together and normalize this value by dividing it by the number of largest similarity values found. Be careful with data types, which should be floats. Missing values should be ignored.Once `doc_to_synsets` and `similarity_score` have been completed, submit to the autograder which will run `test_document_path_similarity` to test that these functions are running correctly. *Do not modify the functions `convert_tag`, `document_path_similarity`, and `test_document_path_similarity`.*
###Code
import numpy as np
import nltk
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
from nltk.corpus import wordnet as wn
import pandas as pd
# Need to feed pos tags to this function!
def convert_tag(tag):
"""Convert the tag given by nltk.pos_tag to the tag used by wordnet.synsets"""
tag_dict = {'N': 'n', 'J': 'a', 'R': 'r', 'V': 'v'}
try:
return tag_dict[tag[0]]
except KeyError:
return None
def doc_to_synsets(doc):
"""
Returns a list of synsets in document.
Tokenizes and tags the words in the document doc.
Then finds the first synset for each word/tag combination.
If a synset is not found for that combination it is skipped.
Args:
doc: string to be converted
Returns:
list of synsets
Example:
doc_to_synsets('Fish are nvqjp friends.')
Out: [Synset('fish.n.01'), Synset('be.v.01'), Synset('friend.n.01')]
"""
tokens = nltk.word_tokenize(doc)
pos_tags = nltk.pos_tag(tokens)
wn_tags = [convert_tag(x[1]) for x in pos_tags]
# If there is nothing in the synset for the token, it must be skipped! Therefore check that len of the synset is > 0!
# Will return a list of lists of synsets - one list for each token!
# Remember to use only the first match for each token! Hence wn.synsets(x,y)[0]!
synset_list = [wn.synsets(x,y)[0] for x,y in zip(tokens, wn_tags) if len(wn.synsets(x,y))>0]
return synset_list
def similarity_score(s1, s2):
"""
Calculate the normalized similarity score of s1 onto s2
For each synset in s1, finds the synset in s2 with the largest similarity value.
Sum of all of the largest similarity values and normalize this value by dividing it by the
number of largest similarity values found.
Args:
s1, s2: list of synsets from doc_to_synsets
Returns:
normalized similarity score of s1 onto s2
Example:
synsets1 = doc_to_synsets('I like cats')
synsets2 = doc_to_synsets('I like dogs')
similarity_score(synsets1, synsets2)
Out: 0.73333333333333339
"""
max_sim = []
for syn in s1:
sim = [syn.path_similarity(x) for x in s2 if syn.path_similarity(x) is not None]
if sim:
max_sim.append(max(sim))
return np.mean(max_sim)
def document_path_similarity(doc1, doc2):
"""Finds the symmetrical similarity between doc1 and doc2"""
synsets1 = doc_to_synsets(doc1)
synsets2 = doc_to_synsets(doc2)
return (similarity_score(synsets1, synsets2) + similarity_score(synsets2, synsets1)) / 2
###Output
[nltk_data] Downloading package punkt to /home/jovyan/nltk_data...
[nltk_data] Unzipping tokenizers/punkt.zip.
[nltk_data] Downloading package wordnet to /home/jovyan/nltk_data...
[nltk_data] Unzipping corpora/wordnet.zip.
[nltk_data] Downloading package averaged_perceptron_tagger to
[nltk_data] /home/jovyan/nltk_data...
[nltk_data] Unzipping taggers/averaged_perceptron_tagger.zip.
###Markdown
test_document_path_similarityUse this function to check if doc_to_synsets and similarity_score are correct.*This function should return the similarity score as a float.*
###Code
def test_document_path_similarity():
doc1 = 'This is a function to test document_path_similarity.'
doc2 = 'Use this function to see if your code in doc_to_synsets \
and similarity_score is correct!'
return document_path_similarity(doc1, doc2)
###Output
_____no_output_____
###Markdown
___`paraphrases` is a DataFrame which contains the following columns: `Quality`, `D1`, and `D2`.`Quality` is an indicator variable which indicates if the two documents `D1` and `D2` are paraphrases of one another (1 for paraphrase, 0 for not paraphrase).
###Code
# Use this dataframe for questions most_similar_docs and label_accuracy
paraphrases = pd.read_csv('paraphrases.csv')
paraphrases.head()
###Output
_____no_output_____
###Markdown
___ most_similar_docsUsing `document_path_similarity`, find the pair of documents in paraphrases which has the maximum similarity score.*This function should return a tuple `(D1, D2, similarity_score)`*
###Code
def most_similar_docs():
sim_scores = [document_path_similarity(x,y) for x,y in zip(paraphrases['D1'], paraphrases['D2'])]
return (paraphrases.loc[np.argmax(sim_scores),'D1'], paraphrases.loc[np.argmax(sim_scores),'D2'], max(sim_scores))
most_similar_docs()
###Output
_____no_output_____
###Markdown
label_accuracyProvide labels for the twenty pairs of documents by computing the similarity for each pair using `document_path_similarity`. Let the classifier rule be that if the score is greater than 0.75, label is paraphrase (1), else label is not paraphrase (0). Report accuracy of the classifier using scikit-learn's accuracy_score.*This function should return a float.*
###Code
def label_accuracy():
from sklearn.metrics import accuracy_score
paraphrases['sim_scores'] = [document_path_similarity(x,y) for x,y in zip(paraphrases['D1'], paraphrases['D2'])]
paraphrases['sim_scores'] = np.where(paraphrases['sim_scores']>0.75, 1, 0)
return accuracy_score(paraphrases['Quality'], paraphrases['sim_scores'])
label_accuracy()
###Output
_____no_output_____
###Markdown
Part 2 - Topic ModellingFor the second part of this assignment, you will use Gensim's LDA (Latent Dirichlet Allocation) model to model topics in `newsgroup_data`. You will first need to finish the code in the cell below by using gensim.models.ldamodel.LdaModel constructor to estimate LDA model parameters on the corpus, and save to the variable `ldamodel`. Extract 10 topics using `corpus` and `id_map`, and with `passes=25` and `random_state=34`.
###Code
import pickle
import gensim
from sklearn.feature_extraction.text import CountVectorizer
# Load the list of documents
with open('newsgroups', 'rb') as f:
newsgroup_data = pickle.load(f)
# Use CountVectorizor to find three letter tokens, remove stop_words,
# remove tokens that don't appear in at least 20 documents,
# remove tokens that appear in more than 20% of the documents
vect = CountVectorizer(min_df=20, max_df=0.2, stop_words='english',
token_pattern='(?u)\\b\\w\\w\\w+\\b')
# Fit and transform
X = vect.fit_transform(newsgroup_data)
# Convert sparse matrix to gensim corpus.
corpus = gensim.matutils.Sparse2Corpus(X, documents_columns=False)
# Mapping from word IDs to words (To be used in LdaModel's id2word parameter)
id_map = dict((v, k) for k, v in vect.vocabulary_.items())
# Use the gensim.models.ldamodel.LdaModel constructor to estimate
# LDA model parameters on the corpus, and save to the variable `ldamodel`
# Your code here:
ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=10, id2word=id_map, passes=25, random_state=34)
###Output
_____no_output_____
###Markdown
lda_topicsUsing `ldamodel`, find a list of the 10 topics and the most significant 10 words in each topic. This should be structured as a list of 10 tuples where each tuple takes on the form:`(9, '0.068*"space" + 0.036*"nasa" + 0.021*"science" + 0.020*"edu" + 0.019*"data" + 0.017*"shuttle" + 0.015*"launch" + 0.015*"available" + 0.014*"center" + 0.014*"sci"')`for example.*This function should return a list of tuples.*
###Code
def lda_topics():
# Your Code Here
return list(ldamodel.show_topics(num_topics=10, num_words=10))
lda_topics()
###Output
_____no_output_____
###Markdown
topic_distributionFor the new document `new_doc`, find the topic distribution. Remember to use vect.transform on the the new doc, and Sparse2Corpus to convert the sparse matrix to gensim corpus.*This function should return a list of tuples, where each tuple is `(topic, probability)`*
###Code
new_doc = ["\n\nIt's my understanding that the freezing will start to occur because \
of the\ngrowing distance of Pluto and Charon from the Sun, due to it's\nelliptical orbit. \
It is not due to shadowing effects. \n\n\nPluto can shadow Charon, and vice-versa.\n\nGeorge \
Krumins\n-- "]
def topic_distribution():
sparse_doc = vect.transform(new_doc)
gen_corpus = gensim.matutils.Sparse2Corpus(sparse_doc, documents_columns=False)
return list(ldamodel[gen_corpus])[0] # It's a list of lists! You just want the first one.
#return list(ldamodel.show_topics(num_topics=10, num_words=10)) # For topic_names
topic_distribution()
###Output
_____no_output_____
###Markdown
topic_namesFrom the list of the following given topics, assign topic names to the topics you found. If none of these names best matches the topics you found, create a new 1-3 word "title" for the topic.Topics: Health, Science, Automobiles, Politics, Government, Travel, Computers & IT, Sports, Business, Society & Lifestyle, Religion, Education.*This function should return a list of 10 strings.*
###Code
def topic_names():
return ['Education','Science','Computers & IT','Religion','Automobiles','Sports','Science','Religion','Computers & IT','Science']
topic_names()
###Output
_____no_output_____
###Markdown
---_You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-text-mining/resources/d9pwm) course resource._--- Assignment 4 - Document Similarity & Topic Modelling Part 1 - Document SimilarityFor the first part of this assignment, you will complete the functions `doc_to_synsets` and `similarity_score` which will be used by `document_path_similarity` to find the path similarity between two documents.The following functions are provided:* **`convert_tag:`** converts the tag given by `nltk.pos_tag` to a tag used by `wordnet.synsets`. You will need to use this function in `doc_to_synsets`.* **`document_path_similarity:`** computes the symmetrical path similarity between two documents by finding the synsets in each document using `doc_to_synsets`, then computing similarities using `similarity_score`.You will need to finish writing the following functions:* **`doc_to_synsets:`** returns a list of synsets in document. This function should first tokenize and part of speech tag the document using `nltk.word_tokenize` and `nltk.pos_tag`. Then it should find each tokens corresponding synset using `wn.synsets(token, wordnet_tag)`. The first synset match should be used. If there is no match, that token is skipped.* **`similarity_score:`** returns the normalized similarity score of a list of synsets (s1) onto a second list of synsets (s2). For each synset in s1, find the synset in s2 with the largest similarity value. Sum all of the largest similarity values together and normalize this value by dividing it by the number of largest similarity values found. Be careful with data types, which should be floats. Missing values should be ignored.Once `doc_to_synsets` and `similarity_score` have been completed, submit to the autograder which will run `test_document_path_similarity` to test that these functions are running correctly. *Do not modify the functions `convert_tag`, `document_path_similarity`, and `test_document_path_similarity`.*
###Code
import numpy as np
import nltk
from nltk.corpus import wordnet as wn
import pandas as pd
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
def convert_tag(tag):
"""Convert the tag given by nltk.pos_tag to the tag used by wordnet.synsets"""
#tag_dict = {'N': 'n', 'J': 'a', 'R': 'r', 'V': 'v'}
tag_dict = {'D': 'n', 'I': 'n', 'P': 'n', 'N': 'n', 'J': 'a', 'R': 'r', 'V': 'v'}
try:
return tag_dict[tag[0]]
except KeyError:
return None
def doc_to_synsets(doc):
"""
Returns a list of synsets in document.
Tokenizes and tags the words in the document doc.
Then finds the first synset for each word/tag combination.
If a synset is not found for that combination it is skipped.
Args:
doc: string to be converted
Returns:
list of synsets
Example:
doc_to_synsets('Fish are nvqjp friends.')
Out: [Synset('fish.n.01'), Synset('be.v.01'), Synset('friend.n.01')]
"""
# Your Code Here
text = nltk.word_tokenize(doc)
tags = nltk.pos_tag(text)
doc_tag = [convert_tag(tag[1]) for tag in tags]
#print(text, tags, doc_tag)
syn_list = [wn.synsets(tag[0],syntag) for tag, syntag in zip(tags, doc_tag) if syntag]
return [syn_[0] for syn_ in syn_list if len(syn_)>0 ] # Your Answer Here if len(syn_)>0
def similarity_score(s1, s2):
"""
Calculate the normalized similarity score of s1 onto s2
For each synset in s1, finds the synset in s2 with the largest similarity value.
Sum of all of the largest similarity values and normalize this value by dividing it by the
number of largest similarity values found.
Args:
s1, s2: list of synsets from doc_to_synsets
Returns:
normalized similarity score of s1 onto s2
Example:
synsets1 = doc_to_synsets('I like cats')
synsets2 = doc_to_synsets('I like dogs')
similarity_score(synsets1, synsets2)
Out: 0.73333333333333339
"""
# Your Code Here
val = np.zeros((len(s1),))
for i, syn1 in enumerate(s1):
tempval = np.zeros((len(s2),))
for j, syn2 in enumerate(s2):
symm = syn1.path_similarity(syn2)
tempval[j] = symm if symm else np.nan
#print(syn1, syn2, tempval)
if (np.isnan(tempval)).sum() == len(s2):
val[i] = np.nan
else:
val[i] = np.nanmax(tempval)
val = val[~np.isnan(val)]
#print(val)
return val.sum()/val.shape[0] # Your Answer Here
def document_path_similarity(doc1, doc2):
"""Finds the symmetrical similarity between doc1 and doc2"""
synsets1 = doc_to_synsets(doc1)
synsets2 = doc_to_synsets(doc2)
#print(synsets1, synsets2)
return (similarity_score(synsets1, synsets2) + similarity_score(synsets2, synsets1)) / 2
###Output
_____no_output_____
###Markdown
test_document_path_similarityUse this function to check if doc_to_synsets and similarity_score are correct.*This function should return the similarity score as a float.*
###Code
def test_document_path_similarity():
doc1 = 'This is a function to test document_path_similarity.'
doc2 = 'Use this function to see if your code in doc_to_synsets \
and similarity_score is correct!'
return document_path_similarity(doc1, doc2)
test_document_path_similarity()
###Output
_____no_output_____
###Markdown
___`paraphrases` is a DataFrame which contains the following columns: `Quality`, `D1`, and `D2`.`Quality` is an indicator variable which indicates if the two documents `D1` and `D2` are paraphrases of one another (1 for paraphrase, 0 for not paraphrase).
###Code
# Use this dataframe for questions most_similar_docs and label_accuracy
paraphrases = pd.read_csv('paraphrases.csv')
paraphrases.head()
###Output
_____no_output_____
###Markdown
___ most_similar_docsUsing `document_path_similarity`, find the pair of documents in paraphrases which has the maximum similarity score.*This function should return a tuple `(D1, D2, similarity_score)`*
###Code
def most_similar_docs():
# Your Code Here
para_index = paraphrases.index.values
most_sim = 0
for idx in para_index:
this_sim = document_path_similarity(paraphrases.loc[idx,'D1'],paraphrases.loc[idx,'D2'])
#print(this_sim)
if this_sim > most_sim:
D1 = paraphrases.loc[idx,'D1']
D2 = paraphrases.loc[idx,'D2']
most_sim = this_sim
return D1, D2, most_sim # Your Answer Here
most_similar_docs()
###Output
_____no_output_____
###Markdown
label_accuracyProvide labels for the twenty pairs of documents by computing the similarity for each pair using `document_path_similarity`. Let the classifier rule be that if the score is greater than 0.75, label is paraphrase (1), else label is not paraphrase (0). Report accuracy of the classifier using scikit-learn's accuracy_score.*This function should return a float.*
###Code
def label_accuracy():
from sklearn.metrics import accuracy_score
# Your Code Here
para_copy = paraphrases.copy()
para_copy['Q_pred'] = 0
for idx in para_copy.index.values:
sim_score = document_path_similarity(para_copy.loc[idx,'D1'],para_copy.loc[idx,'D2'])
para_copy.loc[idx,'Q_pred'] = 1 if sim_score>.75 else 0
#print(accuracy_score(para_copy['Quality'],para_copy['Q_pred']))
return accuracy_score(para_copy['Quality'],para_copy['Q_pred'])# Your Answer Here
label_accuracy()
###Output
_____no_output_____
###Markdown
Part 2 - Topic ModellingFor the second part of this assignment, you will use Gensim's LDA (Latent Dirichlet Allocation) model to model topics in `newsgroup_data`. You will first need to finish the code in the cell below by using gensim.models.ldamodel.LdaModel constructor to estimate LDA model parameters on the corpus, and save to the variable `ldamodel`. Extract 10 topics using `corpus` and `id_map`, and with `passes=25` and `random_state=34`.
###Code
import pickle
import gensim
from sklearn.feature_extraction.text import CountVectorizer
# Load the list of documents
with open('newsgroups', 'rb') as f:
newsgroup_data = pickle.load(f)
# Use CountVectorizor to find three letter tokens, remove stop_words,
# remove tokens that don't appear in at least 20 documents,
# remove tokens that appear in more than 20% of the documents
vect = CountVectorizer(min_df=20, max_df=0.2, stop_words='english',
token_pattern='(?u)\\b\\w\\w\\w+\\b')
# Fit and transform
X = vect.fit_transform(newsgroup_data)
# Convert sparse matrix to gensim corpus.
corpus = gensim.matutils.Sparse2Corpus(X, documents_columns=False)
# Mapping from word IDs to words (To be used in LdaModel's id2word parameter)
id_map = dict((v, k) for k, v in vect.vocabulary_.items())
# Use the gensim.models.ldamodel.LdaModel constructor to estimate
# LDA model parameters on the corpus, and save to the variable `ldamodel`
# Your code here:
ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=10, id2word=id_map, passes=25, random_state=34)
###Output
_____no_output_____
###Markdown
lda_topicsUsing `ldamodel`, find a list of the 10 topics and the most significant 10 words in each topic. This should be structured as a list of 10 tuples where each tuple takes on the form:`(9, '0.068*"space" + 0.036*"nasa" + 0.021*"science" + 0.020*"edu" + 0.019*"data" + 0.017*"shuttle" + 0.015*"launch" + 0.015*"available" + 0.014*"center" + 0.014*"sci"')`for example.*This function should return a list of tuples.*
###Code
def lda_topics():
# Your Code Here
return ldamodel.print_topics(num_topics=10, num_words=10)# Your Answer Here
lda_topics()
###Output
_____no_output_____
###Markdown
topic_distributionFor the new document `new_doc`, find the topic distribution. Remember to use vect.transform on the the new doc, and Sparse2Corpus to convert the sparse matrix to gensim corpus.*This function should return a list of tuples, where each tuple is `(topic, probability)`*
###Code
new_doc = ["\n\nIt's my understanding that the freezing will start to occur because \
of the\ngrowing distance of Pluto and Charon from the Sun, due to it's\nelliptical orbit. \
It is not due to shadowing effects. \n\n\nPluto can shadow Charon, and vice-versa.\n\nGeorge \
Krumins\n-- "]
def topic_distribution():
# Your Code Here
new_X = vect.transform(new_doc)
new_corpus = gensim.matutils.Sparse2Corpus(new_X, documents_columns=False)
res = ldamodel[new_corpus]
return list(res)[0]# Your Answer Here
topic_distribution()
###Output
_____no_output_____
###Markdown
topic_namesFrom the list of the following given topics, assign topic names to the topics you found. If none of these names best matches the topics you found, create a new 1-3 word "title" for the topic.Topics: Health, Science, Automobiles, Politics, Government, Travel, Computers & IT, Sports, Business, Society & Lifestyle, Religion, Education.*This function should return a list of 10 strings.*
###Code
ldamodel.print_topics(num_topics=10, num_words=30)[2]
def topic_names():
# Your Code Here
topic_names = ['Education','Computers & IT','Computers & IT','Religion','Automobiles',
'Sports','Health','Politics','Computers & IT','Computers & IT']
return topic_names# Your Answer Here
topic_names()
###Output
_____no_output_____ |
LAB2/.ipynb_checkpoints/LAB2-checkpoint.ipynb | ###Markdown
 PHYS 434 Lab 1, Haowen Guan. Initializing the environment as below:
###Code
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import scipy
from scipy import stats
import scipy.signal
plt.rcParams["figure.figsize"] = (15,10)
###Output
_____no_output_____
###Markdown
 Finding a Gamma-ray source In an observation, the background noise is a kind of cosmic ray that follows a Poisson distribution. Assume the average cosmic-ray background is 5 counts, and the average number of gamma-rays emitted by the hypothetical source is 10.
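As a first sketch (before the multi-day convolutions below), the single-day significance can be read directly off the Poisson survival function.
###Code
# One-day significance sketch: Poisson background with mean 5, hypothetical source mean 10.
# P(N >= 10 | background only) = sf(9), since sf(k) = P(N > k).
p_one_day = stats.poisson.sf(9, mu=5)
print("P(>=10 counts in one day | background only):", p_one_day)
print("Equivalent one-sided sigma:", stats.norm.ppf(1 - p_one_day))
###Output
_____no_output_____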
###Code
x = np.linspace(0, 15, 15 + 1)
results = stats.poisson.pmf(k = x, mu = 5)
fig, ax = plt.subplots(2,2)
fig.set_size_inches(20,14)
ax[0,0].plot(x, results)
# ax[0].set_xlim([0, 50])
ax[0,0].set_title("1 Days Poison distribution", fontsize=20)
ax[0,0].tick_params(labelsize = 24)
ax[0,0].set_xlabel("ray value", fontsize=20)
ax[0,0].set_ylabel("Probability", fontsize=24)
conv = results
for i in range(4):
conv = scipy.signal.fftconvolve(results,conv,'full')
ax[0,1].plot(np.linspace(0, 75, 15 * 5 + 1), conv)
ax[0,1].set_title("5 Days convolution", fontsize=20)
ax[0,1].tick_params(labelsize = 24)
ax[0,1].set_xlabel("ray value", fontsize=20)
ax[0,1].set_ylabel("Probability", fontsize=24)
for i in range(5):
conv = scipy.signal.fftconvolve(results,conv,'full')
ax[1,0].plot(np.linspace(0, 150, 15 * 10 + 1), conv)
ax[1,0].set_title("10 Days convolution", fontsize=20)
ax[1,0].tick_params(labelsize = 24)
ax[1,0].set_xlabel("ray value", fontsize=20)
ax[1,0].set_ylabel("Probability", fontsize=24)
for i in range(10):
conv = scipy.signal.fftconvolve(results,conv,'full')
ax[1,1].plot(np.linspace(0, 300, 15 * 20 + 1), conv)
ax[1,1].set_title("20 Days convolution", fontsize=20)
ax[1,1].tick_params(labelsize = 24)
ax[1,1].set_xlabel("ray value", fontsize=20)
ax[1,1].set_ylabel("Probability", fontsize=24)
###Output
_____no_output_____
###Markdown
 **Observation: After integrating the probability distribution for a few days, it shifts to the right and gets wider.** **1. B)**
###Code
x = np.linspace(0, 15, 16)
results = stats.poisson.pmf(k = x, mu = 5)
plt.figure(figsize=(10, 7))
conv = results
for i in range(4):
conv = scipy.signal.fftconvolve(results,conv,'full')
plt.plot(np.linspace(0,75,76), conv)
plt.tick_params(labelsize = 24)
plt.xlim([10, 50])
plt.title("5 Days convolution", fontsize=24)
plt.xlabel("ray value", fontsize=24)
plt.ylabel("Probability", fontsize=24)
###Output
_____no_output_____
###Markdown
 **Observation:** This is still a Poisson distribution. This makes sense, since convolution is essentially doing summation: no matter how many times a Poisson distribution is added to another Poisson distribution, the result is still Poisson.
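A quick numerical check of this claim (a sketch): the 5-day sum of independent Poisson(5) variables should agree closely with a single Poisson of mean 25, with small differences coming only from truncating the one-day pmf at 15.
###Code
# Sanity-check sketch: compare the 5-day convolution against a direct Poisson(mu=25) pmf.
base = stats.poisson.pmf(k=np.linspace(0, 15, 16), mu=5)
conv5 = base.copy()
for _ in range(4):
    conv5 = scipy.signal.fftconvolve(base, conv5, 'full')
direct = stats.poisson.pmf(k=np.linspace(0, 75, 76), mu=25)
print("Max |5-day convolution - Poisson(25)|:", np.max(np.abs(conv5 - direct)))
###Output
_____no_output_____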
###Code
x = np.linspace(0, 15, 16)
results = stats.poisson.pmf(k = x, mu = 5)
fig, ax = plt.subplots(2,2)
fig.set_size_inches(20,14)
ax[0,0].plot(x, results)
# ax[0].set_xlim([0, 50])
ax[0,0].set_title("1 Days Poison distribution", fontsize=20)
ax[0,0].tick_params(labelsize = 24)
ax[0,0].set_xlabel("ray value", fontsize=20)
ax[0,0].set_ylabel("Probability", fontsize=24)
conv = results
for i in range(4):
conv = scipy.signal.fftconvolve(results,conv,'full')
ax[0,1].plot(np.linspace(0, 15, 15 * 5 + 1), conv)
ax[0,1].set_title("5 Days convolution average", fontsize=20)
ax[0,1].tick_params(labelsize = 24)
ax[0,1].set_xlabel("ray value", fontsize=20)
ax[0,1].set_ylabel("Probability", fontsize=24)
for i in range(5):
conv = scipy.signal.fftconvolve(results,conv,'full')
ax[1,0].plot(np.linspace(0, 15, 15 * 10 + 1), conv)
ax[1,0].set_title("10 Days convolution average", fontsize=20)
ax[1,0].tick_params(labelsize = 24)
ax[1,0].set_xlabel("ray value", fontsize=20)
ax[1,0].set_ylabel("Probability", fontsize=24)
for i in range(10):
conv = scipy.signal.fftconvolve(results,conv,'full')
ax[1,1].plot(np.linspace(0, 15, 15 * 20 + 1), conv)
ax[1,1].set_title("20 Days convolution average", fontsize=20)
ax[1,1].tick_params(labelsize = 24)
ax[1,1].set_xlabel("ray value", fontsize=20)
ax[1,1].set_ylabel("Probability", fontsize=24)
###Output
_____no_output_____
###Markdown
 **Observation:** As the number of days becomes larger, the averaged distribution becomes narrower and more concentrated around the mean value. **Compare with summation** These graphs are essentially the same plots as the summations in the previous part. The reason the distribution looks wider for the summation and narrower for the average is that, in the summation, the distributions accumulate and the width of the central part only grows; but since the central part grows faster than the edges (it has larger values), the ratio of the central part to the fringes gets higher with each convolution, so once we rescale the summed distribution back to the original range it appears narrower. **Central limit theorem** We can observe that, as the number of days increases, the distribution tends more and more towards a Gaussian. The reason is the same as above: as we convolve many times, the ratio of the central part to the fringes grows, so the distribution emphasizes the central mean value and the edges become less and less distinguishable. Eventually, it looks like a Gaussian distribution. **1. D)** I picked N = 10. **Null hypothesis:** If there is no signal, then we expect the background to be Poisson distributed with a mean of 5 per day, summed over 10 days. The stronger the signal, the more events we expect. So what is the probability of observing at least $10 \times 10 = 100$ gamma-rays in 10 days?
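Before answering 1. D), a quick numerical check of the central-limit behaviour described above (a sketch, assuming the 20-day averaged convolution `conv` from the previous cell is still in memory): the 20-day average should sit close to a Gaussian with mean 5 and standard deviation sqrt(5/20).
###Code
# CLT sketch: overlay the 20-day average (converted from a pmf to a density by
# dividing by the grid spacing) with a Gaussian of mean 5 and sigma = sqrt(5/20).
xg = np.linspace(0, 15, 15 * 20 + 1)
plt.plot(xg, conv / (xg[1] - xg[0]), label='20-day average (convolution)')
plt.plot(xg, stats.norm.pdf(xg, loc=5, scale=np.sqrt(5 / 20)), '--', label='Gaussian, mean 5, sigma=sqrt(5/20)')
plt.legend(fontsize=16)
plt.xlabel("ray value", fontsize=20)
plt.ylabel("Probability density", fontsize=20)
###Output
_____no_output_____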
###Code
x = np.linspace(0, 15, 16)
results = stats.poisson.pmf(k = x, mu = 5)
conv = results
for i in range(9):
conv = scipy.signal.fftconvolve(results,conv,'full')
probability = 0;
for i in range(99, 151):
probability += conv[i]
print("Probability of seeing 100 gamma rays in 10 days is: ", probability)
print("sigma is: ", stats.norm.ppf(1 - probability))
###Output
Probability of seeing 100 gamma rays in 10 days is: 4.110069130522364e-10
sigma is: 6.140617355145252
###Markdown
 Problem 2 **Continuous distribution:** Chi-square distribution. **2. A)** Some demonstrations of averaging over more observing intervals are shown below.
###Code
x = np.linspace(0, 25, 10000)
results = stats.chi2.pdf(x, df = 5.)
fig, ax = plt.subplots(2,2)
fig.set_size_inches(20,14)
ax[0,0].plot(x, results)
# ax[0].set_xlim([0, 50])
ax[0,0].set_title("Chi-Square distribution", fontsize=20)
ax[0,0].tick_params(labelsize = 24)
ax[0,0].set_xlabel("x", fontsize=20)
ax[0,0].set_ylabel("Probability", fontsize=24)
conv = results
for i in range(4):
conv = scipy.signal.fftconvolve(results,conv,'full')
ax[0,1].plot(np.linspace(0, 25, 10000 * 5 - 4), conv)
ax[0,1].set_title("5 times convolution average", fontsize=20)
ax[0,1].tick_params(labelsize = 24)
ax[0,1].set_xlabel("x", fontsize=20)
ax[0,1].set_ylabel("Probability", fontsize=24)
for i in range(5):
conv = scipy.signal.fftconvolve(results,conv,'full')
ax[1,0].plot(np.linspace(0, 25, 10000 * 10 - 9), conv)
ax[1,0].set_title("10 times convolution average", fontsize=20)
ax[1,0].tick_params(labelsize = 24)
ax[1,0].set_xlabel("x", fontsize=20)
ax[1,0].set_ylabel("Probability", fontsize=24)
for i in range(10):
conv = scipy.signal.fftconvolve(results,conv,'full')
ax[1,1].plot(np.linspace(0, 25, 10000 * 20 - 19), conv)
ax[1,1].set_title("20 times convolution average", fontsize=20)
ax[1,1].tick_params(labelsize = 24)
ax[1,1].set_xlabel("x", fontsize=20)
ax[1,1].set_ylabel("Probability", fontsize=24)
###Output
_____no_output_____
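###Markdown
A quick Monte Carlo sketch of the narrowing seen above: the average of N independent chi-square(5) draws keeps a mean of 5 while its spread shrinks roughly like 1/sqrt(N).
###Code
# Monte Carlo sketch: the spread of the N-interval average shrinks like 1/sqrt(N).
draws = stats.chi2.rvs(df=5., size=(20, 100000))
for n in [1, 5, 10, 20]:
    avg = draws[:n].mean(axis=0)
    print(n, "intervals: mean %.3f, std %.3f (expected std %.3f)" % (avg.mean(), avg.std(), np.sqrt(2 * 5 / n)))
###Output
_____no_output_____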
###Markdown
 **Observation: with summing and averaging over more observing intervals, the distribution gets narrower.** **2. B)** Yes, it approaches a Gaussian distribution, starting from roughly N = 10. Problem 3 Assume the width of the background Gaussian distribution is 2. **3. A)** Assume the signal I saw has a strength of 6.7.
###Code
print("Probability of this detection is: ", 1 - stats.norm.cdf(6.7/2))
print("Sigma is: ", stats.norm.ppf(stats.norm.cdf(6.7/2)))
###Output
Probability of this detection is: 0.00040405780186403284
Sigma is: 3.349999999999992
###Markdown
 We can't claim a discovery, since 3.35 sigma is less than the traditional threshold of 5 sigma. **3. B)** The statistical question is: what is the probability distribution if we have 10k trials to consider? The probability distribution will be the 10k-times convolution of the original distribution. I tried to do the 10000-times convolution below, but I failed to achieve it: the computation time is too high.
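Before that attempt, one common shortcut (a sketch, not the full convolution): for N independent trials the global tail probability can be obtained from the single-trial one as p_global = 1 - (1 - p_local)^N, which reduces to N * p_local only when N * p_local is much smaller than 1.
###Code
# Trial-factor sketch: correct the single-trial p-value for 10,000 independent looks.
p_local = 1 - stats.norm.cdf(6.7 / 2)            # single-trial tail probability (sigma = 2)
N_trials = 10000
p_global_exact = 1 - (1 - p_local) ** N_trials   # exact for independent trials
p_global_approx = N_trials * p_local             # only meaningful when N*p << 1
print("p_local:", p_local)
print("p_global, 1-(1-p)^N:", p_global_exact)
print("p_global, N*p approximation:", p_global_approx)
###Output
_____no_output_____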
###Code
x = np.linspace(-10, 10, 10000)
results = stats.norm.pdf(x)
results = results * 10000
plt.plot(np.linspace(-10 * 2, 10 * 2, 10000), results)
plt.tick_params(labelsize = 24)
plt.title("5 Days convolution", fontsize=24)
plt.xlabel(x", fontsize=24)
plt.ylabel("Probability", fontsize=24)
###Output
_____no_output_____
###Markdown
**3. C)**
###Code
x = np.linspace(-10, 10, 10000)
results = stats.norm.pdf(x)
results = results * 10000
print("Probability of this detection is: ", 10000*(1 - stats.norm.cdf(6.7/2)))
###Output
Probability of this detection is: 4.040578018640328
Sigma is: -2.106673418553627
###Markdown
 Problem 4 **4. A) & B)**
###Code
print("Version 1: signal correspond to a 5-sigma detection is ", 5 * 2)
print("Version 2: signal correspond to a 5-sigma detection is ", 2 * stats.norm.ppf(1 - ((1 - stats.norm.cdf(5))/10000)))
###Output
Version 1: signal correspond to a 5-sigma detection is 10
Version 2: signal correspond to a 5-sigma detection is 13.10107497378331
###Markdown
 **4. C)** It just has to be about 3.1 units brighter than in version 1.
###Code
x = np.linspace(-10, 10, 10000)
results = stats.norm.pdf(x)
fig, ax = plt.subplots(1,2)
fig.set_size_inches(20,7)
ax[0].plot(np.linspace(-10 * 2, 10 * 2, 10000), results)
ax[0].set_title("Version 1", fontsize=24)
ax[0].set_yscale("log")
ax[0].tick_params(labelsize = 24)
ax[0].set_xlabel("x", fontsize=24)
ax[0].set_ylabel("Probability", fontsize=24)
results = results * 10000
ax[1].plot(np.linspace(-10 * 2, 10 * 2, 10000), results)
ax[1].set_title("Version 2", fontsize=20)
ax[1].set_yscale("log")
ax[1].tick_params(labelsize = 24)
ax[1].set_xlabel("x", fontsize=20)
ax[1].set_ylabel("Probability", fontsize=24)
###Output
_____no_output_____
###Markdown
 The reason the sensitivity penalty is so low is that taking 10k trials essentially lifts the distribution up by a factor of 10,000, while the normal distribution drops off exponentially far from the mean. **4. D)** Below is an example with 100,000,000 trials; the threshold only increases from 13.1 to 15.6.
###Code
print("Version 2: signal correspond to a 5-sigma detection is ", 2 * stats.norm.ppf(1 - ((1 - stats.norm.cdf(5))/100000000)))
###Output
Version 2: signal correspond to a 5-sigma detection is 15.61761499383638
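###Markdown
As a closing sketch, the same trial-factor logic can be tabulated for a range of trial counts to show how slowly the 5-sigma-equivalent threshold grows.
###Code
# Sketch: 5-sigma-equivalent signal threshold (background sigma = 2) versus the
# number of independent trials, using the same correction as in the cells above.
for n_trials in [1, 1e2, 1e4, 1e6, 1e8]:
    p_needed = (1 - stats.norm.cdf(5)) / n_trials
    threshold = 2 * stats.norm.ppf(1 - p_needed)
    print("%10d trials -> threshold %.2f" % (int(n_trials), threshold))
###Output
_____no_output_____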
|
module1-introduction-to-sql/Grill_Unit_3_Sprint_2_Assignment_1_Part_2.ipynb | ###Markdown
###Code
!wget https://github.com/serinamarie/DS-Unit-3-Sprint-2-SQL-and-Databases/raw/master/module1-introduction-to-sql/buddymove_holidayiq.csv
# connect to sqlite database
import sqlite3
conn_buddy = sqlite3.connect('buddymove_holidayiq.sqlite3')
csr = conn_buddy.cursor()
# name the csv
import pandas as pd
df = pd.read_csv('buddymove_holidayiq.csv')
# Familiarize yourself with the dataframe
df.tail()
# Convert csv to sql via the connection to the sqlite database
df.to_sql('review', conn_buddy, if_exists='replace')
# Take a look at your new table
csr.execute("SELECT * FROM review;").fetchall()
# How many rows do you have in this table?
query = """
SELECT COUNT(*) FROM review;
"""
csr.execute(query).fetchall()
# How many users who reviewed at least 100 Nature in the category
# also reviewed at least 100 in the Shopping Category
high_nature_shopping_query = '''
SELECT COUNT(*) FROM review
WHERE Nature >= 100
AND Shopping >= 100
'''
n_s = csr.execute(high_nature_shopping_query).fetchall()
# Double-check our answers by looking at counts of each query
print('Answer:', n_s)
avg_review_sports_query = '''
SELECT AVG(Sports)
FROM review
'''
# SELECT avg(c)
# FROM
# (
# SELECT character_id, COUNT(item_id) as c
# FROM charactercreator_character_inventory
# GROUP BY character_id
# )
print('Average # of Sports Reviews', csr.execute(avg_review_sports_query).fetchall())
avg_review_religious_query = '''
SELECT AVG(Religious)
FROM review
'''
print('Average # of Religious Reviews', csr.execute(avg_review_religious_query).fetchall())
avg_review_nature_query = '''
SELECT AVG(Nature)
FROM review
'''
print('Average # of Nature Reviews', csr.execute(avg_review_nature_query).fetchall())
avg_review_shopping_query = '''
SELECT AVG(Shopping)
FROM review
'''
print('Average # of Shopping Reviews', csr.execute(avg_review_shopping_query).fetchall())
avg_review_theatre_query = '''
SELECT AVG(Theatre)
FROM review
'''
print('Average # of Theatre Reviews', csr.execute(avg_review_theatre_query).fetchall())
avg_review_picnic_query = '''
SELECT AVG(Picnic)
FROM review
'''
print('Average # of Picnic Reviews', csr.execute(avg_review_picnic_query).fetchall())
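# A more compact alternative (sketch): compute all six averages in one query
# instead of one query per column; column names taken from the queries above.
avg_all_query = '''
SELECT AVG(Sports), AVG(Religious), AVG(Nature),
       AVG(Shopping), AVG(Theatre), AVG(Picnic)
FROM review
'''
# print('All averages:', csr.execute(avg_all_query).fetchall())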
###Output
Average # of Picnic Reviews [(120.40160642570281,)]
|
Mapas.ipynb | ###Markdown
###Code
!pip install folium
!pip install geopandas
import folium
import pandas as pd
import geopandas
import random
df=pd.read_csv('Obesidad2.csv')
print(df.head())
m = folium.Map(
location=[23.63, -102.55],
zoom_start=5,
tiles='Stamen Terrain'
)
df1=geopandas.read_file('mexican_states.geojson')
r = lambda: random.randint(0,255)
t = lambda: random.randint(0,255)
for i in range(32):
style={'fillColor':'#%02X%02X%02X' % (r(),r(),r()),'color':'#%02X%02X%02X' % (t(),t(),t())}
d=df1.iat[i,-1]
folium.GeoJson(d,name='geojson',style_function=lambda x:style).add_to(m)
tooltip = 'Click me!'
folium.Marker([21.8818,-102.291], popup='<i>Aguascalientes</i>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[0]), tooltip=tooltip).add_to(m)
folium.Marker([31, -115.4], popup='<b>Baja California</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[1]), tooltip=tooltip).add_to(m)
folium.Marker([24.1333, -110.3], popup='<b>Baja California Sur</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[2]), tooltip=tooltip).add_to(m)
folium.Marker([19.8454, -90.5237], popup='<b>Campeche</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[3]), tooltip=tooltip).add_to(m)
folium.Marker([16.75, -93.1167], popup='<b>Chiapas</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[4]), tooltip=tooltip).add_to(m)
folium.Marker([28.6353, -106.089], popup='<b>Chihuahua</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[5]), tooltip=tooltip).add_to(m)
folium.Marker([19.4978, -99.1269], popup='<b>Ciudad de México</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[6]), tooltip=tooltip).add_to(m)
folium.Marker([25.5444, -103.442], popup='<b>Coahuila</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[7]), tooltip=tooltip).add_to(m)
folium.Marker([19.2433, -103.725], popup='<b>Colima</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[8]), tooltip=tooltip).add_to(m)
folium.Marker([24.0277, -104.653], popup='<b>Durango</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[9]), tooltip=tooltip).add_to(m)
folium.Marker([19.28786, -99.65324], popup='<b>Estado de México</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[10]), tooltip=tooltip).add_to(m)
folium.Marker([21.0181, -101.258], popup='<b>Guanajuato</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[11]), tooltip=tooltip).add_to(m)
folium.Marker([16.8638, -99.8816], popup='<b>Guerrero</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[12]), tooltip=tooltip).add_to(m)
folium.Marker([20.1153, -98.7459], popup='<b>Hidalgo</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[13]), tooltip=tooltip).add_to(m)
folium.Marker([20.66682, -103.39182], popup='<b>Jalisco</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[14]), tooltip=tooltip).add_to(m)
folium.Marker([19.7006, -101.186], popup='<b>Michoacan</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[15]), tooltip=tooltip).add_to(m)
folium.Marker([18.9261, -99.23075], popup='<b>Morelos</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[16]), tooltip=tooltip).add_to(m)
folium.Marker([21.50951, -104.89569], popup='<b>Nayarit</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[17]), tooltip=tooltip).add_to(m)
folium.Marker([25.67507, -100.31847], popup='<b>Nuevo León</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[18]), tooltip=tooltip).add_to(m)
folium.Marker([17.06542, -96.72365], popup='<b>Oaxaca</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[19]), tooltip=tooltip).add_to(m)
folium.Marker([19.03793, -98.20346], popup='<b>Puebla</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[20]), tooltip=tooltip).add_to(m)
folium.Marker([20.58806, -100.38806], popup='<b>Queretaro</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[21]), tooltip=tooltip).add_to(m)
folium.Marker([18.51413, -88.30381], popup='<b>Quintana Roo</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[22]), tooltip=tooltip).add_to(m)
folium.Marker([22.1498200, -100.97916], popup='<b>San Luis Potosí</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[23]), tooltip=tooltip).add_to(m)
folium.Marker([24.79032, -107.38782], popup='<b>Sinaloa</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[24]), tooltip=tooltip).add_to(m)
folium.Marker([29.1026, -110.97732], popup='<b>Sonora</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[25]), tooltip=tooltip).add_to(m)
folium.Marker([17.98689, -92.93028], popup='<b>Tabasco</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[26]), tooltip=tooltip).add_to(m)
folium.Marker([23.74174, -99.14599], popup='<b>Tamaulipas</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[27]), tooltip=tooltip).add_to(m)
folium.Marker([19.31905, -98.19982], popup='<b>Tlaxcala</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[28]), tooltip=tooltip).add_to(m)
folium.Marker([19.53124, -96.91589], popup='<b>Veracruz</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[29]), tooltip=tooltip).add_to(m)
folium.Marker([20.97, -89.62], popup='<b>Yucatan</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[30]), tooltip=tooltip).add_to(m)
folium.Marker([22.76843, -102.58141], popup='<b>Zacatecas</b>'+str("\nPorcentaje de obesidad\n")+str(df["Porcentaje de poblacion de 20 anos y mas con obesidad"].iloc[31]), tooltip=tooltip).add_to(m)
m
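# Sketch of a less repetitive alternative: add the markers in a loop from a
# mapping of state name -> (lat, lon). The coords dict is hypothetical (not in
# the original data) and the loop assumes the df rows follow the same state
# order as the manual markers above. Not called here, so the map built above
# is left untouched.
def add_obesity_markers(fmap, frame, coords, column="Porcentaje de poblacion de 20 anos y mas con obesidad"):
    # coords: dict like {"Aguascalientes": (21.8818, -102.291), ...}
    for i, (state, (lat, lon)) in enumerate(coords.items()):
        folium.Marker(
            [lat, lon],
            popup="<b>%s</b>\nPorcentaje de obesidad\n%s" % (state, frame[column].iloc[i]),
            tooltip="Click me!",
        ).add_to(fmap)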
m1 = folium.Map(
location=[23.63, -102.55],
zoom_start=5,
tiles='Stamen Terrain'
)
r = lambda: random.randint(0,255)
t = lambda: random.randint(0,255)
for i in range(32):
style={'fillColor':'#%02X%02X%02X' % (r(),r(),r()),'color':'#%02X%02X%02X' % (t(),t(),t())}
d=df1.iat[i,-1]
folium.GeoJson(d,name='geojson',style_function=lambda x:style).add_to(m1)
tooltip = 'Click me!'
folium.Marker([21.8818,-102.291], popup='<i>Aguascalientes</i>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[0]), tooltip=tooltip).add_to(m1)
folium.Marker([31, -115.4], popup='<b>Baja California</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[1]), tooltip=tooltip).add_to(m1)
folium.Marker([24.1333, -110.3], popup='<b>Baja California Sur</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[2]), tooltip=tooltip).add_to(m1)
folium.Marker([19.8454, -90.5237], popup='<b>Campeche</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[3]), tooltip=tooltip).add_to(m1)
folium.Marker([16.75, -93.1167], popup='<b>Chiapas</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[4]), tooltip=tooltip).add_to(m1)
folium.Marker([28.6353, -106.089], popup='<b>Chihuahua</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[5]), tooltip=tooltip).add_to(m1)
folium.Marker([19.4978, -99.1269], popup='<b>Ciudad de México</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[6]), tooltip=tooltip).add_to(m1)
folium.Marker([25.5444, -103.442], popup='<b>Coahuila</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[7]), tooltip=tooltip).add_to(m1)
folium.Marker([19.2433, -103.725], popup='<b>Colima</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[8]), tooltip=tooltip).add_to(m1)
folium.Marker([24.0277, -104.653], popup='<b>Durango</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[9]), tooltip=tooltip).add_to(m1)
folium.Marker([19.28786, -99.65324], popup='<b>Estado de México</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[10]), tooltip=tooltip).add_to(m1)
folium.Marker([21.0181, -101.258], popup='<b>Guanajuato</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[11]), tooltip=tooltip).add_to(m1)
folium.Marker([16.8638, -99.8816], popup='<b>Guerrero</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[12]), tooltip=tooltip).add_to(m1)
folium.Marker([20.1153, -98.7459], popup='<b>Hidalgo</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[13]), tooltip=tooltip).add_to(m1)
folium.Marker([20.66682, -103.39182], popup='<b>Jalisco</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[14]), tooltip=tooltip).add_to(m1)
folium.Marker([19.7006, -101.186], popup='<b>Michoacan</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[15]), tooltip=tooltip).add_to(m1)
folium.Marker([18.9261, -99.23075], popup='<b>Morelos</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[16]), tooltip=tooltip).add_to(m1)
folium.Marker([21.50951, -104.89569], popup='<b>Nayarit</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[17]), tooltip=tooltip).add_to(m1)
folium.Marker([25.67507, -100.31847], popup='<b>Nuevo León</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[18]), tooltip=tooltip).add_to(m1)
folium.Marker([17.06542, -96.72365], popup='<b>Oaxaca</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[19]), tooltip=tooltip).add_to(m1)
folium.Marker([19.03793, -98.20346], popup='<b>Puebla</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[20]), tooltip=tooltip).add_to(m1)
folium.Marker([20.58806, -100.38806], popup='<b>Queretaro</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[21]), tooltip=tooltip).add_to(m1)
folium.Marker([18.51413, -88.30381], popup='<b>Quintana Roo</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[22]), tooltip=tooltip).add_to(m1)
folium.Marker([22.1498200, -100.97916], popup='<b>San Luis Potosí</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[23]), tooltip=tooltip).add_to(m1)
folium.Marker([24.79032, -107.38782], popup='<b>Sinaloa</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[24]), tooltip=tooltip).add_to(m1)
folium.Marker([29.1026, -110.97732], popup='<b>Sonora</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[25]), tooltip=tooltip).add_to(m1)
folium.Marker([17.98689, -92.93028], popup='<b>Tabasco</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[26]), tooltip=tooltip).add_to(m1)
folium.Marker([23.74174, -99.14599], popup='<b>Tamaulipas</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[27]), tooltip=tooltip).add_to(m1)
folium.Marker([19.31905, -98.19982], popup='<b>Tlaxcala</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[28]), tooltip=tooltip).add_to(m1)
folium.Marker([19.53124, -96.91589], popup='<b>Veracruz</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[29]), tooltip=tooltip).add_to(m1)
folium.Marker([20.97, -89.62], popup='<b>Yucatan</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[30]), tooltip=tooltip).add_to(m1)
folium.Marker([22.76843, -102.58141], popup='<b>Zacatecas</b>'+str("\nPorcentaje de hipertension\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de hipertensión."].iloc[31]), tooltip=tooltip).add_to(m1)
m1
m3 = folium.Map(
location=[23.63, -102.55],
zoom_start=5,
tiles='Stamen Terrain'
)
r = lambda: random.randint(0,255)
t = lambda: random.randint(0,255)
for i in range(32):
style={'fillColor':'#%02X%02X%02X' % (r(),r(),r()),'color':'#%02X%02X%02X' % (t(),t(),t())}
d=df1.iat[i,-1]
# bind the current style via a default argument; a plain lambda x: style would late-bind and draw every state with the last generated colors
folium.GeoJson(d,name='geojson',style_function=lambda x, style=style: style).add_to(m3)
tooltip = 'Click me!'
folium.Marker([21.8818,-102.291], popup='<i>Aguascalientes</i>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[0]), tooltip=tooltip).add_to(m3)
folium.Marker([31, -115.4], popup='<b>Baja California</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[1]), tooltip=tooltip).add_to(m3)
folium.Marker([24.1333, -110.3], popup='<b>Baja California Sur</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[2]), tooltip=tooltip).add_to(m3)
folium.Marker([19.8454, -90.5237], popup='<b>Campeche</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[3]), tooltip=tooltip).add_to(m3)
folium.Marker([16.75, -93.1167], popup='<b>Chiapas</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[4]), tooltip=tooltip).add_to(m3)
folium.Marker([28.6353, -106.089], popup='<b>Chihuahua</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[5]), tooltip=tooltip).add_to(m3)
folium.Marker([19.4978, -99.1269], popup='<b>Ciudad de México</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[6]), tooltip=tooltip).add_to(m3)
folium.Marker([25.5444, -103.442], popup='<b>Coahuila</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[7]), tooltip=tooltip).add_to(m3)
folium.Marker([19.2433, -103.725], popup='<b>Colima</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[8]), tooltip=tooltip).add_to(m3)
folium.Marker([24.0277, -104.653], popup='<b>Durango</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[9]), tooltip=tooltip).add_to(m3)
folium.Marker([19.28786, -99.65324], popup='<b>Estado de México</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[10]), tooltip=tooltip).add_to(m3)
folium.Marker([21.0181, -101.258], popup='<b>Guanajuato</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[11]), tooltip=tooltip).add_to(m3)
folium.Marker([16.8638, -99.8816], popup='<b>Guerrero</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[12]), tooltip=tooltip).add_to(m3)
folium.Marker([20.1153, -98.7459], popup='<b>Hidalgo</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[13]), tooltip=tooltip).add_to(m3)
folium.Marker([20.66682, -103.39182], popup='<b>Jalisco</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[14]), tooltip=tooltip).add_to(m3)
folium.Marker([19.7006, -101.186], popup='<b>Michoacan</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[15]), tooltip=tooltip).add_to(m3)
folium.Marker([18.9261, -99.23075], popup='<b>Morelos</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[16]), tooltip=tooltip).add_to(m3)
folium.Marker([21.50951, -104.89569], popup='<b>Nayarit</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[17]), tooltip=tooltip).add_to(m3)
folium.Marker([25.67507, -100.31847], popup='<b>Nuevo León</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[18]), tooltip=tooltip).add_to(m3)
folium.Marker([17.06542, -96.72365], popup='<b>Oaxaca</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[19]), tooltip=tooltip).add_to(m3)
folium.Marker([19.03793, -98.20346], popup='<b>Puebla</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[20]), tooltip=tooltip).add_to(m3)
folium.Marker([20.58806, -100.38806], popup='<b>Queretaro</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[21]), tooltip=tooltip).add_to(m3)
folium.Marker([18.51413, -88.30381], popup='<b>Quintana Roo</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[22]), tooltip=tooltip).add_to(m3)
folium.Marker([22.1498200, -100.97916], popup='<b>San Luis Potosí</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[23]), tooltip=tooltip).add_to(m3)
folium.Marker([24.79032, -107.38782], popup='<b>Sinaloa</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[24]), tooltip=tooltip).add_to(m3)
folium.Marker([29.1026, -110.97732], popup='<b>Sonora</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[25]), tooltip=tooltip).add_to(m3)
folium.Marker([17.98689, -92.93028], popup='<b>Tabasco</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[26]), tooltip=tooltip).add_to(m3)
folium.Marker([23.74174, -99.14599], popup='<b>Tamaulipas</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[27]), tooltip=tooltip).add_to(m3)
folium.Marker([19.31905, -98.19982], popup='<b>Tlaxcala</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[28]), tooltip=tooltip).add_to(m3)
folium.Marker([19.53124, -96.91589], popup='<b>Veracruz</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[29]), tooltip=tooltip).add_to(m3)
folium.Marker([20.97, -89.62], popup='<b>Yucatan</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[30]), tooltip=tooltip).add_to(m3)
folium.Marker([22.76843, -102.58141], popup='<b>Zacatecas</b>'+str("\nPorcentaje de diabetes\n")+str(df["Porcentaje de población de 20 años y más con diagnóstico previo de diabetes."].iloc[31]), tooltip=tooltip).add_to(m3)
m3
###Output
_____no_output_____ |
articles/Prettierplotprimer.ipynb | ###Markdown
__Table of contents__1. [Basic plots](Basic-plots) 1. [Example 1](Example-1) 1. [Example 2](Example-2) 1. [Example 3](Example-3)1. [Advanced plots](Advanced-plots) 1. [Example 4](Example-4) 1. [Example 5](Example-5) 1. [Example 6](Example-6) 1. [Example 7](Example-7)
###Code
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:95% !important; }</style>"))
# data extensions and settings
import numpy as np
import pandas as pd
# modeling extensions
from sklearn.linear_model import Lasso, Ridge, ElasticNet, LinearRegression, LogisticRegression, SGDRegressor
from sklearn.model_selection import KFold, train_test_split, GridSearchCV, StratifiedKFold, cross_val_score, RandomizedSearchCV
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
# visualization extensions and settings
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
try:
# import mlmachine as mlm
# from prettierplot.plotter import PrettierPlot
# import prettierplot.style as style
# import prettierplot.data as data
import asdfasd
except ModuleNotFoundError:
import sys
sys.path.append("../../mlmachine") if "../../../mlmachine" not in sys.path else None
sys.path.append("../../prettierplot") if "../../../prettierplot" not in sys.path else None
import mlmachine as mlm
from prettierplot.plotter import PrettierPlot
import prettierplot.data as data
import prettierplot.style as style
else:
print('This notebook relies on the libraries mlmachine and prettierplot. Please run:')
print('\tpip install mlmachine')
print('\tpip install prettierplot')
###Output
_____no_output_____
###Markdown
Basic plots Example 1 __Load data__
###Code
# load employee attition dataset
df = data.attrition()
# capture unique EmployeeField values and frequency counts
unique_vals, unique_counts = np.unique(
df[df["EducationField"].notnull()]["EducationField"], return_counts=True
)
###Output
_____no_output_____
###Markdown
__Plain visualization__
###Code
# setup figure
fig = plt.figure()
plt.style.use('default')
plt.figure(facecolor="white")
# create plot
plt.bar(x=unique_vals, height=unique_counts)
# decorate
plt.title("Education field")
plt.ylabel("Category counts")
plt.show()
###Output
_____no_output_____
###Markdown
__Pretty visualization__
###Code
# create plotting instance
p = PrettierPlot(chart_scale=10)
# create Axes object and decorate
ax = p.make_canvas(title="Education field category counts", y_label="Category counts", y_shift=0.47)
# add plots
p.bar_v(
x=unique_vals,
counts=unique_counts,
label_rotate=90,
ax=ax,
)
###Output
_____no_output_____
###Markdown
Example 2 __Load data__
###Code
# load housing dataset
df, _ = data.housing()
# set predictor and target features
X = df["1stFlrSF"].values.reshape(-1,1)
y = df["SalePrice"].values.reshape(-1,1)
# instantiate model object and fit
model = LinearRegression()
model.fit(X,y)
# generate predictions
xs = np.linspace(0, 5000, 10000).reshape(-1,1)
preds = model.predict(xs)
###Output
_____no_output_____
###Markdown
__Plain visualization__
###Code
# setup figure
fig = plt.figure()
plt.style.use('default')
plt.figure(facecolor="white")
# create plot
plt.scatter(X,y)
plt.plot(xs, preds)
# decorate
plt.title("Sale price by 1st floor square footage")
plt.ylabel("Sale price")
plt.ylabel("1st floor square footage")
plt.show()
###Output
_____no_output_____
###Markdown
__Pretty visualization__
###Code
# create plotting instance
p = PrettierPlot(chart_scale=10)
# create Axes object and decorate
ax = p.make_canvas(
title="Sale price by 1st floor square footage",
x_label="1st floor square footage",
y_label="Sale price",
y_shift=0.67
)
# add plots
p.scatter_2d(x=X, y=y, x_units="f", y_units="d", ax=ax)
p.line(x=xs, y=preds, x_units="f", y_units="d", ax=ax)
###Output
_____no_output_____
###Markdown
Example 3 __Load data__
###Code
# load titanic dataset
df, _ = data.titanic()
# set predictor and target variables
df = df[["Survived","Pclass","Fare","Age"]].dropna()
X = df.iloc[:,1:].values
y = df.iloc[:,0].values
# build parameter grid
k_range = np.arange(1, 41)
param_grid = dict(n_neighbors=k_range)
# run GridSearchCV
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid, cv=10, scoring="accuracy")
grid_search.fit(X, y)
# capture mean accuracy for each value of k
all_means = grid_search.cv_results_["mean_test_score"]
###Output
_____no_output_____
###Markdown
__Plain visualization__
###Code
# setup figure
fig = plt.figure()
plt.style.use('default')
plt.figure(facecolor="white")
# create plot
plt.plot(k_range, all_means)
# decorate
plt.title("Accuracy by k-value")
plt.xlabel("k")
plt.ylabel("Accuracy")
# plt.savefig(fname="./ex3_plain.svg", format="svg")
plt.show()
###Output
_____no_output_____
###Markdown
__Pretty visualization__
###Code
# create plotting instance
p = PrettierPlot(chart_scale=10)
# create Axes object and decorate
ax = p.make_canvas(
title="Accuracy by k-value",
x_label="k-value",
y_label="Accuracy",
y_shift=0.6,
)
# add plot
p.line(x=k_range, y=all_means, x_units="f", y_units="p", marker_on=True, ax=ax)
###Output
_____no_output_____
###Markdown
Advanced plots Example 4 __Load data__
###Code
# load employee attrition dataset
df = data.attrition()
###Output
_____no_output_____
###Markdown
__Plain visualization__
###Code
sns.set()
g = sns.FacetGrid(
df,
col="MaritalStatus",
row="Gender",
hue="JobSatisfaction",
height=4,
aspect=1,
margin_titles=True,
)
g = g.map(
plt.scatter,
"MonthlyIncome",
"TotalWorkingYears"
).add_legend()
plt.savefig(fname="./ex3_plain.svg", format="svg")
###Output
_____no_output_____
###Markdown
__Pretty visualization__
###Code
# facet MonthlyIncome vs TotalWorkingYears across Gender and MaritalStatus
p = PrettierPlot(chart_scale=10)
p.facet_cat_num_scatter(
df,
x="MonthlyIncome",
y="TotalWorkingYears",
cat_row="Gender",
cat_col="MaritalStatus",
split="JobSatisfaction",
bbox=(1.6, 1.2),
aspect=1.5,
height=3,
legend_labels=["1","2","3","4"],
x_units="d",
color_map="viridis"
)
###Output
Tight layout not applied. tight_layout cannot make axes height small enough to accommodate all axes decorations
###Markdown
Example 5 __Load data__
###Code
# load employee attrition dataset
df, _ = data.titanic()
###Output
_____no_output_____
###Markdown
__Plain visualization__
###Code
sns.set()
g = sns.FacetGrid(
df,
row="Sex",
col="Embarked",
hue="Survived",
# hue_order=df[split].sort_values().drop_duplicates().values.tolist()
# if split is not None
# else None,
# palette=style.style_hex_mid,
despine=True,
height=4,
aspect=1,
margin_titles=True,
)
g.map(
plt.hist,
"Age",
alpha=0.5,
).add_legend()
###Output
_____no_output_____
###Markdown
__Pretty visualization__
###Code
#
p = PrettierPlot()
p.facet_cat_num_hist(
df=df,
split="Survived",
legend_labels=["Died", "Lived"],
cat_row="Sex",
cat_col="Embarked",
num_col="Age",
bbox=(1.9, 1.0),
height=4,
aspect=1,
)
###Output
_____no_output_____
###Markdown
Example 6 __Load data__
###Code
# load employee attrition dataset
df, _ = data.titanic()
###Output
_____no_output_____
###Markdown
__Plain visualization__
###Code
sns.set()
g = sns.FacetGrid(
df,
col="Embarked",
# row="Sex",
aspect=1,
height=4,
margin_titles=True
)
g.map(
sns.pointplot,
"Sex",
"Survived",
"Pclass",
# hue_order=["1","2","3"],
# order=df[x].sort_values().drop_duplicates().values.tolist(),
# hue_order=df[split].sort_values().drop_duplicates().values.tolist(),
# palette=style.style_hex_mid,
alpha=0.75,
ci=None,
).add_legend()
###Output
Using the pointplot function without specifying `order` is likely to produce an incorrect plot.
Using the pointplot function without specifying `hue_order` is likely to produce an incorrect plot.
###Markdown
__Pretty visualization__
###Code
#
p = PrettierPlot()
p.facet_two_cat_point(
df=df,
x="Sex",
y="Survived",
split="Pclass",
cat_col="Embarked",
aspect=1.0,
height=5,
bbox=(1.3, 1.2),
legend_labels=["1st class", "2nd class", "3rd class"],
)
###Output
_____no_output_____
###Markdown
Example 7 __Load data__
###Code
# load titanic dataset
df, _ = data.titanic()
###Output
_____no_output_____
###Markdown
__Plain visualization__
###Code
sns.set()
g = sns.barplot(
data=df,
x="Embarked",
y="Survived",
hue="Pclass",
# palette=style.style_hex_mid,
# order=df[x].sort_values().drop_duplicates().values.tolist(),
# hue_order=df[split].sort_values().drop_duplicates().values.tolist()
# if split is not None
# else None,
ci=None,
)
###Output
_____no_output_____
###Markdown
__Pretty visualization__
###Code
# facet Pclass vs Embarked
p = PrettierPlot(chart_scale=12)
ax = p.make_canvas(title="Survivorship, embark location by passenger class", y_shift=0.7)
p.facet_two_cat_bar(
df=df,
x="Embarked",
y="Survived",
split="Pclass",
y_units="ff",
legend_labels=["1st class", "2nd class", "3rd class"],
ax=ax,
)
###Output
_____no_output_____
###Markdown
extra
###Code
df = data.attrition()
# correlation heat map
p = PrettierPlot()
ax = p.make_canvas()
p.corr_heatmap(df=df.select_dtypes(include="number").iloc[:,10:], annot=False, ax=ax)
# pair plot
p = PrettierPlot(chart_scale=12)
p.pair_plot(df=df.iloc[:,:5], diag_kind="auto")
# pair plot
p = PrettierPlot(chart_scale=12)
p.pair_plot(
df=train.data.dropna(),
diag_kind="kde",
target=train.target,
columns=["Age", "Fare", "Pclass", "Parch", "SibSp"],
legend_labels=["Died", "Survived"],
bbox=(2.0, 0.0),
)
###Output
_____no_output_____ |
nbs_gil/dl2/01_matmul.ipynb | ###Markdown
Broadcasting
###Code
c = tensor([10, 20, 30])
m = tensor([[11, 12, 13], [21, 22, 23], [31, 32, 33]])
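# c has shape [3] and m has shape [3, 3]; c is broadcast across the rows of m, so each row gets c added element-wise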
c + m
t = c.expand_as(m)
t.storage()
t.stride()
t.shape
c.unsqueeze(0).shape
c.unsqueeze(1).shape
c[None,:].shape
c[:,None].shape
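# indexing with None inserts a new axis of size 1: c[None,:] is equivalent to c.unsqueeze(0), and c[:,None] to c.unsqueeze(1)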
def matmul(a, b):
ar, ac = a.shape
br, bc = b.shape
res = torch.zeros([ar, bc])  # result of an (ar x ac) @ (br x bc) matmul has shape (ar, bc)
for i in range(ar):
# print(a[i][:,None].shape, a[i].unsqueeze(-1).shape)
# res[i,:] = (a[i].unsqueeze(-1) * b).sum(dim=0)
res[i,:] = (a[i][:,None] * b).sum(dim=0)
return res
a
matmul(a, b)
%time t3=matmul(m1, m2)
a @ b
def matmul(a, b):
ar, ac = a.shape
br, bc = b.shape
res = torch.zeros([ar, bc])  # result of an (ar x ac) @ (br x bc) matmul has shape (ar, bc)
for i in range(ar):
# print(a[i][:,None].shape, a[i].unsqueeze(-1).shape)
# res[i,:] = (a[i].unsqueeze(-1) * b).sum(dim=0)
res[i,:] = (a[i][:,None] * b).sum(dim=0)
return res
%timeit -n 10 _=matmul(a, b)
###Output
90.1 µs ± 18.7 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
###Markdown
Einstein Summation
###Code
def matmul(a, b):
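# "ik,kj->ij": for each output position (i, j), multiply a[i, k] by b[k, j] and sum over the repeated index k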
return torch.einsum("ik,kj->ij", a, b)
a @ b
matmul(a, b)
%timeit -n 10 _=matmul(a, b)
###Output
51.1 µs ± 13.1 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
###Markdown
Pytorch
###Code
%timeit -n 10 t2 = m1.matmul(m2)
###Output
The slowest run took 4.33 times longer than the fastest. This could mean that an intermediate result is being cached.
11 µs ± 8.05 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
###Markdown
Export
###Code
!python notebook2script.py 01_matmul.ipynb
###Output
Converted 01_matmul.ipynb to exp/nb_01.py
|
Advanced Computer Vision with TensorFlow/Week 1 - Introduction to Computer Vision/Copy of C3_W1_Lab_1_transfer_learning_cats_dogs.ipynb | ###Markdown
Basic transfer learning with cats and dogs data Import tensorflow
###Code
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
###Output
_____no_output_____
###Markdown
Import modules and download the cats and dogs dataset.
###Code
import urllib.request
import os
import zipfile
import random
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers
from tensorflow.keras import Model
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.optimizers import RMSprop
from shutil import copyfile
data_url = "https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip"
data_file_name = "catsdogs.zip"
download_dir = '/tmp/'
urllib.request.urlretrieve(data_url, data_file_name)
zip_ref = zipfile.ZipFile(data_file_name, 'r')
zip_ref.extractall(download_dir)
zip_ref.close()
###Output
_____no_output_____
###Markdown
Check that the dataset has the expected number of examples.
###Code
print("Number of cat images:",len(os.listdir('/tmp/PetImages/Cat/')))
print("Number of dog images:", len(os.listdir('/tmp/PetImages/Dog/')))
# Expected Output:
# Number of cat images: 12501
# Number of dog images: 12501
###Output
_____no_output_____
###Markdown
Create some folders that will store the training and test data.- There will be a training folder and a testing folder.- Each of these will have a subfolder for cats and another subfolder for dogs.
###Code
try:
os.mkdir('/tmp/cats-v-dogs')
os.mkdir('/tmp/cats-v-dogs/training')
os.mkdir('/tmp/cats-v-dogs/testing')
os.mkdir('/tmp/cats-v-dogs/training/cats')
os.mkdir('/tmp/cats-v-dogs/training/dogs')
os.mkdir('/tmp/cats-v-dogs/testing/cats')
os.mkdir('/tmp/cats-v-dogs/testing/dogs')
except OSError:
pass
###Output
_____no_output_____
###Markdown
Split data into training and test sets- The following code first checks if an image file is empty (zero length)- Of the files that are not empty, it puts 90% of the data into the training set, and 10% into the test set.
###Code
import random
from shutil import copyfile
def split_data(SOURCE, TRAINING, TESTING, SPLIT_SIZE):
files = []
for filename in os.listdir(SOURCE):
file = SOURCE + filename
if os.path.getsize(file) > 0:
files.append(filename)
else:
print(filename + " is zero length, so ignoring.")
training_length = int(len(files) * SPLIT_SIZE)
testing_length = int(len(files) - training_length)
shuffled_set = random.sample(files, len(files))
training_set = shuffled_set[0:training_length]
testing_set = shuffled_set[training_length:]
for filename in training_set:
this_file = SOURCE + filename
destination = TRAINING + filename
copyfile(this_file, destination)
for filename in testing_set:
this_file = SOURCE + filename
destination = TESTING + filename
copyfile(this_file, destination)
CAT_SOURCE_DIR = "/tmp/PetImages/Cat/"
TRAINING_CATS_DIR = "/tmp/cats-v-dogs/training/cats/"
TESTING_CATS_DIR = "/tmp/cats-v-dogs/testing/cats/"
DOG_SOURCE_DIR = "/tmp/PetImages/Dog/"
TRAINING_DOGS_DIR = "/tmp/cats-v-dogs/training/dogs/"
TESTING_DOGS_DIR = "/tmp/cats-v-dogs/testing/dogs/"
split_size = .9
split_data(CAT_SOURCE_DIR, TRAINING_CATS_DIR, TESTING_CATS_DIR, split_size)
split_data(DOG_SOURCE_DIR, TRAINING_DOGS_DIR, TESTING_DOGS_DIR, split_size)
# Expected output
# 666.jpg is zero length, so ignoring
# 11702.jpg is zero length, so ignoring
###Output
_____no_output_____
###Markdown
Check that the training and test sets are the expected lengths.
###Code
print("Number of training cat images", len(os.listdir('/tmp/cats-v-dogs/training/cats/')))
print("Number of training dog images", len(os.listdir('/tmp/cats-v-dogs/training/dogs/')))
print("Number of testing cat images", len(os.listdir('/tmp/cats-v-dogs/testing/cats/')))
print("Number of testing dog images", len(os.listdir('/tmp/cats-v-dogs/testing/dogs/')))
# expected output
# Number of training cat images 11250
# Number of training dog images 11250
# Number of testing cat images 1250
# Number of testing dog images 1250
###Output
_____no_output_____
###Markdown
Data augmentation (try adjusting the parameters)!Here, you'll use the `ImageDataGenerator` to perform data augmentation. - Things like rotating and flipping the existing images allow you to generate training data that is more varied, and can help the model generalize better during training. - You can also use the data generator to apply data augmentation to the validation set. You can use the default parameter values for a first pass through this lab.- Later, try to experiment with the parameters of `ImageDataGenerator` to improve the model's performance.- Try to reach 99.9% validation accuracy or better.
###Code
TRAINING_DIR = "/tmp/cats-v-dogs/training/"
# Experiment with your own parameters to reach 99.9% validation accuracy or better
train_datagen = ImageDataGenerator(rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
train_generator = train_datagen.flow_from_directory(TRAINING_DIR,
batch_size=100,
class_mode='binary',
target_size=(150, 150))
VALIDATION_DIR = "/tmp/cats-v-dogs/testing/"
validation_datagen = ImageDataGenerator(rescale=1./255)
validation_generator = validation_datagen.flow_from_directory(VALIDATION_DIR,
batch_size=100,
class_mode='binary',
target_size=(150, 150))
###Output
_____no_output_____
###Markdown
Get and prepare the modelYou'll be using the `InceptionV3` model. - Since you're making use of transfer learning, you'll load the pre-trained weights of the model.- You'll also freeze the existing layers so that they aren't trained on your downstream task with the cats and dogs data.- You'll also get a reference to the last layer, 'mixed7' because you'll add some layers after this last layer.
###Code
weights_url = "https://storage.googleapis.com/mledu-datasets/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5"
weights_file = "inception_v3.h5"
urllib.request.urlretrieve(weights_url, weights_file)
# Instantiate the model
pre_trained_model = InceptionV3(input_shape=(150, 150, 3),
include_top=False,
weights=None)
# load pre-trained weights
pre_trained_model.load_weights(weights_file)
# freeze the layers
for layer in pre_trained_model.layers:
layer.trainable = False
# pre_trained_model.summary()
last_layer = pre_trained_model.get_layer('mixed7')
print('last layer output shape: ', last_layer.output_shape)
last_output = last_layer.output
###Output
_____no_output_____
###Markdown
Add layersAdd some layers that you will train on the cats and dogs data.- `Flatten`: This will take the output of the `last_layer` and flatten it to a vector.- `Dense`: You'll add a dense layer with a relu activation.- `Dense`: After that, add a dense layer with a sigmoid activation. The sigmoid will scale the output to range from 0 to 1, and allow you to interpret the output as a prediction between two categories (cats or dogs).Then create the model object.
###Code
# Flatten the output layer to 1 dimension
x = layers.Flatten()(last_output)
# Add a fully connected layer with 1,024 hidden units and ReLU activation
x = layers.Dense(1024, activation='relu')(x)
# Add a final sigmoid layer for classification
x = layers.Dense(1, activation='sigmoid')(x)
model = Model(pre_trained_model.input, x)
###Output
_____no_output_____
###Markdown
Train the modelCompile the model, and then train it on the training data using `model.fit`- Feel free to adjust the number of epochs. This project was originally designed with 20 epochs.- For the sake of time, you can use fewer epochs (2) to see how the code runs.- You can ignore the warnings about some of the images having corrupt EXIF data. Those will be skipped.
###Code
# compile the model
model.compile(optimizer=RMSprop(lr=0.0001),
loss='binary_crossentropy',
metrics=['acc'])
# train the model (adjust the number of epochs from 1 to improve performance)
history = model.fit(
train_generator,
validation_data=validation_generator,
epochs=2,
verbose=1)
###Output
_____no_output_____
###Markdown
Visualize the training and validation accuracyYou can see how the training and validation accuracy change with each epoch on an x-y plot.
###Code
%matplotlib inline
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
#-----------------------------------------------------------
# Retrieve a list of list results on training and test data
# sets for each training epoch
#-----------------------------------------------------------
acc=history.history['acc']
val_acc=history.history['val_acc']
loss=history.history['loss']
val_loss=history.history['val_loss']
epochs=range(len(acc)) # Get number of epochs
#------------------------------------------------
# Plot training and validation accuracy per epoch
#------------------------------------------------
plt.plot(epochs, acc, 'r', label="Training Accuracy")
plt.plot(epochs, val_acc, 'b', label="Validation Accuracy")
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
###Output
_____no_output_____
###Markdown
Predict on a test imageYou can upload any image and have the model predict whether it's a dog or a cat.- Find an image of a dog or cat- Run the following code cell. It will ask you to upload an image.- The model will print "is a dog" or "is a cat" depending on the model's prediction.
###Code
import numpy as np
from google.colab import files
from keras.preprocessing import image
uploaded = files.upload()
for fn in uploaded.keys():
# predicting images
path = '/content/' + fn
img = image.load_img(path, target_size=(150, 150))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
image_tensor = np.vstack([x])
classes = model.predict(image_tensor)
print(classes)
print(classes[0])
if classes[0]>0.5:
print(fn + " is a dog")
else:
print(fn + " is a cat")
###Output
_____no_output_____ |
Describe+the+effects+of+age+on+hearing.ipynb | ###Markdown
***Write out a description of the effects that age and gender have on the odds of developing hearing problems in terms a layperson could understand. Include estimates for the odds of hearing problems in a 95 year old woman and a 50 year old man.*** $$\text{logit}(\text{HasHearingProblem}) = -1 + 0.02\cdot\text{age} + 1\cdot\text{male}$$
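As a quick recap of the conversion the code below performs, the logit is turned into odds and then into a probability:
$$\text{odds} = e^{\text{logit}}, \qquad P(\text{hearing problem}) = \frac{e^{\text{logit}}}{1 + e^{\text{logit}}}.$$
For the 95-year-old woman the logit is $-1 + 0.02\cdot 95 + 1\cdot 0 = 0.9$, and for the 50-year-old man it is $-1 + 0.02\cdot 50 + 1\cdot 1 = 1$.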
###Code
import math
#odds of hearing problems in a 95 year old woman
a = -1+ 0.02*95 + 1*0
c = math.exp( a )
d = math.exp( a )/(1+ c)
print('Probability of having hearing problems over not having them:', c)
print('HashearingProblem:', d)
###Output
Probability of having hearing problems over not having them: 2.45960311115695
HashearingProblem: 0.710949502625004
###Markdown
For a 95-year-old woman, the odds of having hearing problems are nearly 2.5 to 1, which corresponds to a probability of about 71%.
###Code
#odds of hearing problems in a 50 year old man
b = -1+.02*50+1*1
c = math.exp( b )
d = math.exp( b )/(1+ c)
print('Probability of having hearing problems over not having them:', c)
print('HashearingProblem:', d)
###Output
Probability of having hearing problems over not having them: 2.718281828459045
HashearingProblem: 0.7310585786300049
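###Markdown
By the same conversion, the 50-year-old man has $e^{1} \approx 2.72$, so his odds of having hearing problems are about 2.7 to 1, i.e. a probability of roughly 73%.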
|
4_2_Robot_Localization/2_2. Probability After Sense, solution.ipynb | ###Markdown
Robot SensorsA robot senses the world through cameras and other sensors, but these sensors are not perfectly accurate. In the video, you saw an example of a robot in a 1D world made of colored grid cells; all cells were either green or red. The robot then sensed that it was in a red grid cell. The probability that this reading was accurate, which we'll call the probability that the sensor has hit its target, `pHit`, was `0.6` and the probability that this reading was inaccurate (the sensor has missed its target) and the robot was *actually* in a green cell was `pMiss` equal to `0.2`. In this notebook, let's go through how this works step by step. Uniform DistributionThe robot starts with a map with a length of 5 cells. Since the robot does not know where it is at first, the probability of being in any space is the same; a uniform distribution!
###Code
# importing resources
import matplotlib.pyplot as plt
import numpy as np
# ex. initialize_robot(5) = [0.2, 0.2, 0.2, 0.2, 0.2]
def initialize_robot(grid_length):
''' Takes in a grid length and returns
a uniform distribution of location probabilities'''
p = []
# create a list that has the value of 1/grid_length for each cell
for i in range(grid_length):
p.append(1.0/grid_length)
return p
###Output
_____no_output_____
###Markdown
I'll also include a helper function for visualizing this distribution. The below function, `display_map` will output a bar chart showing the probability that a robot is in each grid space. The y-axis has a range of 0 to 1 for the range of probabilities. For a uniform distribution, this will look like a flat line. You can choose the width of each bar to be <= 1 should you want to space these out.
###Code
def display_map(grid, bar_width=1):
if(len(grid) > 0):
x_labels = range(len(grid))
plt.bar(x_labels, height=grid, width=bar_width, color='b')
plt.xlabel('Grid Cell')
plt.ylabel('Probability')
plt.ylim(0, 1) # range of 0-1 for probability values
plt.title('Probability of the robot being at each cell in the grid')
plt.xticks(np.arange(min(x_labels), max(x_labels)+1, 1))
plt.show()
else:
print('Grid is empty')
# initialize a 5 cell, 1D world
p = initialize_robot(5)
display_map(p)
###Output
_____no_output_____
###Markdown
Probability After SenseThen the robot senses that it is in a red cell, and updates its probabilities. As per our example:* The probability that it is sensing the correct color is `pHit = 0.6`.* The probability that it is sensing the incorrect color (in this case: seeing red but *actually* in a green cell) is `pMiss = 0.2` Next, we write code that outputs a new grid, `p`, after multiplying each entry by pHit or pMiss at the appropriate places. Remember that the red cells (cell 1 and 2) are "hits" and the other green cells are "misses."Note that you may see values that are not exact due to how machines imperfectly represent floating points.
###Code
# given initial variables
p = initialize_robot(5)
pHit = 0.6
pMiss = 0.2
# Creates a new grid, with modified probabilities, after sensing
# All values are calculated by a product of 1. the sensing probability for a color (pHit for red)
# and 2. the current probability of a robot being in that location p[i]; all equal to 0.2 at first.
p[0] = p[0]*pMiss
p[1] = p[1]*pHit
p[2] = p[2]*pHit
p[3] = p[3]*pMiss
p[4] = p[4]*pMiss
print(p)
display_map(p)
###Output
[0.04000000000000001, 0.12, 0.12, 0.04000000000000001, 0.04000000000000001]
###Markdown
You should see that the red grid cells (1 and 2) have a higher probability than the green cells. One thing that may look strange is how low these probability bars are, and you may have noticed that these don't accurately represent a probability distribution because the components of this list do not add up to 1! QUIZ: Compute the sum of all of these probabilities.What do these values add up to and how do you think we can turn this into a probability distribution whose components do add up to 1? In the next code cell, write code to sum up the values in the new world, `p`.
###Code
# What is the sum of all the values in p?
## TODO: add up all the values in the list of location probabilities to determine the answer
## Solution 1, loop through and add up all values of p:
total = 0.0
for prob in p:
total += prob
print(total)
## Solution 2, use Python's sum function, which sums all values in a list
print(sum(p))
###Output
0.3600000000000001
0.3600000000000001
###Markdown
Robot SensorsA robot senses the world through cameras and other sensors, but these sensors are not perfectly accurate. In the video, you saw an example of a robot in a 1D world made of colored grid cells; all cells were either green or red. The robot then sensed that it was in a red grid cell. The probability that this reading was accurate, which we'll call the probability that the sensor has hit its target, `pHit`, was `0.6` and the probability that this reading was inaccurate (the sensor has missed its target) and the robot was *actually* in a green cell was `pMiss` equal to `0.2`. In this notebook, let's go through how this works step by step. Uniform DistributionThe robot starts with a map with a length of 5 cells. Since the robot does not know where it is at first, the probability of being in any space is the same; a uniform distribution!
###Code
# importing resources
import matplotlib.pyplot as plt
import numpy as np
# ex. initialize_robot(5) = [0.2, 0.2, 0.2, 0.2, 0.2]
def initialize_robot(grid_length):
''' Takes in a grid length and returns
a uniform distribution of location probabilities'''
# create a list that has the value of 1/grid_length for each cell
p = [1.0/grid_length]*grid_length
return p
###Output
_____no_output_____
###Markdown
I'll also include a helper function for visualizing this distribution. The below function, `display_map` will output a bar chart showing the probability that a robot is in each grid space. The y-axis has a range of 0 to 1 for the range of probabilities. For a uniform distribution, this will look like a flat line. You can choose the width of each bar to be <= 1 should you want to space these out.
###Code
def display_map(grid, bar_width=1):
if(len(grid) > 0):
x_labels = range(len(grid))
plt.bar(x_labels, height=grid, width=bar_width, color='b')
plt.xlabel('Grid Cell')
plt.ylabel('Probability')
plt.ylim(0, 1) # range of 0-1 for probability values
plt.title('Probability of the robot being at each cell in the grid')
plt.xticks(np.arange(min(x_labels), max(x_labels)+1, 1))
plt.show()
else:
print('Grid is empty')
# initialize a 5 cell, 1D world
p = initialize_robot(5)
display_map(p)
###Output
_____no_output_____
###Markdown
Probability After SenseThen the robot senses that it is in a red cell, and updates its probabilities. As per our example:* The probability that it is sensing the correct color is `pHit = 0.6`.* The probability that it is sensing the incorrect color (in this case: seeing red but *actually* in a green cell) is `pMiss = 0.2` Next, we write code that outputs a new grid, `p`, after multiplying each entry by pHit or pMiss at the appropriate places. Remember that the red cells (cell 1 and 2) are "hits" and the other green cells are "misses."Note that you may see values that are not exact due to how machines imperfectly represent floating points.
###Code
# given initial variables
p = initialize_robot(5)
pHit = 0.6
pMiss = 0.2
# Creates a new grid, with modified probabilities, after sensing
# All values are calculated by a product of 1. the sensing probability for a color (pHit for red)
# and 2. the current probability of a robot being in that location p[i]; all equal to 0.2 at first.
p[0] = p[0]*pMiss
p[1] = p[1]*pHit
p[2] = p[2]*pHit
p[3] = p[3]*pMiss
p[4] = p[4]*pMiss
print(p)
display_map(p)
###Output
[0.04000000000000001, 0.12, 0.12, 0.04000000000000001, 0.04000000000000001]
###Markdown
You should see that the red grid cells (1 and 2) have a higher probability than the green cells. One thing that may look strange is how low these probability bars are, and you may have noticed that these don't accurately represent a probability distribution because the components of this list do not add up to 1! QUIZ: Compute the sum of all of these probabilities.What do these values add up to and how do you think we can turn this into a probability distribution whose components do add up to 1? In the next code cell, write code to sum up the values in the new world, `p`.
###Code
# What is the sum of all the values in p?
## TODO: add up all the values in the list of location probabilities to determine the answer
## Solution 1, loop through and add up all values of p:
total = 0.0
for prob in p:
total += prob
print(total)
## Solution 2, use Python's sum function, which sums all values in a list
print(sum(p))
###Output
0.3600000000000001
0.3600000000000001
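###Markdown
One way to answer the quiz above is to divide every entry by the total so the values sum to 1 again. The cell below is only a minimal sketch, assuming `p` still holds the post-sense values from the cells above.
###Code
# normalize the post-sense distribution so its components add up to 1
total = sum(p)
p_normalized = [prob / total for prob in p]
print(p_normalized)
print(sum(p_normalized))
###Output
_____no_output_____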
|
Modulo4/Ejercicios/1.DataFrames y Series-ejercicio.ipynb | ###Markdown
Import Pandas
###Code
#import pandas
import pandas as ps
###Output
_____no_output_____
###Markdown
Create a Series
###Code
# Create a Series with the numbers 10, 20 and 10.
serie_numero = [10, 20, 10]
# Create a Series with three objects: 'rojo', 'verde', 'azul'
serie_color = ['rojo', 'verde', 'azul']
###Output
_____no_output_____
###Markdown
Create a Dataframe
###Code
# Create an empty dataframe called 'df'
dicx = {}
df = ps.DataFrame()
df
# Create a new column in the dataframe and assign it the first series you created
dicx['Numeros'] = serie_numero
df = ps.DataFrame(dicx)
df
# Create another column in the dataframe and assign it the second Series you created
dicx['Colores'] = serie_color
df = ps.DataFrame(dicx)
df
###Output
_____no_output_____
###Markdown
Read a dataframe
###Code
# Read the file called 'avengers.csv' located in the "data" folder and create a DataFrame called 'avengers'.
# The file is located at "data/pandas/avengers.csv"
df = ps.read_csv('./data/pandas/avengers.csv')
df
###Output
_____no_output_____
###Markdown
Inspeccionar un dataframe
###Code
# Muestra las primeras 5 filas del DataFrame.
df.head(5)
# Muestra las primeras 10 filas del DataFrame.
df.head(10)
# Muestra las últimas 5 filas del DataFrame.
df.tail(5)
###Output
_____no_output_____
###Markdown
Tamaño del DataFrame
###Code
# Muestra el tamaño del DataFrame
df.shape
###Output
_____no_output_____
###Markdown
Data types en un DataFrame
###Code
# Muestra los data types del dataframe
df.dtypes
###Output
_____no_output_____
###Markdown
Editar el indice (index)
###Code
# Cambia el indice a la columna "fecha_inicio".
df = df.set_index("fecha_inicio")
df
###Output
_____no_output_____
###Markdown
Ordenar el indice
###Code
# Ordena el índice de forma descendiente
df = df.sort_values(by="fecha_inicio", ascending = False)
df
###Output
_____no_output_____
###Markdown
Resetear el indice
###Code
# Resetea el índice
df = df.reset_index()
df
###Output
_____no_output_____
###Markdown
Importar Pandas
###Code
#importa pandas
###Output
_____no_output_____
###Markdown
Crear una Serie
###Code
# Crea una Serie de los numeros 10, 20 and 10.
# Crea una Serie con tres objetos: 'rojo', 'verde', 'azul'
###Output
_____no_output_____
###Markdown
Crear un Dataframe
###Code
# Crea un dataframe vacío llamado 'df'
# Crea una nueva columna en el dataframe, y asignale la primera serie que has creado
# Crea otra columna en el dataframe y asignale la segunda Serie que has creado
###Output
_____no_output_____
###Markdown
Leer un dataframe
###Code
# Lee el archivo llamado 'avengers.csv" localizado en la carpeta "data" y crea un DataFrame, llamado 'avengers'.
# El archivo está localizado en "data/avengers.csv"
###Output
_____no_output_____
###Markdown
Inspeccionar un dataframe
###Code
# Muestra las primeras 5 filas del DataFrame.
# Muestra las primeras 10 filas del DataFrame.
# Muestra las últimas 5 filas del DataFrame.
###Output
_____no_output_____
###Markdown
Tamaño del DataFrame
###Code
# Muestra el tamaño del DataFrame
###Output
_____no_output_____
###Markdown
Data types en un DataFrame
###Code
# Muestra los data types del dataframe
###Output
_____no_output_____
###Markdown
Editar el indice (index)
###Code
# Cambia el indice a la columna "fecha_inicio".
###Output
_____no_output_____
###Markdown
Ordenar el indice
###Code
# Ordena el índice de forma descendiente
###Output
_____no_output_____
###Markdown
Resetear el indice
###Code
# Resetea el índice
###Output
_____no_output_____
###Markdown
Importar Pandas
###Code
#importa pandas
import pandas as pd
###Output
_____no_output_____
###Markdown
Crear una Serie
###Code
# Crea una Serie de los numeros 10, 20 and 10.
serie1 = ["10", "20", "10"]
# Crea una Serie con tres objetos: 'rojo', 'verde', 'azul'
serie2 = ["rojo", "verde", "azul"]
###Output
_____no_output_____
###Markdown
Crear un Dataframe
###Code
# Crea un dataframe vacío llamado 'df'
dicx = {}
df = pd.DataFrame(dicx)
# Crea una nueva columna en el dataframe, y asignale la primera serie que has creado
dicx["Serie 1"] = serie1
df = pd.DataFrame(dicx)
df
# Crea otra columna en el dataframe y asignale la segunda Serie que has creado
dicx["Serie 2"] = serie2
df = pd.DataFrame(dicx)
df
###Output
_____no_output_____
###Markdown
Leer un dataframe
###Code
# Lee el archivo llamado 'avengers.csv" localizado en la carpeta "data" y crea un DataFrame, llamado 'avengers'.
# El archivo está localizado en "data/pandas/avengers.csv"
df = pd.read_csv('./data/pandas/avengers.csv')
###Output
_____no_output_____
###Markdown
Inspeccionar un dataframe
###Code
# Muestra las primeras 5 filas del DataFrame.
df.head()
# Muestra las primeras 10 filas del DataFrame.
df.head(10)
# Muestra las últimas 5 filas del DataFrame.
df.tail()
###Output
_____no_output_____
###Markdown
Tamaño del DataFrame
###Code
# Muestra el tamaño del DataFrame
df.shape
###Output
_____no_output_____
###Markdown
Data types en un DataFrame
###Code
# Muestra los data types del dataframe
df.dtypes
###Output
_____no_output_____
###Markdown
Editar el indice (index)
###Code
# Cambia el indice a la columna "fecha_inicio".
df = df.set_index("fecha_inicio")
df
###Output
_____no_output_____
###Markdown
Ordenar el indice
###Code
# Ordena el índice de forma descendiente
df = df.sort_values(by="fecha_inicio", ascending=False)
df
###Output
_____no_output_____
###Markdown
Resetear el indice
###Code
# Resetea el índice
df = df.reset_index()
df
###Output
_____no_output_____
###Markdown
Importar Pandas
###Code
#importa pandas
import pandas
###Output
_____no_output_____
###Markdown
Crear una Serie
###Code
# Crea una Serie de los numeros 10, 20 and 10.
numeros=["10","20","10"]
# Crea una Serie con tres objetos: 'rojo', 'verde', 'azul'
colores=["rojo","verde","azul"]
###Output
_____no_output_____
###Markdown
Crear un Dataframe
###Code
# Crea un dataframe vacío llamado 'df'
import pandas as pd
df = pd.DataFrame()
# Crea una nueva columna en el dataframe, y asignale la primera serie que has creado
df['numeros'] = numeros
df
# Crea otra columna en el dataframe y asignale la segunda Serie que has creado
df['colores'] = colores
df
###Output
_____no_output_____
###Markdown
Leer un dataframe
###Code
# Lee el archivo llamado 'avengers.csv" localizado en la carpeta "data" y crea un DataFrame, llamado 'avengers'.
# El archivo está localizado en "data/avengers.csv"
avengers = pd.read_csv("data/pandas/avengers.csv")
###Output
_____no_output_____
###Markdown
Inspeccionar un dataframe
###Code
# Muestra las primeras 5 filas del DataFrame.
avengers.head(5)
# Muestra las primeras 10 filas del DataFrame.
avengers.head(10)
# Muestra las últimas 5 filas del DataFrame.
avengers.tail(10)
###Output
_____no_output_____
###Markdown
Tamaño del DataFrame
###Code
# Muestra el tamaño del DataFrame
avengers.shape
###Output
_____no_output_____
###Markdown
Data types en un DataFrame
###Code
# Muestra los data types del dataframe
avengers.dtypes
###Output
_____no_output_____
###Markdown
Editar el indice (index)
###Code
# Cambia el indice a la columna "fecha_inicio".
df2 = avengers.set_index("fecha_inicio")
df2
###Output
_____no_output_____
###Markdown
Ordenar el indice
###Code
# Ordena el índice de forma descendiente
by_year = df2.sort_values('fecha_inicio',ascending=False)
by_year
###Output
_____no_output_____
###Markdown
Resetear el indice
###Code
# Resetea el índice
df2 = df2.reset_index(drop=True)
df2
###Output
_____no_output_____
###Markdown
Importar Pandas
###Code
#importa pandas
import pandas as pd
###Output
_____no_output_____
###Markdown
Crear una Serie
###Code
# Crea una Serie de los numeros 10, 20 and 10.
serie1 = ["10", "20", "10"]
# Crea una Serie con tres objetos: 'rojo', 'verde', 'azul'
serie2 = ["rojo", "verde", "azul"]
###Output
_____no_output_____
###Markdown
Crear un Dataframe
###Code
# Crea un dataframe vacío llamado 'df'
dicx = {}
df = pd.DataFrame(dicx)
# Crea una nueva columna en el dataframe, y asignale la primera serie que has creado
dicx["Serie 1"] = serie1
df = pd.DataFrame(dicx)
df
# Crea otra columna en el dataframe y asignale la segunda Serie que has creado
dicx["Serie 2"] = serie2
df = pd.DataFrame(dicx)
df
###Output
_____no_output_____
###Markdown
Leer un dataframe
###Code
# Lee el archivo llamado 'avengers.csv" localizado en la carpeta "data" y crea un DataFrame, llamado 'avengers'.
# El archivo está localizado en "data/avengers.csv"
df = pd.read_csv('./data/pandas/avengers.csv')
###Output
_____no_output_____
###Markdown
Inspeccionar un dataframe
###Code
# Muestra las primeras 5 filas del DataFrame.
df.head()
# Muestra las primeras 10 filas del DataFrame.
df.head(10)
# Muestra las últimas 5 filas del DataFrame.
df.tail()
###Output
_____no_output_____
###Markdown
Tamaño del DataFrame
###Code
# Muestra el tamaño del DataFrame
df.shape
###Output
_____no_output_____
###Markdown
Data types en un DataFrame
###Code
# Muestra los data types del dataframe
df.dtypes
###Output
_____no_output_____
###Markdown
Editar el indice (index)
###Code
# Cambia el indice a la columna "fecha_inicio".
df = df.set_index("fecha_inicio")
df
###Output
_____no_output_____
###Markdown
Ordenar el indice
###Code
# Ordena el índice de forma descendiente
df = df.sort_values(by="fecha_inicio", ascending=False)
df
###Output
_____no_output_____
###Markdown
Resetear el indice
###Code
# Resetea el índice
df = df.reset_index()
df
###Output
_____no_output_____
###Markdown
Importar Pandas
###Code
#importa pandas
import pandas as pd
###Output
_____no_output_____
###Markdown
Crear una Serie
###Code
# Crea una Serie de los numeros 10, 20 and 10.
# Crea una Serie con tres objetos: 'rojo', 'verde', 'azul'
###Output
_____no_output_____
###Markdown
Crear un Dataframe
###Code
# Crea un dataframe vacío llamado 'df'
# Crea una nueva columna en el dataframe, y asignale la primera serie que has creado
# Crea otra columna en el dataframe y asignale la segunda Serie que has creado
###Output
_____no_output_____
###Markdown
Leer un dataframe
###Code
# Lee el archivo llamado 'avengers.csv" localizado en la carpeta "data" y crea un DataFrame, llamado 'avengers'.
# El archivo está localizado en "data/avengers.csv"
###Output
_____no_output_____
###Markdown
Inspeccionar un dataframe
###Code
# Muestra las primeras 5 filas del DataFrame.
# Muestra las primeras 10 filas del DataFrame.
# Muestra las últimas 5 filas del DataFrame.
###Output
_____no_output_____
###Markdown
Tamaño del DataFrame
###Code
# Muestra el tamaño del DataFrame
###Output
_____no_output_____
###Markdown
Data types en un DataFrame
###Code
# Muestra los data types del dataframe
###Output
_____no_output_____
###Markdown
Editar el indice (index)
###Code
# Cambia el indice a la columna "fecha_inicio".
###Output
_____no_output_____
###Markdown
Ordenar el indice
###Code
# Ordena el índice de forma descendiente
###Output
_____no_output_____
###Markdown
Resetear el indice
###Code
# Resetea el índice
###Output
_____no_output_____
###Markdown
Importar Pandas
###Code
#importa pandas
###Output
_____no_output_____
###Markdown
Crear una Serie
###Code
# Crea una Serie de los numeros 10, 20 and 10.
# Crea una Serie con tres objetos: 'rojo', 'verde', 'azul'
###Output
_____no_output_____
###Markdown
Crear un Dataframe
###Code
# Crea un dataframe vacío llamado 'df'
import pandas as pd  # pandas was not imported in the empty cell above, so import it here
df = pd.DataFrame()
# Crea una nueva columna en el dataframe, y asignale la primera serie que has creado
# Crea otra columna en el dataframe y asignale la segunda Serie que has creado
###Output
_____no_output_____
###Markdown
Leer un dataframe
###Code
# Lee el archivo llamado 'avengers.csv" localizado en la carpeta "data" y crea un DataFrame, llamado 'avengers'.
# El archivo está localizado en "data/avengers.csv"
###Output
_____no_output_____
###Markdown
Inspeccionar un dataframe
###Code
# Muestra las primeras 5 filas del DataFrame.
# Muestra las primeras 10 filas del DataFrame.
# Muestra las últimas 5 filas del DataFrame.
###Output
_____no_output_____
###Markdown
Tamaño del DataFrame
###Code
# Muestra el tamaño del DataFrame
###Output
_____no_output_____
###Markdown
Data types en un DataFrame
###Code
# Muestra los data types del dataframe
###Output
_____no_output_____
###Markdown
Editar el indice (index)
###Code
# Cambia el indice a la columna "fecha_inicio".
###Output
_____no_output_____
###Markdown
Ordenar el indice
###Code
# Ordena el índice de forma descendiente
###Output
_____no_output_____
###Markdown
Resetear el indice
###Code
# Resetea el índice
###Output
_____no_output_____
###Markdown
Importar Pandas
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
Crear una Serie
###Code
# Crea una Serie de los numeros 10, 20 and 10.
serie1 = ["10", "20", "10"]
# Crea una Serie con tres objetos: 'rojo', 'verde', 'azul'
serie2 = ["rojo", "verde", "azul"]
###Output
_____no_output_____
###Markdown
Crear un Dataframe
###Code
# Crea un dataframe vacío llamado 'df'
dicx = {}
df = pd.DataFrame(dicx)
# Crea una nueva columna en el dataframe, y asignale la primera serie que has creado
dicx["Serie 1"] = serie1
df = pd.DataFrame(dicx)
df
# Crea otra columna en el dataframe y asignale la segunda Serie que has creado
dicx["Serie 2"] = serie2
df = pd.DataFrame(dicx)
df
###Output
_____no_output_____
###Markdown
Leer un dataframe
###Code
# Lee el archivo llamado 'avengers.csv" localizado en la carpeta "data" y crea un DataFrame, llamado 'avengers'.
# El archivo está localizado en "data/avengers.csv"
df = pd.read_csv('./data/pandas/avengers.csv')
###Output
_____no_output_____
###Markdown
Inspeccionar un dataframe
###Code
# Muestra las primeras 5 filas del DataFrame.
df.head()
# Muestra las primeras 10 filas del DataFrame.
df.head(10)
# Muestra las últimas 5 filas del DataFrame.
df.tail()
###Output
_____no_output_____
###Markdown
Tamaño del DataFrame
###Code
# Muestra el tamaño del DataFrame
df.shape
###Output
_____no_output_____
###Markdown
Data types en un DataFrame
###Code
# Muestra los data types del dataframe
df.dtypes
###Output
_____no_output_____
###Markdown
Editar el indice (index)
###Code
# Cambia el indice a la columna "fecha_inicio".
df = df.set_index("fecha_inicio")
df
###Output
_____no_output_____
###Markdown
Ordenar el indice
###Code
# Ordena el índice de forma descendiente
df = df.sort_values(by="fecha_inicio", ascending=False)
df
###Output
_____no_output_____
###Markdown
Resetear el indice
###Code
# Resetea el índice
df = df.reset_index()
df
###Output
_____no_output_____
###Markdown
Importar Pandas
###Code
#importa pandas
import pandas as pd
###Output
_____no_output_____
###Markdown
Crear una Serie
###Code
# Crea una Serie de los numeros 10, 20 and 10.
serie1 = ["10", "20", "10"]
# Crea una Serie con tres objetos: 'rojo', 'verde', 'azul'
serie2 = ["rojo", "verde", "azul"]
###Output
_____no_output_____
###Markdown
Crear un Dataframe
###Code
# Crea un dataframe vacío llamado 'df'
dicx = {}
df = pd.DataFrame(dicx)
# Crea una nueva columna en el dataframe, y asignale la primera serie que has creado
dicx["Serie 1"] = serie1
df = pd.DataFrame(dicx)
df
# Crea otra columna en el dataframe y asignale la segunda Serie que has creado
dicx["Serie 2"] = serie2
df = pd.DataFrame(dicx)
df
###Output
_____no_output_____
###Markdown
Leer un dataframe
###Code
# Lee el archivo llamado 'avengers.csv" localizado en la carpeta "data" y crea un DataFrame, llamado 'avengers'.
# El archivo está localizado en "data/avengers.csv"
df = pd.read_csv('./data/pandas/avengers.csv')
###Output
_____no_output_____
###Markdown
Inspeccionar un dataframe
###Code
# Muestra las primeras 5 filas del DataFrame.
df.head()
# Muestra las primeras 10 filas del DataFrame.
df.head(10)
# Muestra las últimas 5 filas del DataFrame.
df.tail()
###Output
_____no_output_____
###Markdown
Tamaño del DataFrame
###Code
# Muestra el tamaño del DataFrame
df.shape
###Output
_____no_output_____
###Markdown
Data types en un DataFrame
###Code
# Muestra los data types del dataframe
df.dtypes
###Output
_____no_output_____
###Markdown
Editar el indice (index)
###Code
# Cambia el indice a la columna "fecha_inicio".
df = df.set_index("fecha_inicio")
df
###Output
_____no_output_____
###Markdown
Ordenar el indice
###Code
# Ordena el índice de forma descendiente
df = df.sort_values(by="fecha_inicio", ascending=False)
df
###Output
_____no_output_____
###Markdown
Resetear el indice
###Code
# Resetea el índice
df = df.reset_index()
df
###Output
_____no_output_____
###Markdown
Importar Pandas
###Code
#importa pandas
import pandas as pd
###Output
_____no_output_____
###Markdown
Crear una Serie
###Code
# Crea una Serie de los numeros 10, 20 and 30.
numeros = pd.Series([10, 20, 30])
numeros
# Crea una Serie con tres objetos: 'rojo', 'verde', 'azul'
colores = ['rojo', 'verde', 'azul']
df = pd.DataFrame({'numeros': [10, 20, 30], 'colores': ['rojo', 'verde', 'azul']})
###Output
_____no_output_____
###Markdown
Crear un Dataframe
###Code
# Crea un dataframe vacío llamado 'df'
# Crea una nueva columna en el dataframe, y asignale la primera serie que has creado
df = pd.DataFrame()
df['numeros'] = numeros
# Crea otra columna en el dataframe y asignale la segunda Serie que has creado
df['colores'] = colores
df
###Output
_____no_output_____
###Markdown
Leer un dataframe
###Code
# Lee el archivo llamado 'avengers.csv" localizado en la carpeta "data" y crea un DataFrame, llamado 'avengers'.
# El archivo está localizado en "data/avengers.csv"
import pandas as pd
avengers = pd.read_csv("./data/pandas/avengers.csv")
avengers.head()
###Output
_____no_output_____
###Markdown
BORRADOR
###Code
# Lee el archivo llamado 'avengers.csv" localizado en la carpeta "data" y crea un DataFrame, llamado 'avengers'.
# El archivo está localizado en "data/avengers.csv"
import pandas as pd
avengers_dict = {'nombre':['Jonathan Pym','Janet van Dyne','Tony Stark',' Bruce Banner','Thor Odinson','Milhouse Jones'],
'n_apariciones':[1269,1165,3068,2089,2402,612],
'actual':['YES','YES','YES','YES','YES','YES'],
            'genero':['masculino','femenino','masculino','masculino','masculino','masculino'],  # sixth value is a placeholder so every column has 6 entries, as pd.DataFrame requires
'fecha_inicio':[1963,1963,1963,1963,1963,1963],
'notes':['Merged with Ultron in Rage of Ultron Vol. 1. A funeral was held.','Dies in Secret Invasion V1:I8. Actually was sent tto Microverse later recovered','Death: "Later while under the influence of Immortus Stark committed a number of horrible acts and was killed.','Dies in Ghosts of the Future arc. However "he had actually used a hidden Pantheon base to survive"','Dies in Fear Itself brought back because that is kind of the whole point. Second death in Time Runs Out has not yet returned','nada']}
actores = pd.DataFrame(avengers_dict)  # build the draft DataFrame from the dict (named avengers_dict so it does not shadow the 'avengers' DataFrame read from the CSV above)
print(actores)
###Output
_____no_output_____
###Markdown
Inspeccionar un dataframe
###Code
# Muestra las primeras 5 filas del DataFrame.
avengers.head(5)
# Muestra las primeras 10 filas del DataFrame.
avengers.head(10)
avengers.head(5)
# tail -> retorna los últimos 5 resultados del df
avengers.tail()
###Output
_____no_output_____
###Markdown
Tamaño del DataFrame
###Code
# Muestra el tamaño del DataFrame
avengers.describe()
###Output
_____no_output_____
###Markdown
Data types en un DataFrame
###Code
# Muestra los data types del dataframe
avengers.dtypes
###Output
_____no_output_____
###Markdown
Editar el indice (index)
###Code
# Cambia el indice a la columna "fecha_inicio".
avengers1 = avengers.set_index("fecha_inicio")
avengers1.head()
###Output
_____no_output_____
###Markdown
Ordenar el indice
###Code
# Ordena el índice de forma descendiente
avengers.sort_values("n_apariciones",ascending=False)
###Output
_____no_output_____
###Markdown
Resetear el indice
###Code
# Resetea el índice
avengers = avengers.reset_index()
del avengers['index']
avengers = avengers.reindex()
###Output
_____no_output_____
###Markdown
Importar Pandas
###Code
#importa pandas
import pandas as pd
###Output
_____no_output_____
###Markdown
Crear una Serie
###Code
# Crea una Serie de los numeros 10, 20 and 10.
serie1 = ["10", "20", "10"]
# Crea una Serie con tres objetos: 'rojo', 'verde', 'azul'
serie2 = ["rojo", "verde", "azul"]
###Output
_____no_output_____
###Markdown
Crear un Dataframe
###Code
# Crea otra columna en el dataframe y asignale la segunda Serie que has creado
###Output
_____no_output_____
###Markdown
Leer un dataframe
###Code
# Lee el archivo llamado 'avengers.csv" localizado en la carpeta "data" y crea un DataFrame, llamado 'avengers'.
# El archivo está localizado en "data/avengers.csv"
###Output
_____no_output_____
###Markdown
Inspeccionar un dataframe
###Code
# Muestra las primeras 5 filas del DataFrame.
# Muestra las primeras 10 filas del DataFrame.
# Muestra las últimas 5 filas del DataFrame.
###Output
_____no_output_____
###Markdown
Tamaño del DataFrame
###Code
# Muestra el tamaño del DataFrame
###Output
_____no_output_____
###Markdown
Data types en un DataFrame
###Code
# Muestra los data types del dataframe
###Output
_____no_output_____
###Markdown
Editar el indice (index)
###Code
# Cambia el indice a la columna "fecha_inicio".
###Output
_____no_output_____
###Markdown
Ordenar el indice
###Code
# Ordena el índice de forma descendiente
###Output
_____no_output_____
###Markdown
Resetear el indice
###Code
# Resetea el índice
###Output
_____no_output_____ |
notebooks/02.5-make-projection-dfs/buckeye-phones-umap.ipynb | ###Markdown
load data
###Code
DATASET_ID = 'buckeye'
df_loc = DATA_DIR / 'syllable_dfs' / DATASET_ID / 'phones.pickle'
syllable_df = pd.read_pickle(df_loc)
del syllable_df['audio']
syllable_df[:3]
np.shape(syllable_df.spectrogram.values[0])
###Output
_____no_output_____
###Markdown
project
###Code
specs = list(syllable_df.spectrogram.values)
specs = [i/np.max(i) for i in tqdm(specs)]
specs_flattened = flatten_spectrograms(specs)
np.shape(specs_flattened)
cuml_umap = cumlUMAP()
embedding = cuml_umap.fit_transform(specs_flattened)
syllable_df['umap'] = list(embedding)
syllable_df[:3]
fig, ax = plt.subplots()
ax.scatter(embedding[:,0], embedding[:,1], s=1, color='k', alpha = 0.005)
ax.set_xlim([-8,8])
ax.set_ylim([-8,8])
###Output
_____no_output_____
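###Markdown
An added, optional follow-up: colour the embedding by phone label. The sketch below assumes the dataframe has a label column named `labels`, which is an assumption — substitute the actual column name.
###Code
# sketch: scatter the UMAP embedding coloured by phone label
# 'labels' is an assumed column name
fig, ax = plt.subplots(figsize=(8, 8))
for phone, sub in syllable_df.groupby('labels'):
    emb = np.vstack(sub['umap'].values)
    ax.scatter(emb[:, 0], emb[:, 1], s=1, alpha=0.05, label=phone)
ax.set_xlim([-8, 8])
ax.set_ylim([-8, 8])
###Output
_____no_output_____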
###Markdown
Save
###Code
ensure_dir(DATA_DIR / 'embeddings' / DATASET_ID / 'human_full')
syllable_df.to_pickle(DATA_DIR / 'embeddings' / DATASET_ID / 'human_full.pickle')
###Output
_____no_output_____ |
week_02_linear models/Seminar_02_liner_regression.ipynb | ###Markdown
Семинар 2 – Линейные модели
###Code
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import scipy.stats as st
import warnings
warnings.simplefilter('ignore')
###Output
_____no_output_____
###Markdown
1. Постановка задачи  Где линейная модель - это: $$ \hat{y} = f(x) = \theta_0*1 + \theta_1*x_1 + ... + \theta_n*x_n = \theta^T*X$$ Сгенерируем исскуственные данные, на основе функции:$$f(x) = 4x+5$$
###Code
def lin_function(x):
return 4*x+5
x_true = np.array([-2,2])
y_true = lin_function(x_true)
plt.plot(x_true, y_true, linewidth=1)
plt.show()
n = 100000
x = np.random.rand(n,1)*4-2
e = np.random.rand(n,1)*4-2
y = lin_function(x) + e
plt.scatter(x, y, color='g')
plt.plot(x_true, y_true, linewidth=1)
plt.show()
###Output
_____no_output_____
###Markdown
2. Метрики Mean Absolute Error:$$MAE = \frac1N \sum_{i = 1}^N|f(x_i) - y_i| = \frac1N \sum_{i = 1}^N|\hat y_i - y_i| = \frac1N || \hat Y - Y||_1$$Mean Squared Error:$$MSE = \frac1N \sum_{i = 1}^N(f(x_i) - y_i)^2 = \frac1N \sum_{i = 1}^N(\hat y_i - y_i)^2 = \frac1N ||\hat Y - Y||_2^2$$ Почему работаем с MSE? 3. Аналитический метод поиска минимума по MSE$$MSE \to \min $$$$MSE = \frac1N \sum_{i = 1}^N(\hat y_i - y_i)^2 = \frac1N \sum_{i = 1}^N(\theta^T x_i - y_i)^2 = \frac1N ||X \theta - Y||_2^2 = \frac1N (X\theta - Y)^T(X\theta - Y) $$$$ \frac{d}{d\theta}[\frac1N (X\theta - Y)^T(X\theta - Y)] = \frac1N \frac{d}{d\theta}[Y^TY - 2Y^TX\theta+\theta^TX^TX\theta] = \frac2N (X^TX\theta - X^TY) = 0 $$$$\hat \theta = \bigl(X^T \cdot X \bigr)^{-1} \cdot X^T \cdot y $$
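As an added illustration, both metrics can be computed directly from these formulas for the data generated above (`x`, `y` and `lin_function` come from the previous cells):
###Code
# MAE and MSE of the noisy observations against the true line
y_hat = lin_function(x)
mae = np.mean(np.abs(y_hat - y))
mse = np.mean((y_hat - y) ** 2)
print(mae, mse)
###Output
_____no_output_____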
###Code
x_matrix = np.c_[np.ones((n,1)),x]
%%time
thetha_matrix = np.linalg.inv(x_matrix.T.dot(x_matrix)).dot(x_matrix.T).dot(y) #
###Output
CPU times: user 3.77 ms, sys: 3.96 ms, total: 7.73 ms
Wall time: 5.71 ms
###Markdown
Обратите внимание на время работы
###Code
thetha_matrix.T[0].tolist()
print("Свободный член: {[0][0]:.7}".format(thetha_matrix.T))
print("Коэфициент: {[0][1]:.7}".format(thetha_matrix.T))
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
###Output
_____no_output_____
###Markdown
параметры
###Code
%%time
lr = LinearRegression()
lr.fit(x,y);
print("Свободный член: {:.7}".format(lr.intercept_[0]))
print("Коэфициент: {:.7}".format(lr.coef_[0][0]))
plt.scatter(x, y, color='g')
plt.scatter(x, lr.predict(x), color='r')
plt.plot(x_true, y_true, linewidth=1)
plt.show()
###Output
_____no_output_____
###Markdown
4. Градиентный спуск$$\theta^{(t+1)} = \theta^{(t)} - lr\cdot \nabla MSE(\theta^{(t)}),$$где $lr$ — длина шага градиентного спуска (learning rate).$$\nabla MSE(\theta)= \frac{2}{N} X^T \cdot \bigl(X \cdot \theta - Y \bigr) $$
###Code
%%time
lr = 0.1 # learning rate
n_iterations = 100
theta = np.random.randn(2,1) # random initialization
plt.scatter(x, y, color='g')
for iteration in range(n_iterations):
if iteration < 10:
plt.plot(x_true, x_true*theta[1]+theta[0], linewidth=1, color='r')
gradients = 2/n * x_matrix.T.dot(x_matrix.dot(theta) - y) #
theta = theta - lr * gradients #
plt.plot(x_true, y_true, linewidth=1)
plt.show()
print(theta)
###Output
_____no_output_____
###Markdown
Слишком маленький шаг обучения (learning rate)
###Code
lr = 0.01 # learning rate
n_iterations = 100
theta = np.random.randn(2,1) # random initialization
plt.scatter(x, y, color='g')
for iteration in range(n_iterations):
if iteration < 10:
plt.plot(x_true, x_true*theta[1]+theta[0], linewidth=1, color='r')
gradients = 2/n * x_matrix.T.dot(x_matrix.dot(theta) - y)
theta = theta - lr * gradients
plt.plot(x_true, y_true, linewidth=1)
plt.show()
###Output
_____no_output_____
###Markdown
Слишком большой шаг обучения (learning rate)
###Code
lr = 1.01 # learning rate
n_iterations = 100
theta = np.random.randn(2,1) # random initialization
plt.scatter(x, y, color='g')
for iteration in range(n_iterations):
if iteration < 10:
plt.plot(x_true, x_true*theta[1]+theta[0], linewidth=1, color='r')
gradients = 2/n * x_matrix.T.dot(x_matrix.dot(theta) - y)
theta = theta - lr * gradients
plt.plot(x_true, y_true, linewidth=1)
plt.show()
###Output
_____no_output_____
###Markdown
Уменьшение шага на каждой итерации
###Code
lr = 1 # learning rate
n_iterations = 1000
theta = np.random.randn(2,1) # random initialization
for iteration in range(n_iterations):
gradients = 2/n * x_matrix.T.dot(x_matrix.dot(theta) - y)
theta = theta - (lr/(iteration+1)) * gradients #
print(theta)
###Output
[[5.16588465]
[4.14849683]]
###Markdown
Learning rate - гипперпараметр, и можно воспользоваться GridSearchCV, однако чтобы не учить каждый раз такое кол-во итераций, мы можем измерять норму градиента, и прекращать спуск, когда он "затух"
###Code
lr = 1 # learning rate
n_iterations = 1000
tol = 0.00001
theta = np.random.randn(2,1) # random initialization
for iteration in range(n_iterations):
gradients = 2/n * x_matrix.T.dot(x_matrix.dot(theta) - y)
if np.linalg.norm(gradients) < tol:
break
theta = theta - (lr/(iteration+1)) * gradients
print('Градиент затух на {} итерации '.format(iteration))
print(theta)
###Output
Градиент затух на 172 итерации
[[5.16588547]
[4.14849302]]
###Markdown
__Реализация в Scikit-Learn отсутствует__ 5. Cтохастический градиентный спуск у среднего случайных подвыборок то же что и у всех данных
###Code
n_epochs = 100
def learning_schedule(t):
return t0 / (t + t1)
t0, t1 = 5, 100 # learning schedule hyperparameters
theta = np.random.randn(2,1) # random initialization
for epoch in range(n_epochs):
for i in range(n):
random_index = np.random.randint(n)
xi = x_matrix[random_index:random_index+1]
yi = y[random_index:random_index+1]
gradients = 2 * xi.T.dot(xi.dot(theta) - yi)
eta = learning_schedule(epoch * n + i)
        theta = theta - eta * gradients
print(theta)
###Output
[[5.17039667]
[4.15116695]]
###Markdown
6. Пакетный градиентный спуск
###Code
n_epochs = 100
def learning_schedule(t):
return t0 / (t + t1)
t0, t1 = 5, 100 # learning schedule hyperparameters
theta = np.random.randn(2,1) # random initialization
for epoch in range(n_epochs):
for i in range(n):
random_index = np.random.randint(n)
xi = x_matrix[random_index:random_index+10]
yi = y[random_index:random_index+10]
gradients = 2 * xi.T.dot(xi.dot(theta) - yi)
lr = learning_schedule(epoch * n + i)
theta = theta - lr * gradients
print(theta)
from sklearn.linear_model import SGDRegressor
sgd = SGDRegressor(tol=0.0001)
#The stopping criterion. If it is not None, the iterations will stop when (loss > previous_loss - tol).
sgd.fit(x,y)
sgd.intercept_, sgd.coef_
###Output
_____no_output_____
###Markdown
7. Функции потерь в регрессии
###Code
from google.colab import drive
drive.mount('/content/gdrive/')
with open('./gdrive/My Drive/Colab Notebooks/Семинар 3/data_preprocessed.json') as file:
X = pd.read_json(file)
X_subset = X[[7, 15]].values
# add two outliers
X_subset_modified = np.vstack((X_subset, [[1, 90], [2, 50]]))
def scatter_points_and_plot_line_MSE(X_subset):
plt.scatter(X_subset[:, 0], X_subset[:, 1])
lr = LinearRegression()
lr.fit(X_subset[:, 0][:, None], X_subset[:, 1])
grid = np.linspace(0, 2, 100)
line = lr.predict(grid[:, None])
plt.plot(grid, line)
plt.figure(figsize=(20, 5))
plt.subplot(1, 2, 1)
scatter_points_and_plot_line_MSE(X_subset)
plt.ylim(-20, 100)
plt.xlabel("x")
plt.ylabel("y")
plt.subplot(1, 2, 2)
scatter_points_and_plot_line_MSE(X_subset_modified)
plt.ylim(-20, 100)
plt.xlabel("x")
###Output
_____no_output_____
###Markdown
Из-за шумовых объектов прямая достаточно сильно изменила наклон. Поэтому вместо MSE часто используют Mean Absoulte Error:$$L(y_i, a(x_i)) = |y_i - a(x_i)|$$Теперь обучим регрессию, оптимизируя MAE. В sklearn такая регрессия не реализована, но можно использовать модуль statsmodels
###Code
!pip install statsmodels==0.11.1
import statsmodels.api as sm
import statsmodels.formula.api as smf
plt.figure(figsize=(20, 5))
plt.ylabel("y")
mod = smf.quantreg('f15 ~ f7', pd.DataFrame(data=X_subset_modified, columns=["f7", "f15"])) # задаеем зависимость и передаем данные
res = mod.fit(q=0.5)
plt.scatter(X_subset_modified[:, 0], X_subset_modified[:, 1]) # визуализируем точки
grid = np.linspace(0, 2, 100)
plt.plot(grid, grid * res.params["f7"] + res.params["Intercept"]) # визуализируем прямую
plt.ylim(-20, 100)
plt.xlabel("x")
###Output
_____no_output_____
###Markdown
Прямая не изменила направление из-за выбросов. Попробуем добавить больше шумовых объектов:
###Code
X_subset_modified_twice = np.vstack((
X_subset_modified,
np.random.randint(5, size=60).reshape(-1, 2) * [1, 30],
))
plt.figure(figsize=(20, 5))
plt.ylabel("y")
mod = smf.quantreg('f15 ~ f7', pd.DataFrame(data=X_subset_modified_twice, columns=["f7", "f15"])) # задаеем зависимость и передаем данные
res = mod.fit(q=0.5)
plt.scatter(X_subset_modified_twice[:, 0], X_subset_modified_twice[:, 1]) # визуализируем точки
grid = np.linspace(0, 4, 200)
plt.plot(grid, grid * res.params["f7"] + res.params["Intercept"]) # визуализируем прямую
plt.ylim(-20, 100)
plt.xlabel("x")
###Output
_____no_output_____
###Markdown
Прямая изменила наклон, когда мы добавили 30 (почти 15%) шумовых точек. 7. Мультиколлинеарность и регуляризация
###Code
# !pip install seaborn
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
###Output
_____no_output_____
###Markdown
Для оценка качества работы наших алгоритмов будем использовать коэфициент детерминации:$$R^2 = 1 - \frac{\sum_i (y_i - a(x_i))^2}{\sum_i (y_i - \overline{y}_i)^2}$$ Решение задачи МНК
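A minimal added sketch of $R^2$ computed directly from the definition above (the cells below use `r2_score` from sklearn; this is only for illustration):
###Code
# coefficient of determination computed from its definition
def my_r2_score(y_true, y_pred):
    ss_res = np.sum((np.asarray(y_true) - np.asarray(y_pred)) ** 2)
    ss_tot = np.sum((np.asarray(y_true) - np.mean(y_true)) ** 2)
    return 1 - ss_res / ss_tot
###Output
_____no_output_____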
###Code
def my_linear_regression(X_train, Y_train):
    return np.linalg.inv(X_train.T.dot(X_train)).dot(X_train.T).dot(Y_train)
def predict(X, w):
return np.dot(X, w)
###Output
_____no_output_____
###Markdown
Загрузим датасет https://habrahabr.ru/post/206306/
###Code
data = pd.read_csv('energy_efficiency.csv')
###Output
_____no_output_____
###Markdown
Для примера решения задачи прогнозирования, я взял набор данных Energy efficiency из крупнейшего репозитория UCI. В нем $X_1 ... X_8$ — характеристики помещения на основании которых будет проводиться анализ, а $y_1,y_2$ — значения нагрузки, которые надо спрогнозировать.- $X_1$ Относительная компактность- $X_2$ Площадь- $X_3$ Площадь стен- $X_4$ Площадь потолка - $X_5$ Общая высота - $X_6$ Ориентация- $X_7$ Площадь остекления - $X_8$ Распределенная площадь остекления - $y_1$ Нагрузка при обогреве- $y_2$ Нагрузка при охлаждении
###Code
data.head()
###Output
_____no_output_____
###Markdown
Посмотрим на скоррелированность данных
###Code
data.corr()
f, ax = plt.subplots(figsize=(10, 8))
corr = data.drop(['Y1','Y2'], axis=1).corr()
sns.heatmap(corr, square=True, ax=ax, cmap=sns.diverging_palette(220, 10, as_cmap=True))
f, ax = plt.subplots(figsize=(10, 8))
corr = data.corr()
sns.heatmap(corr, square=True, ax=ax, cmap=sns.diverging_palette(220, 10, as_cmap=True))
###Output
_____no_output_____
###Markdown
Видим, что x1 скоррелирован с x2, а x4 с x5. Из-за этого матрица $X^{T}*X$ необратима. Посмотрим как на таких данных отработает наша линейная регрессия Разобьем выборку на train и test
###Code
X = data.drop(['Y1','Y2'], axis=1)
y = data['Y1']
X.shape, y.shape
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
###Output
_____no_output_____
###Markdown
Обучим регрессию и посмотрим на качество
###Code
w = my_linear_regression(X_train, y_train)
y_train_pred = predict(X_train, w)
print("Train MSE: ", mean_squared_error(y_train, y_train_pred))
print("Train R2: ", r2_score(y_train, y_train_pred))
y_test_pred = predict(X_test, w)
print("Test MSE: ", mean_squared_error(y_test, y_test_pred))
print("Test R2: ", r2_score(y_test, y_test_pred))
###Output
Test MSE: 460128.72662043874
Test R2: -4413.449687872916
###Markdown
Как-то не очень Попробуем убрать скоррелированные признаки
###Code
X = data.drop(['X1','X4', 'Y1','Y2'], axis=1)
y = data['Y1']
X.shape, y.shape
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
###Output
_____no_output_____
###Markdown
Обучим регрессию и посмотрим на качество
###Code
w = my_linear_regression(X_train, y_train)
y_train_pred = predict(X_train, w)
print("Train MSE: ", mean_squared_error(y_train, y_train_pred))
print("Train R2: ", r2_score(y_train, y_train_pred))
y_test_pred = predict(X_test, w)
print("Test MSE: ", mean_squared_error(y_test, y_test_pred))
print("Test R2: ", r2_score(y_test, y_test_pred))
###Output
Test MSE: 11.387213360639421
Test R2: 0.8907517015187721
###Markdown
Юху! Получили алгоритм с хорошим качеством Реализуем линейную регрессию с L2 регуляризацией
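As a short added note, the closed-form solution implemented in the next cell follows from adding an $L_2$ penalty to the sum of squared errors, $\|X\theta - y\|_2^2 + l_2\|\theta\|_2^2$, which gives$$\hat \theta = \bigl(X^T X + l_2 I\bigr)^{-1} X^T y .$$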
###Code
def my_linear_regression(X_train, Y_train, l2=0):
    return np.linalg.inv(X_train.T.dot(X_train) + l2*np.eye(X_train.shape[1])).dot(X_train.T).dot(Y_train)
###Output
_____no_output_____
###Markdown
Обучим регрессию с регуляризацией и посмотрим на качество
###Code
X = data.drop(['Y1','Y2'], axis=1)
y = data['Y1']
X.shape, y.shape
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
w = my_linear_regression(X_train, y_train, l2=0.001)
y_train_pred = predict(X_train, w)
print("Train MSE: ", mean_squared_error(y_train, y_train_pred))
print("Train R2: ", r2_score(y_train, y_train_pred))
y_test_pred = predict(X_test, w)
print("Test MSE: ", mean_squared_error(y_test, y_test_pred))
print("Test R2: ", r2_score(y_test, y_test_pred))
###Output
Test MSE: 9.4639612678421
Test R2: 0.9092032762837478
###Markdown
Семинар 2 – Линейные модели
###Code
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import scipy.stats as st
import warnings
warnings.simplefilter('ignore')
###Output
_____no_output_____
###Markdown
1. Постановка задачи  Где линейная модель - это: $$ \hat{y} = f(x) = \theta_0*1 + \theta_1*x_1 + ... + \theta_n*x_n = \theta^T*X$$ Сгенерируем исскуственные данные, на основе функции:$$f(x) = 4x+5$$
###Code
def lin_function(x):
return 4*x+5
x_true = np.array([-2,2])
y_true = lin_function(x_true)
plt.plot(x_true, y_true, linewidth=1)
plt.show()
n = 100
x = np.random.rand(n,1)*4-2
e = np.random.rand(n,1)*4-2
y = lin_function(x) + e
plt.scatter(x, y, color='g')
plt.plot(x_true, y_true, linewidth=1)
plt.show()
###Output
_____no_output_____
###Markdown
2. Метрики Mean Absolute Error:$$MAE = \frac1N \sum_{i = 1}^N|f(x_i) - y_i| = \frac1N \sum_{i = 1}^N|\hat y_i - y_i| = \frac1N || \hat Y - Y||_1$$Mean Squared Error:$$MSE = \frac1N \sum_{i = 1}^N(f(x_i) - y_i)^2 = \frac1N \sum_{i = 1}^N(\hat y_i - y_i)^2 = \frac1N ||\hat Y - Y||_2^2$$ Почему работаем с MSE? 3. Аналитический метод поиска минимума по MSE$$MSE \to \min $$$$\hat \theta = \bigl(X^T \cdot X \bigr)^{-1} \cdot X^T \cdot y $$
###Code
x_matrix = np.c_[np.ones((n,1)),x]
%%time
thetha_matrix = # Ваш код здесь
###Output
_____no_output_____
###Markdown
Обратите внимание на время работы
###Code
thetha_matrix.T[0].tolist()
print("Свободный член: {[0][0]:.7}".format(thetha_matrix.T))
print("Коэфициент: {[0][1]:.7}".format(thetha_matrix.T))
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
###Output
_____no_output_____
###Markdown
параметры
###Code
%%time
lr = LinearRegression()
lr.fit(x,y);
print("Свободный член: {:.7}".format(lr.intercept_[0]))
print("Коэфициент: {:.7}".format(lr.coef_[0][0]))
plt.scatter(x, y, color='g')
plt.scatter(x, lr.predict(x), color='r')
plt.plot(x_true, y_true, linewidth=1)
plt.show()
###Output
_____no_output_____
###Markdown
4. Градиентный спуск$$\theta^{(t+1)} = \theta^{(t)} - lr\cdot \nabla MSE(\theta^{(t)}),$$где $lr$ — длина шага градиентного спуска (learning rate).$$\nabla MSE(\theta)= \frac{2}{N} X^T \cdot \bigl(X \cdot \theta - Y \bigr) $$
###Code
%%time
lr = 0.1 # learning rate
n_iterations = 100
theta = np.random.randn(2,1) # random initialization
plt.scatter(x, y, color='g')
for iteration in range(n_iterations):
if iteration < 10:
plt.plot(x_true, x_true*theta[1]+theta[0], linewidth=1, color='r')
gradients = # Ваш код здесь
theta = # Ваш код здесь
plt.plot(x_true, y_true, linewidth=1)
plt.show()
print(theta)
###Output
_____no_output_____
###Markdown
Слишком маленький шаг обучения (learning rate)
###Code
lr = 0.01 # learning rate
n_iterations = 100
theta = np.random.randn(2,1) # random initialization
plt.scatter(x, y, color='g')
for iteration in range(n_iterations):
if iteration < 10:
plt.plot(x_true, x_true*theta[1]+theta[0], linewidth=1, color='r')
gradients = # Ваш код здесь
theta = # Ваш код здесь
plt.plot(x_true, y_true, linewidth=1)
plt.show()
###Output
_____no_output_____
###Markdown
Слишком большой шаг обучения (learning rate)
###Code
lr = 1.01 # learning rate
n_iterations = 100
theta = np.random.randn(2,1) # random initialization
plt.scatter(x, y, color='g')
for iteration in range(n_iterations):
if iteration < 10:
plt.plot(x_true, x_true*theta[1]+theta[0], linewidth=1, color='r')
gradients = # Ваш код здесь
theta = # Ваш код здесь
plt.plot(x_true, y_true, linewidth=1)
plt.show()
###Output
_____no_output_____
###Markdown
Уменьшение шага на каждой итерации
###Code
lr = 1 # learning rate
n_iterations = 1000
theta = np.random.randn(2,1) # random initialization
for iteration in range(n_iterations):
gradients = # Ваш код здесь
theta = # Ваш код здесь
print(theta)
###Output
[[5.16588465]
[4.14849683]]
###Markdown
Learning rate - гипперпараметр, и можно воспользоваться GridSearchCV, однако чтобы не учить каждый раз такое кол-во итераций, мы можем измерять норму градиента, и прекращать спуск, когда он "затух"
###Code
lr = 1 # learning rate
n_iterations = 1000
tol = 0.00001
theta = np.random.randn(2,1) # random initialization
for iteration in range(n_iterations):
gradients = # Ваш код здесь
if # Ваш код здесь:
break
theta = # Ваш код здесь
print('Градиент затух на {} итерации '.format(iteration))
print(theta)
###Output
Градиент затух на 172 итерации
[[5.16588547]
[4.14849302]]
###Markdown
__Реализация в Scikit-Learn отсутствует__ 5. Cтохастический градиентный спуск у среднего случайных подвыборок то же что и у всех данных
###Code
n_epochs = 100
def learning_schedule(t):
return t0 / (t + t1)
t0, t1 = 5, 100 # learning schedule hyperparameters
theta = np.random.randn(2,1) # random initialization
for epoch in range(n_epochs):
for i in range(n):
random_index = np.random.randint(n)
xi = # Ваш код здесь
yi = # Ваш код здесь
gradients = # Ваш код здесь
lr = # Ваш код здесь
theta = # Ваш код здесь
print(theta)
###Output
[[5.17039667]
[4.15116695]]
###Markdown
6. Пакетный градиентный спуск
###Code
n_epochs = 100
def learning_schedule(t):
return t0 / (t + t1)
t0, t1 = 5, 100 # learning schedule hyperparameters
theta = np.random.randn(2,1) # random initialization
for epoch in range(n_epochs):
for i in range(n):
random_index = np.random.randint(n)
xi = # Ваш код здесь
yi = # Ваш код здесь
gradients = # Ваш код здесь
lr = # Ваш код здесь
theta = # Ваш код здесь
print(theta)
from sklearn.linear_model import SGDRegressor
sgd = SGDRegressor(tol=0.0001)
#The stopping criterion. If it is not None, the iterations will stop when (loss > previous_loss - tol).
sgd.fit(x,y)
sgd.intercept_, sgd.coef_
###Output
_____no_output_____
###Markdown
7. Функции потерь в регрессии
###Code
from google.colab import drive
drive.mount('/content/gdrive/')
with open('./gdrive/My Drive/Colab Notebooks/Семинар 3/data_preprocessed.json') as file:
X = pd.read_json(file)
X_subset = X[[7, 15]].values
# add two outliers
X_subset_modified = np.vstack((X_subset, [[1, 90], [2, 50]]))
def scatter_points_and_plot_line_MSE(X_subset):
plt.scatter(X_subset[:, 0], X_subset[:, 1])
lr = LinearRegression()
lr.fit(X_subset[:, 0][:, None], X_subset[:, 1])
grid = np.linspace(0, 2, 100)
line = lr.predict(grid[:, None])
plt.plot(grid, line)
plt.figure(figsize=(20, 5))
plt.subplot(1, 2, 1)
scatter_points_and_plot_line_MSE(X_subset)
plt.ylim(-20, 100)
plt.xlabel("x")
plt.ylabel("y")
plt.subplot(1, 2, 2)
scatter_points_and_plot_line_MSE(X_subset_modified)
plt.ylim(-20, 100)
plt.xlabel("x")
###Output
_____no_output_____
###Markdown
Из-за шумовых объектов прямая достаточно сильно изменила наклон. Поэтому вместо MSE часто используют Mean Absoulte Error:$$L(y_i, a(x_i)) = |y_i - a(x_i)|$$Теперь обучим регрессию, оптимизируя MAE. В sklearn такая регрессия не реализована, но можно использовать модуль statsmodels
###Code
!pip install statsmodels==0.11.1
import statsmodels.api as sm
import statsmodels.formula.api as smf
plt.figure(figsize=(20, 5))
plt.ylabel("y")
mod = smf.quantreg('f15 ~ f7', pd.DataFrame(data=X_subset_modified, columns=["f7", "f15"])) # задаеем зависимость и передаем данные
res = mod.fit(q=0.5)
plt.scatter(X_subset_modified[:, 0], X_subset_modified[:, 1]) # визуализируем точки
grid = np.linspace(0, 2, 100)
plt.plot(grid, grid * res.params["f7"] + res.params["Intercept"]) # визуализируем прямую
plt.ylim(-20, 100)
plt.xlabel("x")
###Output
_____no_output_____
###Markdown
Прямая не изменила направление из-за выбросов. Попробуем добавить больше шумовых объектов:
###Code
X_subset_modified_twice = np.vstack((
X_subset_modified,
np.random.randint(5, size=60).reshape(-1, 2) * [1, 30],
))
plt.figure(figsize=(20, 5))
plt.ylabel("y")
mod = smf.quantreg('f15 ~ f7', pd.DataFrame(data=X_subset_modified_twice, columns=["f7", "f15"])) # задаеем зависимость и передаем данные
res = mod.fit(q=0.5)
plt.scatter(X_subset_modified_twice[:, 0], X_subset_modified_twice[:, 1]) # визуализируем точки
grid = np.linspace(0, 4, 200)
plt.plot(grid, grid * res.params["f7"] + res.params["Intercept"]) # визуализируем прямую
plt.ylim(-20, 100)
plt.xlabel("x")
###Output
_____no_output_____
###Markdown
Прямая изменила наклон, когда мы добавили 30 (почти 15%) шумовых точек. 7. Мультиколлинеарность и регуляризация
###Code
# !pip install seaborn
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
###Output
_____no_output_____
###Markdown
Для оценка качества работы наших алгоритмов будем использовать коэфициент детерминации:$$R^2 = 1 - \frac{\sum_i (y_i - a(x_i))^2}{\sum_i (y_i - \overline{y}_i)^2}$$ Решение задачи МНК
###Code
def my_linear_regression(X_train, Y_train):
    return np.linalg.inv(X_train.T.dot(X_train)).dot(X_train.T).dot(Y_train)
def predict(X, w):
return np.dot(X, w)
###Output
_____no_output_____
###Markdown
Загрузим датасет https://habrahabr.ru/post/206306/
###Code
data = pd.read_csv('energy_efficiency.csv')
###Output
_____no_output_____
###Markdown
Для примера решения задачи прогнозирования, я взял набор данных Energy efficiency из крупнейшего репозитория UCI. В нем $X_1 ... X_8$ — характеристики помещения на основании которых будет проводиться анализ, а $y_1,y_2$ — значения нагрузки, которые надо спрогнозировать.- $X_1$ Относительная компактность- $X_2$ Площадь- $X_3$ Площадь стен- $X_4$ Площадь потолка - $X_5$ Общая высота - $X_6$ Ориентация- $X_7$ Площадь остекления - $X_8$ Распределенная площадь остекления - $y_1$ Нагрузка при обогреве- $y_2$ Нагрузка при охлаждении
###Code
data.head()
###Output
_____no_output_____
###Markdown
Посмотрим на скоррелированность данных
###Code
data.corr()
f, ax = plt.subplots(figsize=(10, 8))
corr = data.corr()
sns.heatmap(corr, square=True, ax=ax, cmap=sns.diverging_palette(220, 10, as_cmap=True))
f, ax = plt.subplots(figsize=(10, 8))
corr = # Ваш код здесь
sns.heatmap(corr, square=True, ax=ax, cmap=sns.diverging_palette(220, 10, as_cmap=True))
###Output
_____no_output_____
###Markdown
Видим, что x1 скоррелирован с x2, а x4 с x5. Из-за этого матрица $X^{T}*X$ необратима. Посмотрим как на таких данных отработает наша линейная регрессия Разобьем выборку на train и test
###Code
X = data.drop(['Y1','Y2'], axis=1)
y = data['Y1']
X.shape, y.shape
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
###Output
_____no_output_____
###Markdown
Обучим регрессию и посмотрим на качество
###Code
w = my_linear_regression(X_train, y_train)
y_train_pred = predict(X_train, w)
print("Train MSE: ", mean_squared_error(y_train, y_train_pred))
print("Train R2: ", r2_score(y_train, y_train_pred))
y_test_pred = predict(X_test, w)
print("Test MSE: ", mean_squared_error(y_test, y_test_pred))
print("Test R2: ", r2_score(y_test, y_test_pred))
###Output
Test MSE: 460128.72662043874
Test R2: -4413.449687872916
###Markdown
Как-то не очень Попробуем убрать скоррелированные признаки
###Code
X = # Ваш код здесь
y = data['Y1']
X.shape, y.shape
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
###Output
_____no_output_____
###Markdown
Обучим регрессию и посмотрим на качество
###Code
w = my_linear_regression(X_train, y_train)
y_train_pred = predict(X_train, w)
print("Train MSE: ", mean_squared_error(y_train, y_train_pred))
print("Train R2: ", r2_score(y_train, y_train_pred))
y_test_pred = predict(X_test, w)
print("Test MSE: ", mean_squared_error(y_test, y_test_pred))
print("Test R2: ", r2_score(y_test, y_test_pred))
###Output
Test MSE: 11.387213360639421
Test R2: 0.8907517015187721
###Markdown
Юху! Получили алгоритм с хорошим качеством Реализуем линейную регрессию с L2 регуляризацией
###Code
def my_linear_regression(X_train, Y_train, l2=0):
return # Ваш код здесь
###Output
_____no_output_____
###Markdown
Обучим регрессию с регуляризацией и посмотрим на качество
###Code
X = data.drop(['Y1','Y2'], axis=1)
y = data['Y1']
X.shape, y.shape
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
w = my_linear_regression(X_train, y_train, l2=0.001)
y_train_pred = predict(X_train, w)
print("Train MSE: ", mean_squared_error(y_train, y_train_pred))
print("Train R2: ", r2_score(y_train, y_train_pred))
y_test_pred = predict(X_test, w)
print("Test MSE: ", mean_squared_error(y_test, y_test_pred))
print("Test R2: ", r2_score(y_test, y_test_pred))
###Output
Test MSE: 9.4639612678421
Test R2: 0.9092032762837478
|
3he4he/Generate Training Space MCMC.ipynb | ###Markdown
Generate Training Space. For now, we will take the $R$-matrix parameter posteriors from a previous run to define training distributions. In the future, we would like to use AZURE2's best fit and error estimates. Unfortunately, we want to sample ANCs, and the best fit when ANCs are varied is pretty bad.
###Code
import os
import sys
import pickle
from multiprocessing import Pool
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import emcee
import model
import priors
sys.path.append('/home/odell/7Be')
import run
os.environ['OMP_NUM_THREADS'] = '1'
nbr = model.branching_ratio.shape[0]
ncap = model.num_pts_total_capture
nscat = model.num_pts_total_scatter
def ln_prior(theta):
return np.sum([p.logpdf(t) for (p, t) in zip(priors.priors, theta)])
def ln_likelihood(theta):
theta_R = theta[:model.nrpar]
theta_f_capture = theta[model.nrpar:model.nrpar+model.nf_capture]
theta_f_scatter = theta[model.nrpar+model.nf_capture:]
f_capture = model.map_uncertainty(theta_f_capture, model.num_pts_capture)
f_scatter = model.map_uncertainty(theta_f_scatter, model.num_pts_scatter)
f = np.ones(nbr+ncap+nscat)
f[nbr:nbr+ncap] = f_capture
f[nbr+ncap:] = f_scatter
mu= model.calculate(theta_R)
return np.sum(-np.log(np.sqrt(2*np.pi*model.dy)) - 0.5*(f*model.y - mu)**2/(f*model.dy)**2)
def ln_posterior(theta):
lnpi = ln_prior(theta)
if lnpi == -np.inf:
return -np.inf
return lnpi + ln_likelihood(theta)
nw = 2*model.ndim
theta_star = np.load('datfiles/theta_star.npy')[:-1]
p0 = np.array(
[stats.norm(theta_star, 0.01*np.abs(theta_star)).rvs() for _ in range(nw)]
)
moves = [(emcee.moves.DEMove(), 0.2), (emcee.moves.DESnookerMove(), 0.8)]
pool = Pool(processes=16)
sampler = emcee.EnsembleSampler(nw, model.ndim, ln_posterior, moves=moves, pool=pool)
state = sampler.run_mcmc(p0, 500, thin_by=10, tune=True, progress=True)
chain = sampler.get_chain()
plt.plot(chain[:, :, 2]);
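# Added sketch (not in the original notebook): collapse the chain into a flat set of
# posterior samples that can serve as the training space. The discard/thin values and
# the output file name are assumptions, not tuned choices.
flat_samples = sampler.get_chain(discard=100, thin=2, flat=True)
print(flat_samples.shape)
np.save('datfiles/training_space_samples.npy', flat_samples)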
###Output
_____no_output_____ |
Flowers Recognition/.ipynb_checkpoints/flower-classification-model-tensorflow-checkpoint.ipynb | ###Markdown
**This is an interesting dataset for building deep learning neural networks. Here we use the TensorFlow Keras API to build the model.**
###Code
# Import the necessary libraries
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import matplotlib.pyplot as plt
import numpy as np
from os import listdir
from os.path import join
import cv2
import pandas
import os
import random
# Set the path of the input folder
# data = "../input/flowers/flowers/"
data = "../input/flowers/flowers/"
# List out the directories inside the main input folder
folders = os.listdir(data)
print(folders)
# Import the images and resize them to a 128*128 size
# Also generate the corresponding labels
image_names = []
train_labels = []
train_images = []
size = 64,64
for folder in folders:
for file in os.listdir(os.path.join(data,folder)):
if file.endswith("jpg"):
image_names.append(os.path.join(data,folder,file))
train_labels.append(folder)
img = cv2.imread(os.path.join(data,folder,file))
im = cv2.resize(img,size)
train_images.append(im)
else:
continue
# Transform the image array to a numpy type
train = np.array(train_images)
train.shape
# Reduce the RGB values between 0 and 1
train = train.astype('float32') / 255.0
# Extract the labels
label_dummies = pandas.get_dummies(train_labels)
labels = label_dummies.values.argmax(1)
pandas.unique(train_labels)
pandas.unique(labels)
# Shuffle the labels and images randomly for better results
union_list = list(zip(train, labels))
random.shuffle(union_list)
train,labels = zip(*union_list)
# Convert the shuffled list to numpy array type
train = np.array(train)
labels = np.array(labels)
# Develop a sequential model using tensorflow keras
model = keras.Sequential([
keras.layers.Flatten(input_shape=(64,64,3)),
keras.layers.Dense(128, activation=tf.nn.tanh),
keras.layers.Dense(5, activation=tf.nn.softmax)
])
# Compute the model parameters
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Train the model with 5 epochs
model.fit(train,labels, epochs=5)
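# Added sketch (not part of the original notebook): hold out a fraction of the data to
# monitor validation accuracy as well. validation_split is a standard Keras fit() argument;
# the 0.2 fraction is an arbitrary choice.
model.fit(train, labels, epochs=5, validation_split=0.2)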
###Output
_____no_output_____ |
cs231n/assignment1/.ipynb_checkpoints/svm-checkpoint.ipynb | ###Markdown
Multiclass Support Vector Machine exercise*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*In this exercise you will: - implement a fully-vectorized **loss function** for the SVM- implement the fully-vectorized expression for its **analytic gradient**- **check your implementation** using numerical gradient- use a validation set to **tune the learning rate and regularization** strength- **optimize** the loss function with **SGD**- **visualize** the final learned weights
###Code
# Run some setup code for this notebook.
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
# This is a bit of magic to make matplotlib figures appear inline in the
# notebook rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
###Output
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
###Markdown
CIFAR-10 Data Loading and Preprocessing
###Code
# Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print 'Training data shape: ', X_train.shape
print 'Training labels shape: ', y_train.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Split the data into train, val, and test sets. In addition we will
# create a small development set as a subset of the training data;
# we can use this for development so our code runs faster.
num_training = 49000
num_validation = 1000
num_test = 1000
num_dev = 500
# Our validation set will be num_validation points from the original
# training set.
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
# Our training set will be the first num_train points from the original
# training set.
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
# We will also make a development set, which is a small subset of
# the training set.
mask = np.random.choice(num_training, num_dev, replace=False)
X_dev = X_train[mask]
y_dev = y_train[mask]
# We use the first num_test points of the original test set as our
# test set.
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
print 'Train data shape: ', X_train.shape
print 'Train labels shape: ', y_train.shape
print 'Validation data shape: ', X_val.shape
print 'Validation labels shape: ', y_val.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
# Preprocessing: reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
# As a sanity check, print out the shapes of the data
print 'Training data shape: ', X_train.shape
print 'Validation data shape: ', X_val.shape
print 'Test data shape: ', X_test.shape
print 'dev data shape: ', X_dev.shape
# Preprocessing: subtract the mean image
# first: compute the image mean based on the training data
mean_image = np.mean(X_train, axis=0)
print mean_image[:10] # print a few of the elements
plt.figure(figsize=(4,4))
plt.imshow(mean_image.reshape((32,32,3)).astype('uint8')) # visualize the mean image
plt.show()
# second: subtract the mean image from train and test data
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image
# third: append the bias dimension of ones (i.e. bias trick) so that our SVM
# only has to worry about optimizing a single weight matrix W.
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
print X_train.shape, X_val.shape, X_test.shape, X_dev.shape
###Output
(49000, 3073) (1000, 3073) (1000, 3073) (500, 3073)
###Markdown
SVM ClassifierYour code for this section will all be written inside **cs231n/classifiers/linear_svm.py**. As you can see, we have prefilled the function `compute_loss_naive` which uses for loops to evaluate the multiclass SVM loss function.
###Code
# Evaluate the naive implementation of the loss we provided for you:
from cs231n.classifiers.linear_svm import svm_loss_naive
import time
# generate a random SVM weight matrix of small numbers
W = np.random.randn(3073, 10) * 0.0001
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.00001)
print 'loss: %f' % (loss, )
###Output
loss: 9.056040
###Markdown
The `grad` returned from the function above is right now all zero. Derive the gradient for the SVM cost function and implement it inline inside the function `svm_loss_naive`. You will find it helpful to interleave your new code inside the existing function. To check that you have implemented the gradient correctly, you can numerically estimate the gradient of the loss function and compare the numeric estimate to the gradient that you computed. We have provided code that does this for you:
###Code
# Once you've implemented the gradient, recompute it with the code below
# and gradient check it with the function we provided for you
# Compute the loss and its gradient at W.
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.0)
# Numerically compute the gradient along several randomly chosen dimensions, and
# compare them with your analytically computed gradient. The numbers should match
# almost exactly along all dimensions.
from cs231n.gradient_check import grad_check_sparse
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad)
# do the gradient check once again with regularization turned on
# you didn't forget the regularization gradient did you?
loss, grad = svm_loss_naive(W, X_dev, y_dev, 1e2)
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 1e2)[0]
grad_numerical = grad_check_sparse(f, W, grad)
###Output
numerical: -21.030787 analytic: -21.030787, relative error: 6.343291e-13
numerical: -55.911471 analytic: -55.911471, relative error: 4.348354e-12
numerical: -23.563209 analytic: -23.563209, relative error: 3.209687e-11
numerical: -14.022300 analytic: -14.022300, relative error: 6.945531e-12
numerical: -5.166736 analytic: -5.166736, relative error: 3.778776e-11
numerical: 4.190686 analytic: 4.190686, relative error: 7.748555e-11
numerical: -13.460558 analytic: -13.460558, relative error: 3.215904e-11
numerical: -25.034816 analytic: -25.034816, relative error: 1.097007e-11
numerical: 8.879367 analytic: 8.879367, relative error: 1.403402e-11
numerical: 16.101044 analytic: 16.101044, relative error: 2.348291e-11
numerical: -25.579323 analytic: -25.579323, relative error: 5.518102e-13
numerical: -7.857061 analytic: -7.857061, relative error: 7.759999e-12
numerical: 0.457842 analytic: 0.457842, relative error: 9.200583e-10
numerical: -18.783472 analytic: -18.783472, relative error: 9.624315e-12
numerical: -9.040995 analytic: -9.040995, relative error: 4.679635e-11
numerical: -28.495001 analytic: -28.495001, relative error: 1.705602e-12
numerical: 4.619240 analytic: 4.619240, relative error: 3.027572e-11
numerical: -5.114085 analytic: -5.114085, relative error: 2.982017e-11
numerical: 3.527877 analytic: 3.527877, relative error: 1.056164e-10
numerical: -7.172330 analytic: -7.172330, relative error: 2.331349e-11
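###Markdown
For reference, the numeric estimates printed above are typically obtained with a centered difference at a handful of randomly chosen entries of W, which is what `grad_check_sparse` does. A minimal sketch of the idea (illustrative; `numeric_grad_entry` is a hypothetical helper, not part of the assignment code):
###Code
def numeric_grad_entry(f, W, ix, h=1e-5):
    # centered difference (f(W + h) - f(W - h)) / (2h) at a single entry of W
    old_val = W[ix]
    W[ix] = old_val + h
    fxph = f(W)
    W[ix] = old_val - h
    fxmh = f(W)
    W[ix] = old_val            # restore the original value
    return (fxph - fxmh) / (2.0 * h)

# e.g. numeric_grad_entry(f, W, (10, 3)) for the loss closure f defined above
###Output
_____no_output_____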
###Markdown
Inline Question 1: It is possible that once in a while a dimension in the gradcheck will not match exactly. What could such a discrepancy be caused by? Is it a reason for concern? What is a simple example in one dimension where a gradient check could fail? *Hint: the SVM loss function is not, strictly speaking, differentiable.* **Your Answer:** *The hinge loss is not differentiable at zero (the kink of max(0, x)), so when a margin lands at or numerically straddles that point, the numeric and analytic gradients can disagree. This is expected and not a reason for concern.*
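A hypothetical one-dimensional illustration of the hint: for f(x) = max(0, x), a centered-difference estimate taken just next to the kink averages the slopes of both sides and disagrees with the one-sided analytic gradient (the values of x0 and h are arbitrary):
###Code
f = lambda x: max(0.0, x)
x0, h = 1e-6, 1e-5                            # x0 sits just to the right of the kink at 0
numeric = (f(x0 + h) - f(x0 - h)) / (2 * h)   # ~0.55, because the difference straddles the kink
analytic = 1.0 if x0 > 0 else 0.0             # one-sided slope at x0 is 1.0
numeric, analytic
###Output
_____no_output_____
###Markdown
With the gradient verified, the next cells move on to the vectorized implementation.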
###Code
# Next implement the function svm_loss_vectorized; for now only compute the loss;
# we will implement the gradient in a moment.
tic = time.time()
loss_naive, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'Naive loss: %e computed in %fs' % (loss_naive, toc - tic)
from cs231n.classifiers.linear_svm import svm_loss_vectorized
tic = time.time()
loss_vectorized, _ = svm_loss_vectorized(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'Vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic)
# The losses should match but your vectorized implementation should be much faster.
print 'difference: %f' % (loss_naive - loss_vectorized)
# Complete the implementation of svm_loss_vectorized, and compute the gradient
# of the loss function in a vectorized way.
# The naive implementation and the vectorized implementation should match, but
# the vectorized version should still be much faster.
tic = time.time()
_, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'Naive loss and gradient: computed in %fs' % (toc - tic)
tic = time.time()
_, grad_vectorized = svm_loss_vectorized(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'Vectorized loss and gradient: computed in %fs' % (toc - tic)
# The loss is a single number, so it is easy to compare the values computed
# by the two implementations. The gradient on the other hand is a matrix, so
# we use the Frobenius norm to compare them.
difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print 'difference: %f' % difference
###Output
Naive loss and gradient: computed in 0.191279s
Vectorized loss and gradient: computed in 0.009903s
difference: 0.000000
###Markdown
Stochastic Gradient Descent. We now have vectorized and efficient expressions for the loss and the gradient, and our analytic gradient matches the numerical gradient. We are therefore ready to use SGD to minimize the loss.
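For orientation, the body of `LinearClassifier.train()` is expected to repeat a minibatch SGD step like the sketch below on each iteration (the function name, signature and with-replacement sampling are illustrative assumptions, not the graded solution):
###Code
import numpy as np

def sgd_step(W, X, y, loss_fn, learning_rate, reg, batch_size):
    # sample a minibatch (with replacement, which is faster and works well in practice)
    idx = np.random.choice(X.shape[0], batch_size, replace=True)
    loss, grad = loss_fn(W, X[idx], y[idx], reg)   # e.g. the vectorized SVM loss
    W -= learning_rate * grad                      # vanilla SGD update
    return W, loss
###Output
_____no_output_____
###Markdown
The cells below train the real LinearSVM on CIFAR-10 and plot the loss history.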
###Code
# In the file linear_classifier.py, implement SGD in the function
# LinearClassifier.train() and then run it with the code below.
from cs231n.classifiers import LinearSVM
svm = LinearSVM()
tic = time.time()
loss_hist = svm.train(X_train, y_train, learning_rate=1e-7, reg=5e4,
num_iters=1500, verbose=True)
toc = time.time()
print 'That took %fs' % (toc - tic)
# A useful debugging strategy is to plot the loss as a function of
# iteration number:
plt.plot(loss_hist)
plt.xlabel('Iteration number')
plt.ylabel('Loss value')
plt.show()
# Write the LinearSVM.predict function and evaluate the performance on both the
# training and validation set
y_train_pred = svm.predict(X_train)
print 'training accuracy: %f' % (np.mean(y_train == y_train_pred), )
y_val_pred = svm.predict(X_val)
print 'validation accuracy: %f' % (np.mean(y_val == y_val_pred), )
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of about 0.4 on the validation set.
learning_rates = [1e-7, 5e-6]
regularization_strengths = [5e4, 1e5]
# results is dictionary mapping tuples of the form
# (learning_rate, regularization_strength) to tuples of the form
# (training_accuracy, validation_accuracy). The accuracy is simply the fraction
# of data points that are correctly classified.
results = {}
best_val = -1 # The highest validation accuracy that we have seen so far.
best_svm = None # The LinearSVM object that achieved the highest validation rate.
################################################################################
# TODO: #
# Write code that chooses the best hyperparameters by tuning on the validation #
# set. For each combination of hyperparameters, train a linear SVM on the #
# training set, compute its accuracy on the training and validation sets, and #
# store these numbers in the results dictionary. In addition, store the best #
# validation accuracy in best_val and the LinearSVM object that achieves this #
# accuracy in best_svm. #
# #
# Hint: You should use a small value for num_iters as you develop your #
# validation code so that the SVMs don't take much time to train; once you are #
# confident that your validation code works, you should rerun the validation #
# code with a larger value for num_iters. #
################################################################################
pass
for lr in learning_rates:
for rs in regularization_strengths:
svm = LinearSVM()
loss_hist = svm.train(X_train, y_train, learning_rate=lr, reg=rs,
num_iters=1500, verbose=True)
y_train_pred = svm.predict(X_train)
train_acc = np.mean(y_train == y_train_pred)
y_val_pred = svm.predict(X_val)
val_acc = np.mean(y_val == y_val_pred)
results[(lr, rs)] = (train_acc, val_acc)
if val_acc > best_val:
best_val = val_acc
best_svm = svm
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print 'lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy)
print 'best validation accuracy achieved during cross-validation: %f' % best_val
# Visualize the cross-validation results
import math
x_scatter = [math.log10(x[0]) for x in results]
y_scatter = [math.log10(x[1]) for x in results]
# plot training accuracy
marker_size = 100
colors = [results[x][0] for x in results]
plt.subplot(2, 1, 1)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 training accuracy')
# plot validation accuracy
colors = [results[x][1] for x in results] # default size of markers is 20
plt.subplot(2, 1, 2)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 validation accuracy')
plt.show()
# Evaluate the best svm on test set
y_test_pred = best_svm.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print 'linear SVM on raw pixels final test set accuracy: %f' % test_accuracy
# Visualize the learned weights for each class.
# Depending on your choice of learning rate and regularization strength, these may
# or may not be nice to look at.
w = best_svm.W[:-1,:] # strip out the bias
w = w.reshape(32, 32, 3, 10)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in xrange(10):
plt.subplot(2, 5, i + 1)
# Rescale the weights to be between 0 and 255
wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min)
plt.imshow(wimg.astype('uint8'))
plt.axis('off')
plt.title(classes[i])
###Output
_____no_output_____ |
jupyter/chapter30-data_structure.ipynb | ###Markdown
In Java, plenty of data structures are already available; they are grouped under the name "the collection API". To create a simple list:
###Code
var list = List.of(1, 2, 3);
###Output
_____no_output_____
###Markdown
a list is an indexed data structure that stores objects in their order of insertion. get() accesses an element at a given index
###Code
var firstElement = list.get(0);
var lastElement = list.get(list.size() - 1);
###Output
_____no_output_____
###Markdown
contains() returns true if a value is contained in the list
###Code
System.out.println(list.contains(4));
###Output
_____no_output_____
###Markdown
indexOf returns the first index of the element in the list
###Code
System.out.println(list.indexOf(2));
###Output
_____no_output_____
###Markdown
to loop over the elements of a list, we have a special syntax using the keyword 'for'
###Code
var countries = List.of("UK", "US", "France");
for(var country: countries) {
System.out.println(country);
}
###Output
_____no_output_____
###Markdown
you can also loop over the elements using the method forEach(). If you don't understand this one, don't panic, we will see it later
###Code
countries.forEach(country -> System.out.println(country));
###Output
_____no_output_____
###Markdown
a list also defines the methods equals() and toString(), so you can print a list or test whether two lists are equal
###Code
System.out.println(countries);
System.out.println(list.equals(countries));
###Output
_____no_output_____
###Markdown
in Java, depending on how you create a collection, it can or cannot be changed after creation. Implementations that allow mutation after creation are called modifiable. For example, the list above (created with the static method of()) is not modifiable
###Code
//countries.set(0, "Poland") // throws an UnsupportedOperationException
###Output
_____no_output_____
###Markdown
To create a modifiable list, we use an ArrayList, created using the operator 'new'. Here, because there is no element in the list, the compiler has no way to know the type of the elements, so we have to provide it between angle brackets ('<' and '>')
###Code
var modifiableCountries = new ArrayList<String>();
###Output
_____no_output_____
###Markdown
To add elements to a list, we have the method add()
###Code
modifiableCountries.add("UK");
modifiableCountries.add("US");
modifiableCountries.add("France");
modifiableCountries.add("Poland");
###Output
_____no_output_____
###Markdown
to remove an element, we have the method remove()
###Code
modifiableCountries.remove("UK");
###Output
_____no_output_____
###Markdown
an unmodifiable list and a modifiable list have the same set of methods, so you can loop over the modifiable list the same way
###Code
for(var country: modifiableCountries) {
System.out.println(country);
}
###Output
_____no_output_____
###Markdown
You can create a modifiable list from an unmodifiable one using new ArrayList with the unmodifiable list as argument. In that case you don't have to specify the type of the elements: the compiler already knows the type of the list, hence the empty '<>' (diamond)
###Code
var modifiableList = new ArrayList<>(list);
System.out.println(modifiableList);
###Output
_____no_output_____
###Markdown
Lists are not the only data structure in Java; you also have set, queue and map. A set is a collection in which you cannot store the same object twice (two objects are the same if equals() returns true). A queue adds or removes objects at the head or at the tail of the queue (so a stack is a queue, a FIFO is a queue, etc.). A map is a dictionary that associates a key (which is unique) to a value. To create an unmodifiable set, use the static method of()
###Code
var authors = Set.of("J.R.R. Tolkien", "Philip K. Dick", "George R.R. Martin");
System.out.println(authors);
###Output
_____no_output_____
###Markdown
elements inside a set are organized in a way that makes contains() fast
###Code
System.out.println(authors.contains(""Philip K. Dick""));
###Output
_____no_output_____
VAE_VectorQuantized_single_cell.ipynb | ###Markdown
VQ-VAE for clustering data. References: Neural Discrete Representation Learning: https://arxiv.org/abs/1711.00937 ; Generating Diverse High-Fidelity Images with VQ-VAE-2: https://arxiv.org/abs/1906.00446 ; VQ-VAE example: https://github.com/deepmind/sonnet/blob/master/sonnet/examples/vqvae_example.ipynb
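At the core of VQ-VAE is a vector-quantization bottleneck: each encoder output is snapped to its nearest codebook embedding, and the resulting discrete codes act as cluster assignments. A minimal NumPy illustration of that nearest-neighbour lookup (toy shapes chosen to match the model built later; the sonnet VectorQuantizerEMA used below additionally handles the codebook updates and the straight-through gradient):
###Code
import numpy as np
rng = np.random.RandomState(0)
codebook = rng.randn(20, 16)      # num_embeddings x embedding_dim
z_e = rng.randn(5, 16)            # encoder outputs for 5 cells
# squared Euclidean distance from each encoder output to every codebook vector
d = ((z_e[:, None, :] - codebook[None, :, :]) ** 2).sum(axis=-1)
codes = d.argmin(axis=1)          # discrete code index per cell
z_q = codebook[codes]             # quantized latent passed on to the decoder
###Output
_____no_output_____
###Markdown
First, install and import the dependencies.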
###Code
!pip install scanpy
import pandas as pd
import scanpy as sc
from scipy.sparse import csr_matrix
import h5py
#Step 1: import dependencies
from tensorflow.keras import layers
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from keras import regularizers
import time
from __future__ import division
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
import seaborn as sns; sns.set()
import keras.backend as K
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
%matplotlib inline
plt.style.use('ggplot')
from __future__ import print_function
import os
import subprocess
import tempfile
import matplotlib.pyplot as plt
import numpy as np
import sonnet as snt
import tensorflow as tf
import tarfile
from six.moves import cPickle
from six.moves import urllib
from six.moves import xrange
###Output
_____no_output_____
###Markdown
Create some data
###Code
def import_data_fn(string):
    data_mat = h5py.File(string, 'r')
x_data = np.array(data_mat['X'])
y_data = np.array(data_mat['Y'])
return x_data, y_data, np.where(x_data > 0.0, 1, 0)
X_1, y_1, Z_1 = import_data_fn('10X_PBMC.h5')
print(X_1.shape)
X_2, y_2, Z_2 = import_data_fn('mouse_bladder_cell.h5')
print(X_2.shape)
# X_3, y_3, Z_3 = import_data_fn('worm_neuron_cell.h5')
# X_3, y_3, Z_3 = np.delete(X_3, 2682, axis=0), np.delete(y_3, 2682, axis=0), np.delete(Z_3, 2682, axis=0)
# print(X_3.shape)
plt.plot(np.sort(np.mean(X_1, axis=1)/np.median(np.mean(X_1, axis=1))), 'o');
plt.plot(np.sort(np.mean(X_2, axis=1)/np.median(np.mean(X_2, axis=1))), 'o');
plt.plot(np.sort(np.mean(X_3, axis=1)/np.median(np.mean(X_3, axis=1))), 'o');
plt.xlabel('cells');
plt.ylabel('mean over genes/median');
plt.legend(('pbmc', 'bladder', 'neuron'))
plt.scatter(np.mean(X_1, axis=1)/np.median(np.mean(X_1, axis=1)), np.sum(Z_1, axis=1))
plt.scatter(np.mean(X_2, axis=1)/np.median(np.mean(X_2, axis=1)), np.sum(Z_2, axis=1))
plt.scatter(np.mean(X_3, axis=1)/np.median(np.mean(X_3, axis=1)), np.sum(Z_3, axis=1))
plt.xlabel('mean over genes/median')
plt.ylabel('sum of non-zero genes')
plt.legend(('pbmc', 'bladder', 'neuron'))
p1 = []
pn1 = []
for i in range(50):
lim = i
pn1.append(lim)
n = Z_1.shape[1]-np.argwhere(np.sum(Z_1, axis=0)>lim).shape[0]
p1.append(n)
p2 = []
pn2 = []
for i in range(50):
lim = i
pn2.append(lim)
n = Z_2.shape[1]-np.argwhere(np.sum(Z_2, axis=0)>lim).shape[0]
p2.append(n)
# p3 = []
# pn3 = []
# for i in range(50):
# lim = i
# pn3.append(lim)
# n = Z_3.shape[1]-np.argwhere(np.sum(Z_3, axis=0)>lim).shape[0]
# p3.append(n)
plt.scatter(pn1, p1);
plt.scatter(pn2, p2);
plt.scatter(pn3, p3);
plt.xlabel('n cells');
plt.ylabel('number of genes with n cells or less');
plt.legend(('pbmc', 'bladder', 'neuron'))
def remove_genes(X, Z, n):
indx = np.argwhere(np.sum(Z, axis=0)>n)
X1 = np.squeeze(X[:, indx])
Z1 = np.squeeze(Z[:, indx])
return X1, Z1
print(X_1.shape)
print(X_2.shape)
print(X_3.shape)
X_1, Z_1 = remove_genes(X_1, Z_1, 5)
X_2, Z_2 = remove_genes(X_2, Z_2, 5)
# X_3, Z_3 = remove_genes(X_3, Z_3, 5)
print(X_1.shape)
print(X_2.shape)
# print(X_3.shape)
xp = np.linspace(-2,7)
plt.scatter(np.log(np.mean(X_1, axis=0)), np.log(np.var(X_1, axis=0)));
plt.scatter(xp, xp);
plt.xlabel('log mean over cells');
plt.ylabel('log var over cells');
plt.legend(('pbmc', 'mean=var'));
xp = np.linspace(-2,4)
plt.scatter(np.log(np.mean(X_2, axis=0)), np.log(np.var(X_2, axis=0)));
plt.scatter(xp, xp);
plt.xlabel('log mean over cells');
plt.ylabel('log var over cells');
plt.legend(('bladder', 'mean=var'));
xp = np.linspace(-3,2)
plt.scatter(np.log(np.mean(X_3, axis=0)), np.log(np.var(X_3, axis=0)));
plt.scatter(xp, xp);
plt.xlabel('log mean over cells');
plt.ylabel('log var over cells');
plt.legend(('neuron', 'mean=var'));
def adjust(X, do):
if do:
return (X-np.mean(X, axis=0))/np.std(X, axis=0)
else:
return X
s_1 = np.sum(X_1, axis=1)
# s_2 = np.sum(X_2, axis=1)
# s_3 = np.sum(X_3, axis=1)
###Output
_____no_output_____
###Markdown
Define Encoder & Decoder
###Code
class Encoder(snt.AbstractModule):
def __init__(self, name='encoder'):
super(Encoder, self).__init__(name=name)
def _build(self, x):
h = snt.Linear(256)(x)
h = tf.nn.leaky_relu(h)
h = snt.Linear(16)(h)
h = tf.reshape(h, shape=(-1, 16))
return h
class Decoder(snt.AbstractModule):
def __init__(self, output_dim, name='decoder'):
super(Decoder, self).__init__(name=name)
self.output_dim = output_dim
def _build(self, x):
h = tf.reshape(x, shape=(-1, 16))
h = snt.Linear(16)(h)
h = tf.nn.leaky_relu(h)
h = snt.Linear(256)(h)
h = tf.nn.leaky_relu(h)
h = snt.Linear(self.output_dim, use_bias=False)(h)
h = tf.clip_by_value(tf.math.softplus(h), 1e-5, 1e6)
return h
###Output
_____no_output_____
###Markdown
Build Graph and train
###Code
X_2.shape
s_2.shape
X_2.shape
randc = np.random.choice(X_2.shape[0], size=2746, replace=False)
X = adjust(X_2, False)[randc, :]
s = np.sum(X_2, axis=-1)[randc]
s = s.reshape(-1,1)
s.shape
X.shape
tf.reset_default_graph()
# Set hyper-parameters.
batch_size = 25
num_training_updates = 40000
embedding_dim = 16
# The higher this value, the higher the capacity in the information bottleneck.
num_embeddings = 20
# commitment_cost should be set appropriately. It's often useful to try a couple
# of values. It mostly depends on the scale of the reconstruction cost
# (log p(x|z)). So if the reconstruction cost is 100x higher, the
# commitment_cost should also be multiplied with the same amount.
commitment_cost = 0.25
# Use EMA updates for the codebook (instead of the Adam optimizer).
# This typically converges faster, and makes the model less dependent on choice
# of the optimizer. In the VQ-VAE paper EMA updates were not used (but was
# developed afterwards). See Appendix of the paper for more details.
vq_use_ema = True
# This is only used for EMA updates.
decay = 0.99
learning_rate = 9e-4
# Data Loading.
train_dataset_iterator = (
tf.data.Dataset.from_tensor_slices((X, X, s))
.shuffle(10000)
.repeat(-1) # repeat indefinitely
.batch(batch_size)).make_one_shot_iterator()
train_dataset_batch = train_dataset_iterator.get_next()
def get_samples(sess, subset='train'):
return sess.run(train_dataset_batch)
# Build modules.
encoder = Encoder()
decoder = Decoder(X.shape[1])
if vq_use_ema:
vq_vae = snt.nets.VectorQuantizerEMA(
embedding_dim=embedding_dim,
num_embeddings=num_embeddings,
commitment_cost=commitment_cost,
decay=decay)
else:
vq_vae = snt.nets.VectorQuantizer(
embedding_dim=embedding_dim,
num_embeddings=num_embeddings,
commitment_cost=commitment_cost)
# Process inputs with conv stack, finishing with 1x1 to get to correct size.
x_in = tf.placeholder(tf.float32, shape=(None, X.shape[1]))
x_out = tf.placeholder(tf.float32, shape=(None, X.shape[1]))
s_mult = tf.placeholder(tf.float32, shape=(None, 1))
z = encoder(x_in)
# For training
vq_output_train = vq_vae(z, is_training=True)
x_recon = decoder(vq_output_train["quantize"])
#x_recon = decoder(z)
x_recon = s_mult*x_recon
recon_error = -tf.reduce_mean(tfd.Poisson(rate=x_recon).log_prob(x_out))
#recon_error = tf.reduce_mean(tf.square(x_out-x_recon))
loss = recon_error + vq_output_train["loss"]
# For evaluation, make sure is_training=False!
vq_output_eval = vq_vae(z, is_training=False)
x_recon_eval = decoder(vq_output_eval["quantize"])
# The following is a useful value to track during training.
# It indicates how many codes are 'active' on average.
perplexity = vq_output_train["perplexity"]
# Create optimizer and TF session.
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss)
sess = tf.train.SingularMonitoredSession()
# Train.
train_res_recon_error = []
train_res_perplexity = []
for i in xrange(num_training_updates):
a, b, c = get_samples(sess)
feed_dict = {x_in: a, x_out: b, s_mult: c}
results = sess.run([train_op, recon_error, perplexity],
feed_dict=feed_dict)
train_res_recon_error.append(results[1])
train_res_perplexity.append(results[2])
if (i+1) % 500 == 0:
print('%d iterations' % (i+1))
print('recon_error: %.3f' % np.mean(train_res_recon_error[-200:]))
print('perplexity: %.3f' % np.mean(train_res_perplexity[-200:]))
print()
###Output
500 iterations
recon_error: 0.979
perplexity: 3.839
1000 iterations
recon_error: 0.575
perplexity: 4.278
1500 iterations
recon_error: 0.461
perplexity: 4.253
2000 iterations
recon_error: 0.408
perplexity: 4.270
2500 iterations
recon_error: 0.383
perplexity: 4.274
3000 iterations
recon_error: 0.365
perplexity: 4.297
3500 iterations
recon_error: 0.948
perplexity: 4.295
4000 iterations
recon_error: 1.256
perplexity: 5.583
4500 iterations
recon_error: 0.675
perplexity: 7.136
5000 iterations
recon_error: 0.629
perplexity: 7.118
5500 iterations
recon_error: 0.561
perplexity: 7.129
6000 iterations
recon_error: 0.489
perplexity: 7.305
6500 iterations
recon_error: 0.538
perplexity: 7.800
7000 iterations
recon_error: 0.497
perplexity: 7.764
7500 iterations
recon_error: 0.608
perplexity: 6.514
8000 iterations
recon_error: 0.531
perplexity: 6.900
8500 iterations
recon_error: 0.477
perplexity: 7.139
9000 iterations
recon_error: 0.462
perplexity: 7.241
9500 iterations
recon_error: 0.458
perplexity: 7.220
10000 iterations
recon_error: 0.645
perplexity: 7.099
10500 iterations
recon_error: 0.629
perplexity: 9.266
11000 iterations
recon_error: 0.580
perplexity: 9.331
11500 iterations
recon_error: 0.567
perplexity: 9.291
12000 iterations
recon_error: 0.560
perplexity: 9.157
12500 iterations
recon_error: 0.556
perplexity: 9.247
13000 iterations
recon_error: 0.547
perplexity: 9.452
13500 iterations
recon_error: 0.528
perplexity: 9.246
14000 iterations
recon_error: 0.545
perplexity: 9.357
14500 iterations
recon_error: 0.515
perplexity: 9.197
15000 iterations
recon_error: 0.529
perplexity: 8.862
###Markdown
Plot loss
###Code
f = plt.figure(figsize=(16,8))
ax = f.add_subplot(1,2,1)
ax.plot(train_res_recon_error)
ax.set_yscale('log')
ax.set_title('NMSE.')
ax = f.add_subplot(1,2,2)
ax.plot(train_res_perplexity)
ax.set_title('Average codebook usage (perplexity).');
###Output
_____no_output_____
###Markdown
View latent space
###Code
# Latent space
z_, vq_output_eval_, x_recon_, z_d_ = sess.run([z, vq_output_eval, x_recon, vq_output_train["quantize"]], feed_dict={x_in: X, x_out: X, s_mult: s})
plt.figure(figsize=(9,9))
plt.scatter(z_[:,0], z_[:,1], c=vq_output_eval_['encoding_indices'], cmap='tab20', s=10.0);
plt.scatter(z_d_[:,0], z_d_[:,1], c='red', s=20.0);
true_labels = np.concatenate((y_2[randc][:,None],y_2[randc][:,None], y_2[randc][:,None],y_2[randc][:,None], y_2[randc][:,None],y_2[randc][:,None], y_2[randc][:,None],y_2[randc][:,None], y_2[randc][:,None],y_2[randc][:,None], y_2[randc][:,None],y_2[randc][:,None], y_2[randc][:,None],y_2[randc][:,None], y_2[randc][:,None],y_2[randc][:,None]), axis=1)
plt.figure(figsize=(9,9))
plt.scatter(z_[:,0], z_[:, 1], cmap='tab20', c=np.reshape(y_2[randc], -1), s=10.0);
#plt.scatter(z_d_[:,0], z_d_[:,1], c='red', s=20.0);
###Output
_____no_output_____ |
tests/pruning.ipynb | ###Markdown
1. MPS with Iterative Pruning
###Code
mps = initialize_mps()
imgs = train_set[:100]
_imgs = []
for img in imgs:
_imgs.append( tens_picture(img) )
_imgs = np.array(_imgs)
img_cache = left_right_cache(mps, _imgs)
print(mps.max_bond)
for lr,comp in zip([0.1,0.08,0.05,0.03,0.02,0.01,0.01],[60,60,60,120,120,120,240]):
learning_epoch_cached(mps, _imgs, 1, lr, img_cache, batch_size = 100, max_bond = 512, cutoff = 1e-8)
print(mps.max_bond)
compress(mps, comp)
img_cache = left_right_cache(mps, _imgs)
computeNLL(mps, imgs)
###Output
_____no_output_____
###Markdown
2. MPS without Iterative Pruning
###Code
mps2 = initialize_mps()
img_cache2 = left_right_cache(mps2, _imgs)
for lr in [0.1,0.08,0.05,0.03,0.02,0.01,0.01]:
learning_epoch_cached(mps2, _imgs, 1, lr, img_cache2, batch_size = 100, max_bond = 512, cutoff = 1e-8)
computeNLL(mps2, imgs)
###Output
_____no_output_____
###Markdown
3. MPS3 compression of MPS2 after training
###Code
mps3 = compress_copy(mps2,240)
computeNLL(mps3, imgs)
plot_dbonds(mps)
plot_dbonds(mps2)
plot_dbonds(mps3)
plot_img(generate_sample(mps))
plot_img(generate_sample(mps2))
plot_img(generate_sample(mps3))
corr_img = partial_removal_img(test_set[3], fraction = .3, axis = 0)
plot_img(corr_img)
plot_rec(corr_img, reconstruct(mps, corr_img))
plot_rec(corr_img, reconstruct(mps2, corr_img))
plot_rec(corr_img, reconstruct(mps3, corr_img))
corr_img2 = partial_removal_img(train_set[51], fraction = .3, axis = 0)
plot_img(corr_img2)
plot_rec(corr_img2, reconstruct(mps, corr_img2))
plot_rec(corr_img2, reconstruct(mps2, corr_img2))
plot_rec(corr_img2, reconstruct(mps3, corr_img2))
#save_mps_sets(mps, train_set[:100], './pruning_test/iterative_pruning', test_set[:10])
#save_mps_sets(mps2, train_set[:100], './pruning_test/nO_pruning', test_set[:10])
#save_mps_sets(mps3, train_set[:100], './pruning_test/compression_after_training', test_set[:10])
#########
# LOAD
#########
#mps, train_set, test_set = load_mps_sets('./pruning_test/iterative_pruning')
#mps2, train_set, test_set = load_mps_sets('./pruning_test/nO_pruning')
#mps3, train_set, test_set = load_mps_sets('./pruning_test/compression_after_training')
###Output
_____no_output_____ |
Solutions/2-advanced-solution.ipynb | ###Markdown
Advanced Plotting Methods: - Introduce techniques for adding an extra data dimension to a plot with colour and symbols - Highlighting and layering data on a plot - Post-production editing --- 1. Import Packages
###Code
import matplotlib.pyplot as plt
import pandas as pd
###Output
_____no_output_____
###Markdown
--- 2. Import Data Using Pandas
###Code
# import excel data as a Pandas dataframe
tvz = pd.read_excel('../Data/TVZ.xlsx', sheet_name='Data')
# note the file path is set for being in the solutions folder
###Output
_____no_output_____
###Markdown
--- 3. Adding Another Dimension with Colour Scales. We can add a third dimension to our x/y plots by applying a colour scale (colour map).
###Code
# assign Pandas dataframe columns
x = tvz.EffectivePorosity_VolPercent
y = tvz.SampleDepth_mMD
z = tvz.Vp_mps
fig, ax = plt.subplots(1,1,figsize=(6,3))
ax.scatter(
x,
y,
c=z,
cmap='magma',
)
# plot the colour bar
# cbar = fig.colorbar(cax) # will not work until we adjust the code above
# scatter plot kwargs
# s=0.5 # size, float
# c='k' # named colour, hex code, or numeric values (see advanced notebook)
# cmap='colour-map-name', append _r to the name to reverse the colour map
# Perceptually uniform colour maps
# these are reccomended
# viridis
# plasma
# inferno
# magma
# cividis
# Sequential colourmaps
# Greys
# Blues
# GnBu
# OrRd
# Other colour maps
# these are not reccomended
# hsv
# gist_rainbow
# rainbow
# colour bar kwargs
# shrink=0.5 # make the bar shorter
# fraction=0.15 # amount of axis that the bar will occupy
# aspect=10 # make the bar fatter using smaller numbers
ax.set_ylim(3500,0)
ax.set_ylabel('Depth [m-VD]')
ax.set_xlabel('Porosity [v/v]')
###Output
_____no_output_____
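###Markdown
One way to make the colour bar work, as hinted in the comments above, is to keep the mappable returned by scatter and pass it to fig.colorbar. This is a sketch rather than the only intended solution, and the colour-bar label text is an assumption based on the Vp_mps column:
###Code
fig, ax = plt.subplots(1, 1, figsize=(6, 3))
cax = ax.scatter(x, y, c=z, cmap='magma')   # keep the returned mappable
cbar = fig.colorbar(cax)                    # now the colour bar has something to draw from
cbar.set_label('Vp [m/s]')
ax.set_ylim(3500, 0)
ax.set_ylabel('Depth [m-VD]')
ax.set_xlabel('Porosity [v/v]')
###Output
_____no_output_____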
###Markdown
--- 4. Colour Scales: The Puzzling and the Perceptually Uniform. There is a dizzying array of colour scales to choose from and it's easy to get carried away. We must choose wisely or we risk distorting how our data are perceived. Refer to [this paper](https://www.nature.com/articles/s41467-020-19160-7) to get a feel for how data are distorted by colour. Explore how perception is distorted by colour scale selection using [this application](https://github.com/mycarta/Colormap-distorsions-Panel-app#how-to-use-the-app) (Hint: click on 'launch app' and wait for binder to load). Matplotlib provides [a useful resource](https://matplotlib.org/stable/tutorials/colors/colormaps.html) that helps with colour scale selection. **Rule of thumb: Avoid rainbow and prefer perceptually uniform.**
###Code
from IPython import display
display.Image("https://imgs.xkcd.com/comics/painbow_award.png")
###Output
_____no_output_____
###Markdown
--- 5. Advanced Scatter Plots with Seaborn. Seaborn is a powerful plotting tool that works well with Pandas. It's ideal for generating static plots for reports, presentations and publication. [Seaborn example gallery](https://seaborn.pydata.org/examples/index.html)
###Code
import seaborn as sns
fig, ax = plt.subplots(1,1,figsize=(8,3))
sns.scatterplot(
x='EffectivePorosity_VolPercent', # Pandas column name for x data
y='SampleDepth_mMD', # Pandas column name for y data
data=tvz, # Pandas dataframe name
ax=ax, # name of the axis that the seaborn plot goes in
s=90, # marker size
hue='RockCode', # marker colour
style='AlterationAssemblage', # marker style
legend=True, # turns legend on and off
)
ax.set_xlim(0,60)
ax.set_ylim(3500,0)
ax.set_xlabel('Porosity [v/v]')
ax.set_ylabel('Depth [m-VD]')
# Place legend right of the axis
ax.legend(
loc='center left',
bbox_to_anchor=(1.05, 0.45),
ncol=1,
);
###Output
_____no_output_____
###Markdown
--- 6. Plot Element Layering. Sometimes you want to place one set of data on top of another in a plot, or all of the data on top of the grid. We use zorder to do this. There is some issue with zorder and grids, so a bit of trial and error is typically required.
###Code
x = [3,5]
y = [10,12]
fig, ax = plt.subplots(1,1,figsize=(5,5))
ax.plot(
x,
y,
linewidth=5,
color='r',
)
ax.scatter(
x,
y,
marker='o',
s=400,
zorder=4
)
ax.grid(linewidth=3)
###Output
_____no_output_____
###Markdown
--- 7. Highlights and Fills. Three methods that can be used to highlight data on a plot. 7.1 Fill Between Lines. Fill between lines in either the [x direction](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.fill_betweenx.html#matplotlib.pyplot.fill_betweenx) or [y direction](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.fill_between.html)
###Code
fig, (ax0, ax1) = plt.subplots(1,2,figsize=(6,3), sharey=True)
plt.tight_layout()
# fill between lines in the y axis direction
x = [2, 3, 4, 5, 6]
y1 = [22, 30, 20, 33, 40]
y2 = [30, 35, 25, 20, 30]
ax0.plot(x, y1)
ax0.plot(x, y2)
ax0.fill_between(x, y1, y2, alpha=0.2)
# Fill between lines in the x axis direction
y = [20, 25, 30, 35, 40]
x1 = [2, 2, 3, 6, 4]
x2 = [5, 4, 4, 5, 5]
ax1.plot(x1, y)
ax1.plot(x2, y)
ax1.fill_betweenx(y, x1, x2, alpha=0.2)
###Output
_____no_output_____
###Markdown
7.2 Horizontal or Vertical Lines. Add [horizontal](https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.hlines.html?highlight=hlines#matplotlib.axes.Axes.hlines) or [vertical](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.vlines.html) lines
###Code
fig, ax = plt.subplots(1,1,figsize=(5,3))
plt.tight_layout()
x = [2, 3, 4, 5, 6]
y = [25, 30, 35, 30, 40]
ax.scatter(x, y)
# add a horizontal line
ax.hlines(
30, # y axis location
2, # start of line on x axis
6, # end of line on x axis
)
# add a vertical line
ax.vlines(
3, # x axis location
25, # start of line on y axis
35, # end of line on y axis
);
# kwargs to try
# color = '' # named colour or hex value
# linewidth = 0.5 # float
# linestyle = '' # '-' '--' '-.' ':'
###Output
_____no_output_____
###Markdown
7.3 Coloured or Hashed Box. Shade a zone on the plot that spans either the x axis or [y axis](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.axvspan.html) direction
###Code
fig, ax = plt.subplots(1,1,figsize=(5,3))
plt.tight_layout()
x = [2, 3, 4, 5, 6]
y = [25, 30, 35, 30, 40]
ax.scatter(x, y)
# span x axis
ax.axvspan(
3, # min on x axis
4, # max on x axis
ymin=0.3, # bottom of box, propotion from 0-1
ymax=0.9, # top of box, propotion from 0-1
alpha=0.2,
)
# span y axis
ax.axhspan(
35,
37.5,
alpha=0.2,
xmin=0.3,
xmax=0.1,
);
# other kwargs to try
# color = '' # named colour or hex value
# hatch = '' # '/' '\' '|' '-' '+' 'x' 'o' 'O' '.' '*'
# edgecolor = '' # named colour or hex value
# facecolor = '' # named colour or hex value
###Output
_____no_output_____ |
notebooks/T4 - 1 - Linear Regression - Datos ficticios_Py38.ipynb | ###Markdown
Modelos de Regresión Lineal Modelo con datos simulados* y = a + b * x* X : 100 valores distribuídos según una N(1.5, 2.5)* Ye = 5 + 1.9 * x + e* e estará distribuído según una N(0, 0.8)
###Code
import pandas as pd
import numpy as np
x = 1.5 + 2.5 * np.random.randn(100)
res = 0 + 0.8 * np.random.randn(100)
y_pred = 5 + 0.3 * x
y_act = 5 + 0.3 * x + res
x_list = x.tolist()
y_pred_list = y_pred.tolist()
y_act_list = y_act.tolist()
data = pd.DataFrame(
{
"x":x_list,
"y_actual":y_act_list,
"y_prediccion":y_pred_list
}
)
data.head()
import matplotlib.pyplot as plt
y_mean = [np.mean(y_act) for i in range(1, len(x_list) + 1)]
%matplotlib inline
plt.plot(data["x"],data["y_prediccion"])
plt.plot(data["x"], data["y_actual"], "ro")
plt.plot(data["x"],y_mean, "g")
plt.title("Valor Actual vs Predicción")
###Output
_____no_output_____
###Markdown
How good is the prediction? SST = SSD + SSR. SST: variability of the data with respect to its mean. SSD: the difference between the original data and the predictions, which the model is not able to explain (errors that should follow a normal distribution). SSR: the difference between the regression and the mean value, which the model seeks to explain. R2 = SSR / SST is the coefficient of determination, between 0 and 1.
###Code
y_m = np.mean(y_act)
data["SSR"]=(data["y_prediccion"]-y_m)**2
data["SSD"]=(data["y_prediccion"]-data["y_actual"])**2
data["SST"]=(data["y_actual"]-y_m)**2
data.head()
SSR = sum(data["SSR"])
SSD = sum(data["SSD"])
SST = sum(data["SST"])
SSR
SSD
SST
SSR+SSD
R2 = SSR/SST
R2
plt.hist(data["y_prediccion"]-data["y_actual"])
###Output
_____no_output_____
###Markdown
Obtaining the regression line: y = a + b * x, with b = sum((x_i - x_m)*(y_i - y_m)) / sum((x_i - x_m)^2) and a = y_m - b * x_m
###Code
x_mean = np.mean(data["x"])
y_mean = np.mean(data["y_actual"])
x_mean, y_mean
data["beta_n"] = (data["x"]-x_mean)*(data["y_actual"]-y_mean)
data["beta_d"] = (data["x"]-x_mean)**2
beta = sum(data["beta_n"])/sum(data["beta_d"])
alpha = y_mean - beta * x_mean
alpha, beta
###Output
_____no_output_____
###Markdown
The linear model obtained by regression is: y = 5.042341442370516 + 1.9044490309709992 * x
###Code
data["y_model"] = alpha + beta * data["x"]
data.head()
SSR = sum((data["y_model"]-y_mean)**2)
SSD = sum((data["y_model"]-data["y_actual"])**2)
SST = sum((data["y_actual"]-y_mean)**2)
SSR, SSD, SST
R2 = SSR / SST
R2
y_mean = [np.mean(y_act) for i in range(1, len(x_list) + 1)]
%matplotlib inline
plt.plot(data["x"],data["y_prediccion"])
plt.plot(data["x"], data["y_actual"], "ro")
plt.plot(data["x"],y_mean, "g")
plt.plot(data["x"], data["y_model"])
plt.title("Valor Actual vs Predicción")
###Output
_____no_output_____
###Markdown
Residual standard error (RSE)
###Code
RSE = np.sqrt(SSD/(len(data)-2))
RSE
np.mean(data["y_actual"])
RSE / np.mean(data["y_actual"])
###Output
_____no_output_____ |
content/lessons/09/Watch-Me-Code/WMC2-List-Functions.ipynb | ###Markdown
Watch Me Code 1: List Basics
###Code
colors = [] #empty list
type(colors)
dir(colors)
help(colors.index)
colors.append("orange")
colors.append("blue")
colors.append("green")
colors.append("white")
print(colors)
print(len(colors))
colors.remove("green")
print(colors)
#lists are mutable
colors.reverse()
colors
index = colors.index('blue')
print(index)
colors.index('brown') # ValueError
###Output
_____no_output_____
2022-spring-part-2/seminars/09_time_series/09_time_series.ipynb | ###Markdown
Машинное обучение 2 Семинар 9. Временные ряды14 апреля 2022 План занятия1. Чтение данных - resampling - заполнение пробелов2. Предобработка и оценка стационарности: - дифференцирование - log - BoxCox3. Метрики4. Построение модели **ARIMA** - верхняя оценка параметров p, q - выбор оптимальных значений параметров5. Построение модели **Prophet** от facebook - Оценка параметров6. Дополнительные материалы
###Code
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
%matplotlib inline
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 10, 6
###Output
_____no_output_____
###Markdown
1. Reading the data. Download link: https://drive.google.com/drive/folders/1IOFHwyCi1AclcgR5NUuC-4nSdn7CQFYt?usp=sharing
###Code
path = "AirPassengers.csv"
# path = "monthly-car-sales.csv"
# path = "example_wp_log_peyton_manning.csv"
dataset = pd.read_csv(path)
dataset.head()
# Parse strings to datetime type
df = pd.DataFrame({
'date': pd.to_datetime(dataset['Month'], infer_datetime_format=True),
'y': dataset['#Passengers']})
df = df.set_index(['date'])
print(df.shape)
df.head(5)
plt.xlabel('Date')
plt.ylabel('Number of air passengers')
plt.plot(df)
plt.show()
help(df.resample)
# resample into years bins and mean the values
# M is month end frequency
df_year = df.resample('12M').mean()
print(df_year.shape)
plt.figure()
plt.plot(df_year, '.')
plt.show()
###Output
(13, 1)
###Markdown
Filling in missing values: - fill with the mean value - forward fill with the last value - linear interpolation
###Code
df_copy = df.copy()
df_copy[40:50] = float('nan')
df_copy[80:85] = float('nan')
# df_copy = df_copy.fillna(-100)
# fill with mean value
mean_val = df_copy.mean()
# df_copy = df_copy.fillna(mean_val)
# forward fill
# df_copy = df_copy.ffill()
# interpolate values
df_copy = df_copy.interpolate()
plt.figure()
plt.xlabel('Date')
plt.ylabel('Number of air passengers')
plt.plot(df_copy)
plt.show()
###Output
_____no_output_____
###Markdown
2. Preprocessing and assessing stationarity
###Code
# Determine rolling statistics
rolmean = df.rolling(window=12).mean() # window size 12 denotes 12 months, giving rolling mean at yearly level
rolstd = df.rolling(window=12).std()
#Plot rolling statistics
plt.figure()
plt.plot(df, '--', color='blue', label='Original')
plt.plot(rolmean, color='red', label='Rolling Mean')
plt.plot(rolstd, '.', color='black', label='Rolling Std')
plt.legend(loc='best')
plt.title('Rolling Mean & Standard Deviation')
plt.show(block=False)
###Output
_____no_output_____
###Markdown
Stationarity: - constant mean and variance - Dickey-Fuller test
###Code
from statsmodels.tsa.stattools import adfuller
help(adfuller)
#Perform Augmented Dickey–Fuller test:
adf, pvalue, usedlag, nobs, critical_values, icbest = adfuller(df.y, autolag='AIC')
print(f'Results of Dickey Fuller Test: ')
print(f'usedlag:{usedlag}')
print(f'p-value:{pvalue:.2f}')
print(f'ADF statistic:{adf:.2f}')
print(critical_values)
###Output
Results of Dickey Fuller Test:
usedlag:13
p-value:0.99
ADF statistic:0.82
{'1%': -3.4816817173418295, '5%': -2.8840418343195267, '10%': -2.578770059171598}
###Markdown
The Augmented Dickey-Fuller (ADF) test is a type of statistical test called a unit root test. Unit roots are a cause of non-stationarity. **Null Hypothesis (H0)**: The time series has a unit root (the time series is not stationary). **Alternate Hypothesis (H1)**: The time series has no unit root (the time series is stationary). If the null hypothesis can be rejected, we can conclude that the time series is stationary. There are two ways to reject the null hypothesis. On the one hand, the null hypothesis can be rejected if the p-value is below a set significance level; the default significance level is 5%: - **p-value > significance level (default: 0.05)**: Fail to reject the null hypothesis (H0); the data has a unit root and is non-stationary. - **p-value <= significance level (default: 0.05)**: Reject the null hypothesis (H0); the data does not have a unit root and is stationary. On the other hand, the null hypothesis can be rejected if the test statistic is less than the critical value: - **ADF statistic > critical value**: Fail to reject the null hypothesis (H0); the data has a unit root and is non-stationary. - **ADF statistic < critical value**: Reject the null hypothesis (H0); the data does not have a unit root and is stationary. Ways to make a series stationary: - transformation (log, BoxCox) - differencing
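A small illustrative helper that wraps this decision rule around the `adfuller` call already imported above (the 5% default and the helper name are assumptions for the sketch):
###Code
def is_stationary(series, alpha=0.05):
    # reject H0 (unit root) when the p-value is at or below the significance level
    _, pvalue, *_ = adfuller(series, autolag='AIC')
    return pvalue <= alpha
###Output
_____no_output_____
###Markdown
The cells below apply log, Box-Cox and differencing transforms and re-run the test on each.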
###Code
from scipy import stats
# log
df_log = df.copy()
df_log.y = np.log(df.y)
plt.plot(df_log)
plt.show()
_, pvalue, _, _, _, _ = adfuller(df_log.y, autolag='AIC')
print(f'LOG p-value: {pvalue:.2f}' )
# BoxCox
df_boxcox = df.copy()
df_boxcox.y, lmbda_boxcox = stats.boxcox(df.y)
plt.plot(df_boxcox)
plt.show()
_, pvalue, _, _, _, _ = adfuller(df_boxcox.y, autolag='AIC')
print(f'BoxCox p-value: {pvalue:.2f}' )
print(f'Lambda: {lmbda_boxcox}')
# diff
df_diff = df.copy()
df_diff.y = np.append([0], np.diff(df.y))
plt.plot(df_diff)
plt.show()
_, pvalue, _, _, _, _ = adfuller(df_diff.y, autolag='AIC')
print(f'Diff p-value: {pvalue:.2f}')
# boxcox + diff
df_diff_boxcox = df.copy()
df_diff_boxcox.y = np.append([0], np.diff(df_boxcox.y))
plt.plot(df_diff_boxcox)
plt.show()
_, pvalue, _, _, _, _ = adfuller(df_diff_boxcox.y, autolag='AIC')
print(f'BoxCox + Diff p-value: {pvalue:.2f}')
# log + diff
df_diff_log = df.copy()
df_diff_log.y = np.append([0], np.diff(df_log.y))
plt.plot(df_diff_log)
plt.show()
_, pvalue, _, _, _, _ = adfuller(df_diff_log.y, autolag='AIC')
print(f'Log + Diff p-value: {pvalue:.2f}')
# boxcox + diff^2
df_diff2_boxcox = df.copy()
df_diff2_boxcox.y = np.append([0], np.diff(df_diff_boxcox.y))
plt.plot(df_diff2_boxcox)
plt.show()
_, pvalue, _, _, _, _ = adfuller(df_diff2_boxcox.y, autolag='AIC')
print(f'Log + Diff^2 p-value: {pvalue:.2f}')
###Output
_____no_output_____
###Markdown
3. Metrics. Mean Squared Error: $MSE = \frac{1}{T}\sum\limits_{t}{(y_t - \hat{y}_t)^2}$ Root Mean Squared Error: $RMSE = \sqrt{MSE} = \sqrt{\frac{1}{T}\sum\limits_{t}{(y_t - \hat{y}_t)^2}}$ Mean Absolute Error: $MAE = \frac{1}{T}\sum\limits_{t}{\lvert{y_t - \hat{y}_t}\rvert}$ Mean Absolute Percentage Error: $MAPE = \frac{100\%}{T}\sum\limits_{t}{\lvert\frac{y_t - \hat{y}_t}{y_t}\rvert}$ Weighted Absolute Percentage Error: $WAPE = {\frac{\sum\limits_{t}{|y_t - \hat{y}_t|}}{\sum\limits_{t}y_t}}$
###Code
def rmse(y, y_hat):
return np.sqrt(np.sum(np.square(y - y_hat)) / len(y))
# TODO: implement other 1-2 metrics
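# One possible take on the remaining metrics from the formulas above
# (illustrative sketches; the TODO above is still left as the seminar exercise):
def mape(y, y_hat):
    return 100.0 * np.mean(np.abs((y - y_hat) / y))

def wape(y, y_hat):
    return np.sum(np.abs(y - y_hat)) / np.sum(y)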
###Output
_____no_output_____
###Markdown
4. Building an ARIMA model. $\Delta^d X_t = c + \sum\limits_{i=1}^{p} a_i \Delta^d X_{t-i} + \sum\limits_{j=1}^{q} b_j \varepsilon_{t-j} + \varepsilon_{t}$ **ARIMA** = AutoRegressive + Integrated + Moving Average. **AR**: Autoregression. A model that uses the dependent relationship between an observation and some number of lagged observations. **I**: Integrated. The use of differencing of raw observations (e.g. subtracting an observation from an observation at the previous time step) in order to make the time series stationary. **MA**: Moving Average. A model that uses the dependency between an observation and a residual error from a moving average model applied to lagged observations. Autocorrelation Function (ACF) and Partial Autocorrelation Function (PACF): - **Autocorrelation Function (ACF)**: Correlation of the time series with a lagged version of itself. The autocorrelation function starts at lag 0, which is the correlation of the time series with itself and therefore results in a correlation of 1. -> the number of significant lags suggests the MA parameter **q** - **Partial Autocorrelation Function (PACF)**: Additional correlation explained by each successive lagged term -> the number of significant lags suggests the AR parameter **p**
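A rough numeric counterpart of reading the plots is to count the lags whose ACF value falls outside the 95% confidence band (an illustrative heuristic only; `statsmodels` returns the confidence interval when `alpha` is passed, and `df_diff_boxcox` comes from the cells above):
###Code
from statsmodels.tsa.stattools import acf
vals, confint = acf(df_diff_boxcox.y, nlags=12, alpha=0.05)
significant_acf_lags = [lag for lag in range(1, 13)
                        if not (confint[lag, 0] <= 0 <= confint[lag, 1])]
significant_acf_lags  # the analogous check with pacf() suggests p
###Output
_____no_output_____
###Markdown
The plots below visualize both functions for the raw and transformed series.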
###Code
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
print("RAW")
f, ax = plt.subplots(nrows=2, ncols=1, figsize=(16, 8))
plot_acf(df.y, lags=24, ax=ax[0])
plot_pacf(df.y, lags=24, ax=ax[1])
plt.show()
print("BoxCox + diff")
f, ax = plt.subplots(nrows=2, ncols=1, figsize=(16, 8))
plot_acf(df_diff_boxcox.y,lags=12, ax=ax[0])
plot_pacf(df_diff_boxcox.y,lags=12, ax=ax[1])
plt.show()
# Some inverse stuff
def invboxcox(y,lmbda):
if lmbda == 0:
return(np.exp(y))
else:
return(np.exp(np.log(lmbda*y+1)/lmbda))
def invdiff(y_diff, y0):
return np.cumsum(y_diff) + y0
inv_diff_boxcox = lambda y: invboxcox(invdiff(y, df_boxcox.y[0]), lmbda_boxcox)
inv_diff2_boxcox = lambda y: invboxcox(invdiff(invdiff(y, df_diff_boxcox.y[0]), df_boxcox.y[0]), lmbda_boxcox)
inv_boxcox = lambda y: invboxcox(y, lmbda_boxcox)
plt.plot(inv_diff2_boxcox(df_diff2_boxcox.y))
plt.plot(inv_boxcox(df_boxcox.y))
# plt.plot(df.y)
plt.show()
from statsmodels.tsa.arima.model import ARIMA
TEST_SIZE = 12 # a year
TRAIN_SIZE = len(df.y) - TEST_SIZE
df_input = df_boxcox.copy()
df_train = df_input.y[:TRAIN_SIZE]
df_test = df_input.y[TRAIN_SIZE:]
d = 1
inv_func = inv_diff_boxcox
params_pdq = (2, d, 2)
try:
model_ARIMA = ARIMA(df_train, order=params_pdq).fit(
# maxiter=500,
# tol=1.e-6,
# solver='lbfgs',
# default is 'lbfgs' (limited memory
# Broyden-Fletcher-Goldfarb-Shanno). Other choices are 'bfgs',
# 'newton' (Newton-Raphson), 'nm' (Nelder-Mead), 'cg' -
# (conjugate gradient), 'ncg' (non-conjugate gradient), and
# 'powell'.
)
except:
print("ERROR")
TRAIN_SIZE
print(model_ARIMA.summary())
model_ARIMA.forecast()[0], df_input.y[TRAIN_SIZE]
arima_predict = model_ARIMA.predict(start=TRAIN_SIZE, end=TRAIN_SIZE + TEST_SIZE - 1)
arima_predict_full = df_input.copy()
arima_predict_full.y[:TRAIN_SIZE] = df_diff_boxcox.y[:TRAIN_SIZE]
arima_predict_full.y[TRAIN_SIZE:] = arima_predict
inv_arima_predict_full = arima_predict_full.copy()
inv_arima_predict_full = inv_func(arima_predict_full.y)
plt.figure()
plt.plot(df_diff_boxcox)
plt.plot(model_ARIMA.fittedvalues)
plt.plot(arima_predict_full[TRAIN_SIZE:])
plt.legend(['train', 'fitted', 'predict'])
plt.show()
plt.figure()
plt.plot(df.y[:TRAIN_SIZE])
plt.plot(inv_arima_predict_full[TRAIN_SIZE:])
plt.plot(df.y[TRAIN_SIZE:])
plt.legend(['train', 'predict', 'ground truth'])
plt.show()
print('RMSE: ', rmse(df.y[TRAIN_SIZE:], inv_arima_predict_full[TRAIN_SIZE:]))
###Output
_____no_output_____
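###Markdown
Task 1 below asks to search over $p, q$; one possible brute-force sketch by AIC, reusing `df_train` and `d` from the cells above (the 0..3 search range and variable names are arbitrary assumptions):
###Code
best_aic, best_order, best_model = np.inf, None, None
for p in range(4):
    for q in range(4):
        try:
            candidate = ARIMA(df_train, order=(p, d, q)).fit()
        except Exception:
            continue                      # skip (p, q) combinations that fail to fit
        if candidate.aic < best_aic:
            best_aic, best_order, best_model = candidate.aic, (p, d, q), candidate
print(f'best order: {best_order}, AIC: {best_aic:.2f}')
###Output
_____no_output_____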
###Markdown
Task 1. Iterate over values of $p, q$ and choose the best model by the AIC criterion, then compute RMSE and MAPE for it (a possible brute-force sketch was shown above). 5. Building the facebook Prophet model
###Code
# !pip install pystan~=2.14
!pip install fbprophet
from fbprophet import Prophet
df_for_prophet = pd.DataFrame({'ds': df.index, 'y': df.y.values})
model_prophet = Prophet().fit(df_for_prophet[:TRAIN_SIZE])
future = model_prophet.make_future_dataframe(periods=TEST_SIZE, freq='MS')
prophet_forecast = model_prophet.predict(future)
print(len(prophet_forecast))
plt.figure()
plt.plot(df.y[:TRAIN_SIZE])
plt.plot(df.index[TRAIN_SIZE:], prophet_forecast['yhat'].values[TRAIN_SIZE:])
plt.plot(df.y[TRAIN_SIZE:])
plt.legend(['train', 'predict', 'ground truth'])
plt.show()
print('RMSE: ', rmse(df.y[TRAIN_SIZE:], prophet_forecast['yhat'].values[TRAIN_SIZE:]))
path = "example_wp_log_peyton_manning.csv"
df_for_prophet = pd.read_csv(path)
print(len(df_for_prophet))
df_for_prophet.tail()
model_prophet = Prophet().fit(df_for_prophet[:-30])
future = model_prophet.make_future_dataframe(periods=360)
prophet_forecast = model_prophet.predict(future)
print(len(prophet_forecast))
fig1 = model_prophet.plot(prophet_forecast)
###Output
_____no_output_____ |
ch13/ch13_part2.ipynb | ###Markdown
Machine Learning with PyTorch and Scikit-Learn -- Code Examples. Package version checks. Add the folder to the path in order to load from the check_packages.py script:
###Code
import sys
sys.path.insert(0, '..')
###Output
_____no_output_____
###Markdown
Check recommended package versions:
###Code
from python_environment_check import check_packages
d = {
'numpy': '1.21.2',
'pandas': '1.3.2',
'sklearn': '1.0',
'torch': '1.8',
'torchvision': '0.9.0'
}
check_packages(d)
###Output
[OK] Your Python version is 3.8.12 | packaged by conda-forge | (default, Oct 12 2021, 21:59:51)
[GCC 9.4.0]
[OK] numpy 1.22.0
[OK] pandas 1.4.1
[OK] sklearn 1.0.2
[OK] torch 1.10.1+cu102
[OK] torchvision 0.11.2+cu102
###Markdown
Chapter 13: Going Deeper -- the Mechanics of PyTorch (Part 2/3) **Outline**- [Project one - predicting the fuel efficiency of a car](Project-one----predicting-the-fuel-efficiency-of-a-car) - [Working with feature columns](Working-with-feature-columns) - [Training a DNN regression model](Training-a-DNN-regression-model)- [Project two - classifying MNIST handwritten digits](Project-two----classifying-MNIST-handwritten-digits)
###Code
import numpy as np
import torch
import torch.nn as nn
import pandas as pd
from IPython.display import Image
###Output
_____no_output_____
###Markdown
Project one - predicting the fuel efficiency of a car Working with feature columns
###Code
Image(filename='figures/13_07.png', width=700)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
column_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight',
'Acceleration', 'Model Year', 'Origin']
df = pd.read_csv(url, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
df.tail()
print(df.isna().sum())
df = df.dropna()
df = df.reset_index(drop=True)
df.tail()
import sklearn
import sklearn.model_selection
df_train, df_test = sklearn.model_selection.train_test_split(df, train_size=0.8, random_state=1)
train_stats = df_train.describe().transpose()
train_stats
numeric_column_names = ['Cylinders', 'Displacement', 'Horsepower', 'Weight', 'Acceleration']
df_train_norm, df_test_norm = df_train.copy(), df_test.copy()
for col_name in numeric_column_names:
mean = train_stats.loc[col_name, 'mean']
std = train_stats.loc[col_name, 'std']
df_train_norm.loc[:, col_name] = (df_train_norm.loc[:, col_name] - mean)/std
df_test_norm.loc[:, col_name] = (df_test_norm.loc[:, col_name] - mean)/std
df_train_norm.tail()
boundaries = torch.tensor([73, 76, 79])
v = torch.tensor(df_train_norm['Model Year'].values)
df_train_norm['Model Year Bucketed'] = torch.bucketize(v, boundaries, right=True)
v = torch.tensor(df_test_norm['Model Year'].values)
df_test_norm['Model Year Bucketed'] = torch.bucketize(v, boundaries, right=True)
numeric_column_names.append('Model Year Bucketed')
from torch.nn.functional import one_hot
total_origin = len(set(df_train_norm['Origin']))
origin_encoded = one_hot(torch.from_numpy(df_train_norm['Origin'].values) % total_origin)
x_train_numeric = torch.tensor(df_train_norm[numeric_column_names].values)
x_train = torch.cat([x_train_numeric, origin_encoded], 1).float()
origin_encoded = one_hot(torch.from_numpy(df_test_norm['Origin'].values) % total_origin)
x_test_numeric = torch.tensor(df_test_norm[numeric_column_names].values)
x_test = torch.cat([x_test_numeric, origin_encoded], 1).float()
y_train = torch.tensor(df_train_norm['MPG'].values).float()
y_test = torch.tensor(df_test_norm['MPG'].values).float()
from torch.utils.data import DataLoader, TensorDataset
train_ds = TensorDataset(x_train, y_train)
batch_size = 8
torch.manual_seed(1)
train_dl = DataLoader(train_ds, batch_size, shuffle=True)
hidden_units = [8, 4]
input_size = x_train.shape[1]
all_layers = []
for hidden_unit in hidden_units:
layer = nn.Linear(input_size, hidden_unit)
all_layers.append(layer)
all_layers.append(nn.ReLU())
input_size = hidden_unit
all_layers.append(nn.Linear(hidden_units[-1], 1))
model = nn.Sequential(*all_layers)
model
loss_fn = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
torch.manual_seed(1)
num_epochs = 200
log_epochs = 20
for epoch in range(num_epochs):
loss_hist_train = 0
for x_batch, y_batch in train_dl:
pred = model(x_batch)[:, 0]
loss = loss_fn(pred, y_batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()
loss_hist_train += loss.item()
if epoch % log_epochs==0:
print(f'Epoch {epoch} Loss {loss_hist_train/len(train_dl):.4f}')
with torch.no_grad():
pred = model(x_test.float())[:, 0]
loss = loss_fn(pred, y_test)
print(f'Test MSE: {loss.item():.4f}')
print(f'Test MAE: {nn.L1Loss()(pred, y_test).item():.4f}')
###Output
Test MSE: 9.6133
Test MAE: 2.1211
###Markdown
Project two - classifying MNIST hand-written digits
###Code
import torchvision
from torchvision import transforms
image_path = './'
transform = transforms.Compose([transforms.ToTensor()])
mnist_train_dataset = torchvision.datasets.MNIST(root=image_path,
train=True,
transform=transform,
download=True)
mnist_test_dataset = torchvision.datasets.MNIST(root=image_path,
train=False,
transform=transform,
download=False)
batch_size = 64
torch.manual_seed(1)
train_dl = DataLoader(mnist_train_dataset, batch_size, shuffle=True)
hidden_units = [32, 16]
image_size = mnist_train_dataset[0][0].shape
input_size = image_size[0] * image_size[1] * image_size[2]
all_layers = [nn.Flatten()]
for hidden_unit in hidden_units:
layer = nn.Linear(input_size, hidden_unit)
all_layers.append(layer)
all_layers.append(nn.ReLU())
input_size = hidden_unit
all_layers.append(nn.Linear(hidden_units[-1], 10))
model = nn.Sequential(*all_layers)
model
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
torch.manual_seed(1)
num_epochs = 20
for epoch in range(num_epochs):
accuracy_hist_train = 0
for x_batch, y_batch in train_dl:
pred = model(x_batch)
loss = loss_fn(pred, y_batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()
is_correct = (torch.argmax(pred, dim=1) == y_batch).float()
accuracy_hist_train += is_correct.sum()
accuracy_hist_train /= len(train_dl.dataset)
print(f'Epoch {epoch} Accuracy {accuracy_hist_train:.4f}')
pred = model(mnist_test_dataset.data / 255.)
is_correct = (torch.argmax(pred, dim=1) == mnist_test_dataset.targets).float()
print(f'Test accuracy: {is_correct.mean():.4f}')
###Output
Test accuracy: 0.9645
###Markdown
---Readers may ignore the next cell.
###Code
! python ../.convert_notebook_to_script.py --input ch13_part2.ipynb --output ch13_part2.py
###Output
[NbConvertApp] Converting notebook ch13_part2.ipynb to script
[NbConvertApp] Writing 7052 bytes to ch13_part2.py
###Markdown
*Python Machine Learning 3rd Edition* by [Sebastian Raschka](https://sebastianraschka.com) & [Vahid Mirjalili](http://vahidmirjalili.com), Packt Publishing Ltd. 2019Code Repository: https://github.com/rasbt/python-machine-learning-book-3rd-editionCode License: [MIT License](https://github.com/rasbt/python-machine-learning-book-3rd-edition/blob/master/LICENSE.txt) Chapter 13: Parallelizing Neural Network Training with TensorFlow (Part 2/2) Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).
###Code
%load_ext watermark
%watermark -a "Sebastian Raschka & Vahid Mirjalili" -u -d -p numpy,scipy,matplotlib,tensorflow
%matplotlib inline
###Output
_____no_output_____
###Markdown
Building a neural network model in TensorFlow The TensorFlow Keras API (tf.keras) Building a linear regression model
###Code
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
X_train = np.arange(10).reshape((10, 1))
y_train = np.array([1.0, 1.3, 3.1,
2.0, 5.0, 6.3,
6.6, 7.4, 8.0,
9.0])
plt.plot(X_train, y_train, 'o', markersize=10)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
X_train_norm = (X_train - np.mean(X_train))/np.std(X_train)
ds_train_orig = tf.data.Dataset.from_tensor_slices(
(tf.cast(X_train_norm, tf.float32),
tf.cast(y_train, tf.float32)))
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.w = tf.Variable(0.0, name='weight')
self.b = tf.Variable(0.0, name='bias')
def call(self, x):
return self.w*x + self.b
model = MyModel()
model.build(input_shape=(None, 1))
model.summary()
def loss_fn(y_true, y_pred):
return tf.reduce_mean(tf.square(y_true - y_pred))
## testing the function:
yt = tf.convert_to_tensor([1.0])
yp = tf.convert_to_tensor([1.5])
loss_fn(yt, yp)
def train(model, inputs, outputs, learning_rate):
with tf.GradientTape() as tape:
current_loss = loss_fn(model(inputs), outputs)
dW, db = tape.gradient(current_loss, [model.w, model.b])
model.w.assign_sub(learning_rate * dW)
model.b.assign_sub(learning_rate * db)
tf.random.set_seed(1)
num_epochs = 200
log_steps = 100
learning_rate = 0.001
batch_size = 1
steps_per_epoch = int(np.ceil(len(y_train) / batch_size))
ds_train = ds_train_orig.shuffle(buffer_size=len(y_train))
ds_train = ds_train.repeat(count=None)
ds_train = ds_train.batch(1)
Ws, bs = [], []
for i, batch in enumerate(ds_train):
if i >= steps_per_epoch * num_epochs:
break
Ws.append(model.w.numpy())
bs.append(model.b.numpy())
bx, by = batch
loss_val = loss_fn(model(bx), by)
train(model, bx, by, learning_rate=learning_rate)
if i%log_steps==0:
print('Epoch {:4d} Step {:2d} Loss {:6.4f}'.format(
int(i/steps_per_epoch), i, loss_val))
print('Final Parameters:', model.w.numpy(), model.b.numpy())
X_test = np.linspace(0, 9, num=100).reshape(-1, 1)
X_test_norm = (X_test - np.mean(X_train)) / np.std(X_train)
y_pred = model(tf.cast(X_test_norm, dtype=tf.float32))
fig = plt.figure(figsize=(13, 5))
ax = fig.add_subplot(1, 2, 1)
plt.plot(X_train_norm, y_train, 'o', markersize=10)
plt.plot(X_test_norm, y_pred, '--', lw=3)
plt.legend(['Training examples', 'Linear Reg.'], fontsize=15)
ax.set_xlabel('x', size=15)
ax.set_ylabel('y', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
ax = fig.add_subplot(1, 2, 2)
plt.plot(Ws, lw=3)
plt.plot(bs, lw=3)
plt.legend(['Weight w', 'Bias unit b'], fontsize=15)
ax.set_xlabel('Iteration', size=15)
ax.set_ylabel('Value', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
#plt.savefig('ch13-linreg-1.pdf')
plt.show()
###Output
Final Parameters: 2.6576622 4.8798566
###Markdown
Model training via the .compile() and .fit() methods
###Code
tf.random.set_seed(1)
model = MyModel()
#model.build((None, 1))
model.compile(optimizer='sgd',
loss=loss_fn,
metrics=['mae', 'mse'])
model.fit(X_train_norm, y_train,
epochs=num_epochs, batch_size=batch_size,
verbose=1)
print(model.w.numpy(), model.b.numpy())
X_test = np.linspace(0, 9, num=100).reshape(-1, 1)
X_test_norm = (X_test - np.mean(X_train)) / np.std(X_train)
y_pred = model(tf.cast(X_test_norm, dtype=tf.float32))
fig = plt.figure(figsize=(13, 5))
ax = fig.add_subplot(1, 2, 1)
plt.plot(X_train_norm, y_train, 'o', markersize=10)
plt.plot(X_test_norm, y_pred, '--', lw=3)
plt.legend(['Training Samples', 'Linear Regression'], fontsize=15)
ax = fig.add_subplot(1, 2, 2)
plt.plot(Ws, lw=3)
plt.plot(bs, lw=3)
plt.legend(['W', 'bias'], fontsize=15)
plt.show()
###Output
2.7058775 4.971019
###Markdown
Building a multilayer perceptron for classifying flowers in the Iris dataset
###Code
import tensorflow_datasets as tfds
iris, iris_info = tfds.load('iris', with_info=True)
print(iris_info)
tf.random.set_seed(1)
ds_orig = iris['train']
ds_orig = ds_orig.shuffle(150, reshuffle_each_iteration=False)
print(next(iter(ds_orig)))
ds_train_orig = ds_orig.take(100)
ds_test = ds_orig.skip(100)
## checking the number of examples:
n = 0
for example in ds_train_orig:
n += 1
print(n)
n = 0
for example in ds_test:
n += 1
print(n)
ds_train_orig = ds_train_orig.map(
lambda x: (x['features'], x['label']))
ds_test = ds_test.map(
lambda x: (x['features'], x['label']))
next(iter(ds_train_orig))
model = tf.keras.Sequential([
tf.keras.layers.Dense(16, activation='sigmoid',
name='fc1', input_shape=(4,)),
tf.keras.layers.Dense(3, name='fc2', activation='softmax')])
model.summary()
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
num_epochs = 100
training_size = 100
batch_size = 2
steps_per_epoch = np.ceil(training_size / batch_size)
ds_train = ds_train_orig.shuffle(buffer_size=training_size)
ds_train = ds_train.repeat()
ds_train = ds_train.batch(batch_size=batch_size)
ds_train = ds_train.prefetch(buffer_size=1000)
history = model.fit(ds_train, epochs=num_epochs,
steps_per_epoch=steps_per_epoch,
verbose=0)
hist = history.history
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(1, 2, 1)
ax.plot(hist['loss'], lw=3)
ax.set_title('Training loss', size=15)
ax.set_xlabel('Epoch', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
ax = fig.add_subplot(1, 2, 2)
ax.plot(hist['accuracy'], lw=3)
ax.set_title('Training accuracy', size=15)
ax.set_xlabel('Epoch', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
plt.tight_layout()
#plt.savefig('ch13-cls-learning-curve.pdf')
plt.show()
###Output
_____no_output_____
###Markdown
Evaluating the trained model on the test dataset
###Code
results = model.evaluate(ds_test.batch(50), verbose=0)
print('Test loss: {:.4f} Test Acc.: {:.4f}'.format(*results))
###Output
Test loss: 0.1491 Test Acc.: 1.0000
###Markdown
Saving and reloading the trained model
###Code
model.save('iris-classifier.h5',
overwrite=True,
include_optimizer=True,
save_format='h5')
model_new = tf.keras.models.load_model('iris-classifier.h5')
model_new.summary()
results = model_new.evaluate(ds_test.batch(50), verbose=0)
print('Test loss: {:.4f} Test Acc.: {:.4f}'.format(*results))
labels_train = []
for i,item in enumerate(ds_train_orig):
labels_train.append(item[1].numpy())
labels_test = []
for i,item in enumerate(ds_test):
labels_test.append(item[1].numpy())
print('Training Set: ',len(labels_train), 'Test Set: ', len(labels_test))
model.to_json()
###Output
_____no_output_____
###Markdown
Choosing activation functions for multilayer neural networks Logistic function recap
###Code
import numpy as np
X = np.array([1, 1.4, 2.5]) ## first value must be 1
w = np.array([0.4, 0.3, 0.5])
def net_input(X, w):
return np.dot(X, w)
def logistic(z):
return 1.0 / (1.0 + np.exp(-z))
def logistic_activation(X, w):
z = net_input(X, w)
return logistic(z)
print('P(y=1|x) = %.3f' % logistic_activation(X, w))
# W : array with shape = (n_output_units, n_hidden_units+1)
# note that the first column are the bias units
W = np.array([[1.1, 1.2, 0.8, 0.4],
[0.2, 0.4, 1.0, 0.2],
[0.6, 1.5, 1.2, 0.7]])
# A : data array with shape = (n_hidden_units + 1, n_samples)
# note that the first column of this array must be 1
A = np.array([[1, 0.1, 0.4, 0.6]])
Z = np.dot(W, A[0])
y_probas = logistic(Z)
print('Net Input: \n', Z)
print('Output Units:\n', y_probas)
y_class = np.argmax(Z, axis=0)
print('Predicted class label: %d' % y_class)
###Output
Predicted class label: 0
###Markdown
Estimating class probabilities in multiclass classification via the softmax function
###Code
def softmax(z):
return np.exp(z) / np.sum(np.exp(z))
y_probas = softmax(Z)
print('Probabilities:\n', y_probas)
np.sum(y_probas)
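# Editorial aside, not part of the original notebook: np.exp can overflow for large
# net inputs; a common numerically stable variant subtracts the maximum entry first
# (the resulting probabilities are mathematically identical).
def softmax_stable(z):
    z_shifted = z - np.max(z)   # shift so the largest entry becomes 0
    e = np.exp(z_shifted)
    return e / np.sum(e)

softmax_stable(Z)               # same probabilities as softmax(Z) above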
import tensorflow as tf
Z_tensor = tf.expand_dims(Z, axis=0)
tf.keras.activations.softmax(Z_tensor)
###Output
_____no_output_____
###Markdown
Broadening the output spectrum using a hyperbolic tangent
###Code
import matplotlib.pyplot as plt
%matplotlib inline
def tanh(z):
e_p = np.exp(z)
e_m = np.exp(-z)
return (e_p - e_m) / (e_p + e_m)
z = np.arange(-5, 5, 0.005)
log_act = logistic(z)
tanh_act = tanh(z)
plt.ylim([-1.5, 1.5])
plt.xlabel('Net input $z$')
plt.ylabel('Activation $\phi(z)$')
plt.axhline(1, color='black', linestyle=':')
plt.axhline(0.5, color='black', linestyle=':')
plt.axhline(0, color='black', linestyle=':')
plt.axhline(-0.5, color='black', linestyle=':')
plt.axhline(-1, color='black', linestyle=':')
plt.plot(z, tanh_act,
linewidth=3, linestyle='--',
label='Tanh')
plt.plot(z, log_act,
linewidth=3,
label='Logistic')
plt.legend(loc='lower right')
plt.tight_layout()
plt.show()
np.tanh(z)
import tensorflow as tf
tf.keras.activations.tanh(z)
from scipy.special import expit
expit(z)
tf.keras.activations.sigmoid(z)
###Output
_____no_output_____
###Markdown
Rectified linear unit activation
###Code
import tensorflow as tf
tf.keras.activations.relu(z)
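# Editorial note, not part of the original notebook: relu is simply the elementwise
# maximum max(0, z); an equivalent NumPy expression would be np.maximum(0, z).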
###Output
_____no_output_____
###Markdown
Summary Appendix Splitting a dataset: danger of mixing train/test examples
###Code
## the correct way:
ds = tf.data.Dataset.range(15)
ds = ds.shuffle(15, reshuffle_each_iteration=False)
ds_train = ds.take(10)
ds_test = ds.skip(10)
ds_train = ds_train.shuffle(10).repeat(10)
ds_test = ds_test.shuffle(5)
ds_test = ds_test.repeat(10)
set_train = set()
for i,item in enumerate(ds_train):
set_train.add(item.numpy())
set_test = set()
for i,item in enumerate(ds_test):
set_test.add(item.numpy())
print(set_train, set_test)
## The wrong way:
ds = tf.data.Dataset.range(15)
ds = ds.shuffle(15, reshuffle_each_iteration=True)
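# Editorial note, not part of the original notebook: with reshuffle_each_iteration=True
# the dataset is reshuffled on every traversal, so take(10)/skip(10) select a different
# subset on each pass and train/test examples overlap, as the printed sets below show.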
ds_train = ds.take(10)
ds_test = ds.skip(10)
ds_train = ds_train.shuffle(10).repeat(10)
ds_test = ds_test.shuffle(5)
ds_test = ds_test.repeat(10)
set_train = set()
for i,item in enumerate(ds_train):
set_train.add(item.numpy())
set_test = set()
for i,item in enumerate(ds_test):
set_test.add(item.numpy())
print(set_train, set_test)
###Output
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14} {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}
###Markdown
Splitting a dataset using `tfds.Split`
###Code
##--------------------------- Attention ------------------------##
## ##
## Note: currently, tfds.Split has a bug in TF 2.0.0 ##
## ##
## I.e., splitting [2, 1] is expected to result in ##
## 100 train and 50 test examples ##
## ##
## but instead, it results in 116 train and 34 test examples ##
## ##
##--------------------------------------------------------------##
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
## method 1: specifying percentage:
#first_67_percent = tfds.Split.TRAIN.subsplit(tfds.percent[:67])
#last_33_percent = tfds.Split.TRAIN.subsplit(tfds.percent[-33:])
#ds_train_orig = tfds.load('iris', split=first_67_percent)
#ds_test = tfds.load('iris', split=last_33_percent)
## method 2: specifying the weights
split_train, split_test = tfds.Split.TRAIN.subsplit([2, 1])
ds_train_orig = tfds.load('iris', split=split_train)
ds_test = tfds.load('iris', split=split_test)
print(next(iter(ds_train_orig)))
print()
print(next(iter(ds_test)))
ds_train_orig = ds_train_orig.shuffle(100, reshuffle_each_iteration=True)
ds_test = ds_test.shuffle(50, reshuffle_each_iteration=False)
ds_train_orig = ds_train_orig.map(
lambda x: (x['features'], x['label']))
ds_test = ds_test.map(
lambda x: (x['features'], x['label']))
print(next(iter(ds_train_orig)))
for j in range(5):
labels_train = []
for i,item in enumerate(ds_train_orig):
labels_train.append(item[1].numpy())
labels_test = []
for i,item in enumerate(ds_test):
labels_test.append(item[1].numpy())
print('Training Set: ',len(labels_train), 'Test Set: ', len(labels_test))
labels_test = np.array(labels_test)
print(np.sum(labels_test == 0), np.sum(labels_test == 1), np.sum(labels_test == 2))
###Output
{'features': <tf.Tensor: id=135238, shape=(4,), dtype=float32, numpy=array([6.1, 2.8, 4.7, 1.2], dtype=float32)>, 'label': <tf.Tensor: id=135239, shape=(), dtype=int64, numpy=1>}
{'features': <tf.Tensor: id=135245, shape=(4,), dtype=float32, numpy=array([5.7, 3. , 4.2, 1.2], dtype=float32)>, 'label': <tf.Tensor: id=135246, shape=(), dtype=int64, numpy=1>}
(<tf.Tensor: id=135290, shape=(4,), dtype=float32, numpy=array([6.8, 2.8, 4.8, 1.4], dtype=float32)>, <tf.Tensor: id=135291, shape=(), dtype=int64, numpy=1>)
Training Set: 116 Test Set: 34
10 12 12
Training Set: 116 Test Set: 34
10 12 12
Training Set: 116 Test Set: 34
10 12 12
Training Set: 116 Test Set: 34
10 12 12
Training Set: 116 Test Set: 34
10 12 12
###Markdown
---Readers may ignore the next cell.
###Code
! python ../.convert_notebook_to_script.py --input ch13_part2.ipynb --output ch13_part2.py
###Output
[NbConvertApp] Converting notebook ch13_part2.ipynb to script
[NbConvertApp] Writing 13934 bytes to ch13_part2.py
###Markdown
Machine Learning with PyTorch and Scikit-Learn -- Code Examples Package version checks Add folder to path in order to load from the check_packages.py script:
###Code
import sys
sys.path.insert(0, '..')
###Output
_____no_output_____
###Markdown
Check recommended package versions:
###Code
from python_environment_check import check_packages
d = {
'numpy': '1.21.2',
'pandas': '1.3.2',
'sklearn': '1.0',
'torch': '1.8',
'torchvision': '0.9.0'
}
check_packages(d)
###Output
[OK] numpy 1.22.0
[OK] pandas 1.3.5
[OK] sklearn 1.0.2
[OK] torch 1.10.0
[OK] torchvision 0.11.2
###Markdown
Chapter 13: Going Deeper -- the Mechanics of PyTorch (Part 2/3) **Outline**- [Project one - predicting the fuel efficiency of a car](Project-one----predicting-the-fuel-efficiency-of-a-car) - [Working with feature columns](Working-with-feature-columns) - [Training a DNN regression model](Training-a-DNN-regression-model)- [Project two - classifying MNIST handwritten digits](Project-two----classifying-MNIST-handwritten-digits)
###Code
import numpy as np
import torch
import torch.nn as nn
import pandas as pd
from IPython.display import Image
###Output
_____no_output_____
###Markdown
Project one - predicting the fuel efficiency of a car Working with feature columns
###Code
Image(filename='figures/02.png', width=700)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
column_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight',
'Acceleration', 'Model Year', 'Origin']
df = pd.read_csv(url, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
df.tail()
print(df.isna().sum())
df = df.dropna()
df = df.reset_index(drop=True)
df.tail()
import sklearn
import sklearn.model_selection
df_train, df_test = sklearn.model_selection.train_test_split(df, train_size=0.8, random_state=1)
train_stats = df_train.describe().transpose()
train_stats
numeric_column_names = ['Cylinders', 'Displacement', 'Horsepower', 'Weight', 'Acceleration']
df_train_norm, df_test_norm = df_train.copy(), df_test.copy()
for col_name in numeric_column_names:
mean = train_stats.loc[col_name, 'mean']
std = train_stats.loc[col_name, 'std']
df_train_norm.loc[:, col_name] = (df_train_norm.loc[:, col_name] - mean)/std
df_test_norm.loc[:, col_name] = (df_test_norm.loc[:, col_name] - mean)/std
df_train_norm.tail()
boundaries = torch.tensor([73, 76, 79])
v = torch.tensor(df_train_norm['Model Year'].values)
df_train_norm['Model Year Bucketed'] = torch.bucketize(v, boundaries, right=True)
v = torch.tensor(df_test_norm['Model Year'].values)
df_test_norm['Model Year Bucketed'] = torch.bucketize(v, boundaries, right=True)
numeric_column_names.append('Model Year Bucketed')
from torch.nn.functional import one_hot
total_origin = len(set(df_train_norm['Origin']))
origin_encoded = one_hot(torch.from_numpy(df_train_norm['Origin'].values) % total_origin)
x_train_numeric = torch.tensor(df_train_norm[numeric_column_names].values)
x_train = torch.cat([x_train_numeric, origin_encoded], 1).float()
origin_encoded = one_hot(torch.from_numpy(df_test_norm['Origin'].values) % total_origin)
x_test_numeric = torch.tensor(df_test_norm[numeric_column_names].values)
x_test = torch.cat([x_test_numeric, origin_encoded], 1).float()
y_train = torch.tensor(df_train_norm['MPG'].values).float()
y_test = torch.tensor(df_test_norm['MPG'].values).float()
from torch.utils.data import DataLoader, TensorDataset
train_ds = TensorDataset(x_train, y_train)
batch_size = 8
torch.manual_seed(1)
train_dl = DataLoader(train_ds, batch_size, shuffle=True)
hidden_units = [8, 4]
input_size = x_train.shape[1]
all_layers = []
for hidden_unit in hidden_units:
layer = nn.Linear(input_size, hidden_unit)
all_layers.append(layer)
all_layers.append(nn.ReLU())
input_size = hidden_unit
all_layers.append(nn.Linear(hidden_units[-1], 1))
model = nn.Sequential(*all_layers)
model
loss_fn = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
torch.manual_seed(1)
num_epochs = 200
log_epochs = 20
for epoch in range(num_epochs):
loss_hist_train = 0
for x_batch, y_batch in train_dl:
pred = model(x_batch)[:, 0]
loss = loss_fn(pred, y_batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()
loss_hist_train += loss.item()
if epoch % log_epochs==0:
print(f'Epoch {epoch} Loss {loss_hist_train/len(train_dl):.4f}')
with torch.no_grad():
pred = model(x_test.float())[:, 0]
loss = loss_fn(pred, y_test)
print(f'Test MSE: {loss.item():.4f}')
print(f'Test MAE: {nn.L1Loss()(pred, y_test).item():.4f}')
###Output
Test MSE: 9.6130
Test MAE: 2.1211
###Markdown
Project two - classifying MNIST hand-written digits
###Code
import torchvision
from torchvision import transforms
image_path = './'
transform = transforms.Compose([transforms.ToTensor()])
mnist_train_dataset = torchvision.datasets.MNIST(root=image_path,
train=True,
transform=transform,
download=False)
mnist_test_dataset = torchvision.datasets.MNIST(root=image_path,
train=False,
transform=transform,
download=False)
batch_size = 64
torch.manual_seed(1)
train_dl = DataLoader(mnist_train_dataset, batch_size, shuffle=True)
hidden_units = [32, 16]
image_size = mnist_train_dataset[0][0].shape
input_size = image_size[0] * image_size[1] * image_size[2]
all_layers = [nn.Flatten()]
for hidden_unit in hidden_units:
layer = nn.Linear(input_size, hidden_unit)
all_layers.append(layer)
all_layers.append(nn.ReLU())
input_size = hidden_unit
all_layers.append(nn.Linear(hidden_units[-1], 10))
all_layers.append(nn.Softmax(dim=1))
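# Editorial note, not part of the original notebook: nn.CrossEntropyLoss applies
# log-softmax internally, so this extra Softmax layer is redundant; the first variant
# of this notebook above omits it.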
model = nn.Sequential(*all_layers)
model
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
torch.manual_seed(1)
num_epochs = 20
for epoch in range(num_epochs):
accuracy_hist_train = 0
for x_batch, y_batch in train_dl:
pred = model(x_batch)
loss = loss_fn(pred, y_batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()
is_correct = (torch.argmax(pred, dim=1) == y_batch).float()
accuracy_hist_train += is_correct.sum()
accuracy_hist_train /= len(train_dl.dataset)
print(f'Epoch {epoch} Accuracy {accuracy_hist_train:.4f}')
pred = model(mnist_test_dataset.data / 255.)
is_correct = (torch.argmax(pred, dim=1) == mnist_test_dataset.targets).float()
print(f'Test accuracy: {is_correct.mean():.4f}')
###Output
Test accuracy: 0.9557
###Markdown
---Readers may ignore the next cell.
###Code
! python ../.convert_notebook_to_script.py --input ch13_part2.ipynb --output ch13_part2.py
###Output
[NbConvertApp] WARNING | Config option `kernel_spec_manager_class` not recognized by `NbConvertApp`.
[NbConvertApp] Converting notebook ch13_part2.ipynb to script
[NbConvertApp] Writing 7102 bytes to ch13_part2.py
###Markdown
Machine Learning with PyTorch and Scikit-Learn -- Code Examples Package version checks Add folder to path in order to load from the check_packages.py script:
###Code
import sys
sys.path.insert(0, '..')
###Output
_____no_output_____
###Markdown
Check recommended package versions:
###Code
from python_environment_check import check_packages
d = {
'numpy': '1.21.2',
'pandas': '1.3.2',
'sklearn': '1.0',
'torch': '1.8',
'torchvision': '0.9.0'
}
check_packages(d)
###Output
[OK] numpy 1.22.0
[OK] pandas 1.3.5
[OK] sklearn 1.0.2
[OK] torch 1.10.0
[OK] torchvision 0.11.2
###Markdown
Chapter 13: Going Deeper -- the Mechanics of PyTorch (Part 2/3) **Outline**- [Project one - predicting the fuel efficiency of a car](Project-one----predicting-the-fuel-efficiency-of-a-car) - [Working with feature columns](Working-with-feature-columns) - [Training a DNN regression model](Training-a-DNN-regression-model)- [Project two - classifying MNIST handwritten digits](Project-two----classifying-MNIST-handwritten-digits)
###Code
import numpy as np
import torch
import torch.nn as nn
import pandas as pd
from IPython.display import Image
###Output
_____no_output_____
###Markdown
Project one - predicting the fuel efficiency of a car Working with feature columns
###Code
Image(filename='figures/02.png', width=700)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
column_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight',
'Acceleration', 'Model Year', 'Origin']
df = pd.read_csv(url, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
df.tail()
print(df.isna().sum())
df = df.dropna()
df = df.reset_index(drop=True)
df.tail()
import sklearn
import sklearn.model_selection
df_train, df_test = sklearn.model_selection.train_test_split(df, train_size=0.8, random_state=1)
train_stats = df_train.describe().transpose()
train_stats
numeric_column_names = ['Cylinders', 'Displacement', 'Horsepower', 'Weight', 'Acceleration']
df_train_norm, df_test_norm = df_train.copy(), df_test.copy()
for col_name in numeric_column_names:
mean = train_stats.loc[col_name, 'mean']
std = train_stats.loc[col_name, 'std']
df_train_norm.loc[:, col_name] = (df_train_norm.loc[:, col_name] - mean)/std
df_test_norm.loc[:, col_name] = (df_test_norm.loc[:, col_name] - mean)/std
df_train_norm.tail()
boundaries = torch.tensor([73, 76, 79])
v = torch.tensor(df_train_norm['Model Year'].values)
df_train_norm['Model Year Bucketed'] = torch.bucketize(v, boundaries, right=True)
v = torch.tensor(df_test_norm['Model Year'].values)
df_test_norm['Model Year Bucketed'] = torch.bucketize(v, boundaries, right=True)
numeric_column_names.append('Model Year Bucketed')
from torch.nn.functional import one_hot
total_origin = len(set(df_train_norm['Origin']))
origin_encoded = one_hot(torch.from_numpy(df_train_norm['Origin'].values) % total_origin)
x_train_numeric = torch.tensor(df_train_norm[numeric_column_names].values)
x_train = torch.cat([x_train_numeric, origin_encoded], 1).float()
origin_encoded = one_hot(torch.from_numpy(df_test_norm['Origin'].values) % total_origin)
x_test_numeric = torch.tensor(df_test_norm[numeric_column_names].values)
x_test = torch.cat([x_test_numeric, origin_encoded], 1).float()
y_train = torch.tensor(df_train_norm['MPG'].values).float()
y_test = torch.tensor(df_test_norm['MPG'].values).float()
from torch.utils.data import DataLoader, TensorDataset
train_ds = TensorDataset(x_train, y_train)
batch_size = 8
torch.manual_seed(1)
train_dl = DataLoader(train_ds, batch_size, shuffle=True)
hidden_units = [8, 4]
input_size = x_train.shape[1]
all_layers = []
for hidden_unit in hidden_units:
layer = nn.Linear(input_size, hidden_unit)
all_layers.append(layer)
all_layers.append(nn.ReLU())
input_size = hidden_unit
all_layers.append(nn.Linear(hidden_units[-1], 1))
model = nn.Sequential(*all_layers)
model
loss_fn = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
torch.manual_seed(1)
num_epochs = 200
log_epochs = 20
for epoch in range(num_epochs):
loss_hist_train = 0
for x_batch, y_batch in train_dl:
pred = model(x_batch)[:, 0]
loss = loss_fn(pred, y_batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()
loss_hist_train += loss.item()
if epoch % log_epochs==0:
print(f'Epoch {epoch} Loss {loss_hist_train/len(train_dl):.4f}')
with torch.no_grad():
pred = model(x_test.float())[:, 0]
loss = loss_fn(pred, y_test)
print(f'Test MSE: {loss.item():.4f}')
print(f'Test MAE: {nn.L1Loss()(pred, y_test).item():.4f}')
###Output
Test MSE: 9.6130
Test MAE: 2.1211
###Markdown
Project two - classifying MNIST hand-written digits
###Code
import torchvision
from torchvision import transforms
image_path = './'
transform = transforms.Compose([transforms.ToTensor()])
mnist_train_dataset = torchvision.datasets.MNIST(root=image_path,
train=True,
transform=transform,
download=False)
mnist_test_dataset = torchvision.datasets.MNIST(root=image_path,
train=False,
transform=transform,
download=False)
batch_size = 64
torch.manual_seed(1)
train_dl = DataLoader(mnist_train_dataset, batch_size, shuffle=True)
hidden_units = [32, 16]
image_size = mnist_train_dataset[0][0].shape
input_size = image_size[0] * image_size[1] * image_size[2]
all_layers = [nn.Flatten()]
for hidden_unit in hidden_units:
layer = nn.Linear(input_size, hidden_unit)
all_layers.append(layer)
all_layers.append(nn.ReLU())
input_size = hidden_unit
all_layers.append(nn.Linear(hidden_units[-1], 10))
all_layers.append(nn.Softmax(dim=1))
model = nn.Sequential(*all_layers)
model
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
torch.manual_seed(1)
num_epochs = 20
for epoch in range(num_epochs):
accuracy_hist_train = 0
for x_batch, y_batch in train_dl:
pred = model(x_batch)
loss = loss_fn(pred, y_batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()
is_correct = (torch.argmax(pred, dim=1) == y_batch).float()
accuracy_hist_train += is_correct.sum()
accuracy_hist_train /= len(train_dl.dataset)
print(f'Epoch {epoch} Accuracy {accuracy_hist_train:.4f}')
pred = model(mnist_test_dataset.data / 255.)
is_correct = (torch.argmax(pred, dim=1) == mnist_test_dataset.targets).float()
print(f'Test accuracy: {is_correct.mean():.4f}')
###Output
Test accuracy: 0.9557
###Markdown
---Readers may ignore the next cell.
###Code
! python ../.convert_notebook_to_script.py --input ch13_part2.ipynb --output ch13_part2.py
###Output
[NbConvertApp] WARNING | Config option `kernel_spec_manager_class` not recognized by `NbConvertApp`.
[NbConvertApp] Converting notebook ch13_part2.ipynb to script
[NbConvertApp] Writing 7103 bytes to ch13_part2.py
###Markdown
*Python Machine Learning 3rd Edition* by [Sebastian Raschka](https://sebastianraschka.com) & [Vahid Mirjalili](http://vahidmirjalili.com), Packt Publishing Ltd. 2019Code Repository: https://github.com/rasbt/python-machine-learning-book-3rd-editionCode License: [MIT License](https://github.com/rasbt/python-machine-learning-book-3rd-edition/blob/master/LICENSE.txt) Chapter 13: Parallelizing Neural Network Training with TensorFlow (Part 2/2) Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).
###Code
%load_ext watermark
%watermark -a "Sebastian Raschka & Vahid Mirjalili" -u -d -p numpy,scipy,matplotlib,tensorflow
%matplotlib inline
###Output
_____no_output_____
###Markdown
Building a neural network model in TensorFlow The TensorFlow Keras API (tf.keras) Building a linear regression model
###Code
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
X_train = np.arange(10).reshape((10, 1))
y_train = np.array([1.0, 1.3, 3.1,
2.0, 5.0, 6.3,
6.6, 7.4, 8.0,
9.0])
plt.plot(X_train, y_train, 'o', markersize=10)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
X_train_norm = (X_train - np.mean(X_train))/np.std(X_train)
ds_train_orig = tf.data.Dataset.from_tensor_slices(
(tf.cast(X_train_norm, tf.float32),
tf.cast(y_train, tf.float32)))
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.w = tf.Variable(0.0, name='weight')
self.b = tf.Variable(0.0, name='bias')
def call(self, x):
return self.w*x + self.b
model = MyModel()
model.build(input_shape=(None, 1))
model.summary()
def loss_fn(y_true, y_pred):
return tf.reduce_mean(tf.square(y_true - y_pred))
## testing the function:
yt = tf.convert_to_tensor([1.0])
yp = tf.convert_to_tensor([1.5])
loss_fn(yt, yp)
def train(model, inputs, outputs, learning_rate):
with tf.GradientTape() as tape:
current_loss = loss_fn(model(inputs), outputs)
dW, db = tape.gradient(current_loss, [model.w, model.b])
model.w.assign_sub(learning_rate * dW)
model.b.assign_sub(learning_rate * db)
tf.random.set_seed(1)
num_epochs = 200
log_steps = 100
learning_rate = 0.001
batch_size = 1
steps_per_epoch = int(np.ceil(len(y_train) / batch_size))
ds_train = ds_train_orig.shuffle(buffer_size=len(y_train))
ds_train = ds_train.repeat(count=None)
ds_train = ds_train.batch(1)
Ws, bs = [], []
for i, batch in enumerate(ds_train):
if i >= steps_per_epoch * num_epochs:
break
Ws.append(model.w.numpy())
bs.append(model.b.numpy())
bx, by = batch
loss_val = loss_fn(model(bx), by)
train(model, bx, by, learning_rate=learning_rate)
if i%log_steps==0:
print('Epoch {:4d} Step {:2d} Loss {:6.4f}'.format(
int(i/steps_per_epoch), i, loss_val))
print('Final Parameters:', model.w.numpy(), model.b.numpy())
X_test = np.linspace(0, 9, num=100).reshape(-1, 1)
X_test_norm = (X_test - np.mean(X_train)) / np.std(X_train)
y_pred = model(tf.cast(X_test_norm, dtype=tf.float32))
fig = plt.figure(figsize=(13, 5))
ax = fig.add_subplot(1, 2, 1)
plt.plot(X_train_norm, y_train, 'o', markersize=10)
plt.plot(X_test_norm, y_pred, '--', lw=3)
plt.legend(['Training examples', 'Linear Reg.'], fontsize=15)
ax.set_xlabel('x', size=15)
ax.set_ylabel('y', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
ax = fig.add_subplot(1, 2, 2)
plt.plot(Ws, lw=3)
plt.plot(bs, lw=3)
plt.legend(['Weight w', 'Bias unit b'], fontsize=15)
ax.set_xlabel('Iteration', size=15)
ax.set_ylabel('Value', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
#plt.savefig('ch13-linreg-1.pdf')
plt.show()
###Output
Final Parameters: 2.6576622 4.8798566
###Markdown
Model training via the .compile() and .fit() methods
###Code
tf.random.set_seed(1)
model = MyModel()
#model.build((None, 1))
model.compile(optimizer='sgd',
loss=loss_fn,
metrics=['mae', 'mse'])
model.fit(X_train_norm, y_train,
epochs=num_epochs, batch_size=batch_size,
verbose=1)
print(model.w.numpy(), model.b.numpy())
X_test = np.linspace(0, 9, num=100).reshape(-1, 1)
X_test_norm = (X_test - np.mean(X_train)) / np.std(X_train)
y_pred = model(tf.cast(X_test_norm, dtype=tf.float32))
fig = plt.figure(figsize=(13, 5))
ax = fig.add_subplot(1, 2, 1)
plt.plot(X_train_norm, y_train, 'o', markersize=10)
plt.plot(X_test_norm, y_pred, '--', lw=3)
plt.legend(['Training Samples', 'Linear Regression'], fontsize=15)
ax = fig.add_subplot(1, 2, 2)
plt.plot(Ws, lw=3)
plt.plot(bs, lw=3)
plt.legend(['W', 'bias'], fontsize=15)
plt.show()
###Output
2.7058775 4.971019
###Markdown
Building a multilayer perceptron for classifying flowers in the Iris dataset
###Code
import tensorflow_datasets as tfds
iris, iris_info = tfds.load('iris', with_info=True)
print(iris_info)
tf.random.set_seed(1)
ds_orig = iris['train']
ds_orig = ds_orig.shuffle(150, reshuffle_each_iteration=False)
print(next(iter(ds_orig)))
ds_train_orig = ds_orig.take(100)
ds_test = ds_orig.skip(100)
## checking the number of examples:
n = 0
for example in ds_train_orig:
n += 1
print(n)
n = 0
for example in ds_test:
n += 1
print(n)
ds_train_orig = ds_train_orig.map(
lambda x: (x['features'], x['label']))
ds_test = ds_test.map(
lambda x: (x['features'], x['label']))
next(iter(ds_train_orig))
iris_model = tf.keras.Sequential([
tf.keras.layers.Dense(16, activation='sigmoid',
name='fc1', input_shape=(4,)),
tf.keras.layers.Dense(3, name='fc2', activation='softmax')])
iris_model.summary()
iris_model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
num_epochs = 100
training_size = 100
batch_size = 2
steps_per_epoch = np.ceil(training_size / batch_size)
ds_train = ds_train_orig.shuffle(buffer_size=training_size)
ds_train = ds_train.repeat()
ds_train = ds_train.batch(batch_size=batch_size)
ds_train = ds_train.prefetch(buffer_size=1000)
history = iris_model.fit(ds_train, epochs=num_epochs,
steps_per_epoch=steps_per_epoch,
verbose=0)
hist = history.history
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(1, 2, 1)
ax.plot(hist['loss'], lw=3)
ax.set_title('Training loss', size=15)
ax.set_xlabel('Epoch', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
ax = fig.add_subplot(1, 2, 2)
ax.plot(hist['accuracy'], lw=3)
ax.set_title('Training accuracy', size=15)
ax.set_xlabel('Epoch', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
plt.tight_layout()
#plt.savefig('ch13-cls-learning-curve.pdf')
plt.show()
###Output
_____no_output_____
###Markdown
Evaluating the trained model on the test dataset
###Code
results = iris_model.evaluate(ds_test.batch(50), verbose=0)
print('Test loss: {:.4f} Test Acc.: {:.4f}'.format(*results))
###Output
Test loss: 0.1461 Test Acc.: 1.0000
###Markdown
Saving and reloading the trained model
###Code
iris_model.save('iris-classifier.h5',
overwrite=True,
include_optimizer=True,
save_format='h5')
iris_model_new = tf.keras.models.load_model('iris-classifier.h5')
iris_model_new.summary()
results = iris_model_new.evaluate(ds_test.batch(50), verbose=0)
print('Test loss: {:.4f} Test Acc.: {:.4f}'.format(*results))
labels_train = []
for i,item in enumerate(ds_train_orig):
labels_train.append(item[1].numpy())
labels_test = []
for i,item in enumerate(ds_test):
labels_test.append(item[1].numpy())
print('Training Set: ',len(labels_train), 'Test Set: ', len(labels_test))
iris_model_new.to_json()
###Output
_____no_output_____
###Markdown
Choosing activation functions for multilayer neural networks Logistic function recap
###Code
import numpy as np
X = np.array([1, 1.4, 2.5]) ## first value must be 1
w = np.array([0.4, 0.3, 0.5])
def net_input(X, w):
return np.dot(X, w)
def logistic(z):
return 1.0 / (1.0 + np.exp(-z))
def logistic_activation(X, w):
z = net_input(X, w)
return logistic(z)
print('P(y=1|x) = %.3f' % logistic_activation(X, w))
# W : array with shape = (n_output_units, n_hidden_units+1)
# note that the first column are the bias units
W = np.array([[1.1, 1.2, 0.8, 0.4],
[0.2, 0.4, 1.0, 0.2],
[0.6, 1.5, 1.2, 0.7]])
# A : data array with shape = (n_hidden_units + 1, n_samples)
# note that the first column of this array must be 1
A = np.array([[1, 0.1, 0.4, 0.6]])
Z = np.dot(W, A[0])
y_probas = logistic(Z)
print('Net Input: \n', Z)
print('Output Units:\n', y_probas)
y_class = np.argmax(Z, axis=0)
print('Predicted class label: %d' % y_class)
###Output
Predicted class label: 0
###Markdown
Estimating class probabilities in multiclass classification via the softmax function
###Code
def softmax(z):
return np.exp(z) / np.sum(np.exp(z))
y_probas = softmax(Z)
print('Probabilities:\n', y_probas)
np.sum(y_probas)
import tensorflow as tf
Z_tensor = tf.expand_dims(Z, axis=0)
tf.keras.activations.softmax(Z_tensor)
###Output
_____no_output_____
###Markdown
Broadening the output spectrum using a hyperbolic tangent
###Code
import matplotlib.pyplot as plt
%matplotlib inline
def tanh(z):
e_p = np.exp(z)
e_m = np.exp(-z)
return (e_p - e_m) / (e_p + e_m)
z = np.arange(-5, 5, 0.005)
log_act = logistic(z)
tanh_act = tanh(z)
plt.ylim([-1.5, 1.5])
plt.xlabel('Net input $z$')
plt.ylabel('Activation $\phi(z)$')
plt.axhline(1, color='black', linestyle=':')
plt.axhline(0.5, color='black', linestyle=':')
plt.axhline(0, color='black', linestyle=':')
plt.axhline(-0.5, color='black', linestyle=':')
plt.axhline(-1, color='black', linestyle=':')
plt.plot(z, tanh_act,
linewidth=3, linestyle='--',
label='Tanh')
plt.plot(z, log_act,
linewidth=3,
label='Logistic')
plt.legend(loc='lower right')
plt.tight_layout()
plt.show()
np.tanh(z)
import tensorflow as tf
tf.keras.activations.tanh(z)
from scipy.special import expit
expit(z)
tf.keras.activations.sigmoid(z)
###Output
_____no_output_____
###Markdown
Rectified linear unit activation
###Code
import tensorflow as tf
tf.keras.activations.relu(z)
###Output
_____no_output_____
###Markdown
Summary Appendix Splitting a dataset: danger of mixing train/test examples
###Code
## the correct way:
ds = tf.data.Dataset.range(15)
ds = ds.shuffle(15, reshuffle_each_iteration=False)
ds_train = ds.take(10)
ds_test = ds.skip(10)
ds_train = ds_train.shuffle(10).repeat(10)
ds_test = ds_test.shuffle(5)
ds_test = ds_test.repeat(10)
set_train = set()
for i,item in enumerate(ds_train):
set_train.add(item.numpy())
set_test = set()
for i,item in enumerate(ds_test):
set_test.add(item.numpy())
print(set_train, set_test)
## The wrong way:
ds = tf.data.Dataset.range(15)
ds = ds.shuffle(15, reshuffle_each_iteration=True)
ds_train = ds.take(10)
ds_test = ds.skip(10)
ds_train = ds_train.shuffle(10).repeat(10)
ds_test = ds_test.shuffle(5)
ds_test = ds_test.repeat(10)
set_train = set()
for i,item in enumerate(ds_train):
set_train.add(item.numpy())
set_test = set()
for i,item in enumerate(ds_test):
set_test.add(item.numpy())
print(set_train, set_test)
###Output
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14} {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}
###Markdown
Splitting a dataset using `tfds.Split`
###Code
##--------------------------- Attention ------------------------##
## ##
## Note: currently, tfds.Split has a bug in TF 2.0.0 ##
## ##
## I.e., splitting [2, 1] is expected to result in ##
## 100 train and 50 test examples ##
## ##
## but instead, it results in 116 train and 34 test examples ##
## ##
##--------------------------------------------------------------##
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
## method 1: specifying percentage:
#first_67_percent = tfds.Split.TRAIN.subsplit(tfds.percent[:67])
#last_33_percent = tfds.Split.TRAIN.subsplit(tfds.percent[-33:])
#ds_train_orig = tfds.load('iris', split=first_67_percent)
#ds_test = tfds.load('iris', split=last_33_percent)
## method 2: specifying the weights
split_train, split_test = tfds.Split.TRAIN.subsplit([2, 1])
ds_train_orig = tfds.load('iris', split=split_train)
ds_test = tfds.load('iris', split=split_test)
print(next(iter(ds_train_orig)))
print()
print(next(iter(ds_test)))
ds_train_orig = ds_train_orig.shuffle(100, reshuffle_each_iteration=True)
ds_test = ds_test.shuffle(50, reshuffle_each_iteration=False)
ds_train_orig = ds_train_orig.map(
lambda x: (x['features'], x['label']))
ds_test = ds_test.map(
lambda x: (x['features'], x['label']))
print(next(iter(ds_train_orig)))
for j in range(5):
labels_train = []
for i,item in enumerate(ds_train_orig):
labels_train.append(item[1].numpy())
labels_test = []
for i,item in enumerate(ds_test):
labels_test.append(item[1].numpy())
print('Training Set: ',len(labels_train), 'Test Set: ', len(labels_test))
labels_test = np.array(labels_test)
print(np.sum(labels_test == 0), np.sum(labels_test == 1), np.sum(labels_test == 2))
###Output
{'features': <tf.Tensor: id=135251, shape=(4,), dtype=float32, numpy=array([6.1, 2.8, 4.7, 1.2], dtype=float32)>, 'label': <tf.Tensor: id=135252, shape=(), dtype=int64, numpy=1>}
{'features': <tf.Tensor: id=135258, shape=(4,), dtype=float32, numpy=array([5.7, 3. , 4.2, 1.2], dtype=float32)>, 'label': <tf.Tensor: id=135259, shape=(), dtype=int64, numpy=1>}
(<tf.Tensor: id=135303, shape=(4,), dtype=float32, numpy=array([6.8, 2.8, 4.8, 1.4], dtype=float32)>, <tf.Tensor: id=135304, shape=(), dtype=int64, numpy=1>)
Training Set: 116 Test Set: 34
10 12 12
Training Set: 116 Test Set: 34
10 12 12
Training Set: 116 Test Set: 34
10 12 12
Training Set: 116 Test Set: 34
10 12 12
Training Set: 116 Test Set: 34
10 12 12
###Markdown
---Readers may ignore the next cell.
###Code
! python ../.convert_notebook_to_script.py --input ch13_part2.ipynb --output ch13_part2.py
###Output
[NbConvertApp] Converting notebook ch13_part2.ipynb to script
[NbConvertApp] Writing 14023 bytes to ch13_part2.py
###Markdown
*Python Machine Learning 3rd Edition* by [Sebastian Raschka](https://sebastianraschka.com) & [Vahid Mirjalili](http://vahidmirjalili.com), Packt Publishing Ltd. 2019Code Repository: https://github.com/rasbt/python-machine-learning-book-3rd-editionCode License: [MIT License](https://github.com/rasbt/python-machine-learning-book-3rd-edition/blob/master/LICENSE.txt) Chapter 13: Parallelizing Neural Network Training with TensorFlow (Part 2/2) Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).
###Code
%load_ext watermark
%watermark -a "Sebastian Raschka & Vahid Mirjalili" -u -d -p numpy,scipy,matplotlib,tensorflow
%matplotlib inline
###Output
_____no_output_____
###Markdown
Building a neural network model in TensorFlow The TensorFlow Keras API (tf.keras) Building a linear regression model
###Code
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
X_train = np.arange(10).reshape((10, 1))
y_train = np.array([1.0, 1.3, 3.1,
2.0, 5.0, 6.3,
6.6, 7.4, 8.0,
9.0])
plt.plot(X_train, y_train, 'o', markersize=10)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
X_train_norm = (X_train - np.mean(X_train))/np.std(X_train)
ds_train_orig = tf.data.Dataset.from_tensor_slices(
(tf.cast(X_train_norm, tf.float32),
tf.cast(y_train, tf.float32)))
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.w = tf.Variable(0.0, name='weight')
self.b = tf.Variable(0.0, name='bias')
def call(self, x):
return self.w*x + self.b
model = MyModel()
model.build(input_shape=(None, 1))
model.summary()
###Output
Model: "my_model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
Total params: 2
Trainable params: 2
Non-trainable params: 0
_________________________________________________________________
###Markdown
Memo: I didn't understand what `model` means here, so I'll look it up later. I checked the help but still couldn't really understand it...
###Code
model.build??
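# Editorial aside, not part of the original notebook, regarding the memo above:
# in this subclassed model the weights w and b are already created in __init__;
# calling model.build(input_shape=(None, 1)) records the expected input shape and
# marks the model as built, which is what allows model.summary() to be printed
# without first calling the model on real data. A minimal sketch of the idea:
# m = MyModel()
# m.build(input_shape=(None, 1))   # no forward pass, model is just marked as built
# m.summary()                      # -> reports the two trainable parameters w and b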
def loss_fn(y_true, y_pred):
return tf.reduce_mean(tf.square(y_true - y_pred))
## testing the function:
yt = tf.convert_to_tensor([1.0])
yp = tf.convert_to_tensor([1.5])
loss_fn(yt, yp)
def train(model, inputs, outputs, learning_rate):
with tf.GradientTape() as tape:
current_loss = loss_fn(model(inputs), outputs)
dW, db = tape.gradient(current_loss, [model.w, model.b])
model.w.assign_sub(learning_rate * dW)
model.b.assign_sub(learning_rate * db)
tf.random.set_seed(1)
num_epochs = 200
log_steps = 100
learning_rate = 0.001
batch_size = 1
steps_per_epoch = int(np.ceil(len(y_train) / batch_size))
ds_train = ds_train_orig.shuffle(buffer_size=len(y_train))
ds_train = ds_train.repeat(count=None)
ds_train = ds_train.batch(1)
Ws, bs = [], []
for i, batch in enumerate(ds_train):
if i >= steps_per_epoch * num_epochs:
break
Ws.append(model.w.numpy())
bs.append(model.b.numpy())
bx, by = batch
loss_val = loss_fn(model(bx), by)
train(model, bx, by, learning_rate=learning_rate)
if i%log_steps==0:
print('Epoch {:4d} Step {:2d} Loss {:6.4f}'.format(
int(i/steps_per_epoch), i, loss_val))
print('Final Parameters:', model.w.numpy(), model.b.numpy())
X_test = np.linspace(0, 9, num=100).reshape(-1, 1)
X_test_norm = (X_test - np.mean(X_train)) / np.std(X_train)
y_pred = model(tf.cast(X_test_norm, dtype=tf.float32))
fig = plt.figure(figsize=(13, 5))
ax = fig.add_subplot(1, 2, 1)
plt.plot(X_train_norm, y_train, 'o', markersize=10)
plt.plot(X_test_norm, y_pred, '--', lw=3)
plt.legend(['Training examples', 'Linear Reg.'], fontsize=15)
ax.set_xlabel('x', size=15)
ax.set_ylabel('y', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
ax = fig.add_subplot(1, 2, 2)
plt.plot(Ws, lw=3)
plt.plot(bs, lw=3)
plt.legend(['Weight w', 'Bias unit b'], fontsize=15)
ax.set_xlabel('Iteration', size=15)
ax.set_ylabel('Value', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
#plt.savefig('ch13-linreg-1.pdf')
plt.show()
###Output
Final Parameters: 2.6576622 4.8798566
###Markdown
Model training via the .compile() and .fit() methods
###Code
tf.random.set_seed(1)
model = MyModel()
#model.build((None, 1))
model.compile(optimizer='sgd',
loss=loss_fn,
metrics=['mae', 'mse'])
model.fit(X_train_norm, y_train,
epochs=num_epochs, batch_size=batch_size,
verbose=1)
print(model.w.numpy(), model.b.numpy())
X_test = np.linspace(0, 9, num=100).reshape(-1, 1)
X_test_norm = (X_test - np.mean(X_train)) / np.std(X_train)
y_pred = model(tf.cast(X_test_norm, dtype=tf.float32))
fig = plt.figure(figsize=(13, 5))
ax = fig.add_subplot(1, 2, 1)
plt.plot(X_train_norm, y_train, 'o', markersize=10)
plt.plot(X_test_norm, y_pred, '--', lw=3)
plt.legend(['Training Samples', 'Linear Regression'], fontsize=15)
ax = fig.add_subplot(1, 2, 2)
plt.plot(Ws, lw=3)
plt.plot(bs, lw=3)
plt.legend(['W', 'bias'], fontsize=15)
plt.show()
###Output
2.7058775 4.971019
###Markdown
Building a multilayer perceptron for classifying flowers in the Iris dataset
###Code
import tensorflow_datasets as tfds
iris, iris_info = tfds.load('iris', with_info=True)
print(iris_info)
tf.random.set_seed(1)
ds_orig = iris['train']
ds_orig = ds_orig.shuffle(150, reshuffle_each_iteration=False)
print(next(iter(ds_orig)))
ds_train_orig = ds_orig.take(100)
ds_test = ds_orig.skip(100) # takes the data from the 101st example onward; for now, think of take and skip as playing the role of index slicing.
## checking the number of examples:
n = 0
for example in ds_train_orig:
n += 1
print(n)
n = 0
for example in ds_test:
n += 1
print(n)
ds_train_orig = ds_train_orig.map(
lambda x: (x['features'], x['label']))
ds_test = ds_test.map(
lambda x: (x['features'], x['label']))
next(iter(ds_train_orig))
iris_model = tf.keras.Sequential([
tf.keras.layers.Dense(16, activation='sigmoid',
name='fc1', input_shape=(4,)),
tf.keras.layers.Dense(3, name='fc2', activation='softmax')])
iris_model.summary()
iris_model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
num_epochs = 100
training_size = 100
batch_size = 2
steps_per_epoch = np.ceil(training_size / batch_size)
ds_train = ds_train_orig.shuffle(buffer_size=training_size)
ds_train = ds_train.repeat()
ds_train = ds_train.batch(batch_size=batch_size) # (about the prefetch() call on the next line) it prefetches the data to be processed next; judging from this page, it seems to be the recommended setting: https://www.tensorflow.org/api_docs/python/tf/data/Dataset#prefetch
ds_train = ds_train.prefetch(buffer_size=1000)
history = iris_model.fit(ds_train, epochs=num_epochs,
steps_per_epoch=steps_per_epoch,
verbose=0)
hist = history.history
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(1, 2, 1)
ax.plot(hist['loss'], lw=3)
ax.set_title('Training loss', size=15)
ax.set_xlabel('Epoch', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
ax = fig.add_subplot(1, 2, 2)
ax.plot(hist['accuracy'], lw=3)
ax.set_title('Training accuracy', size=15)
ax.set_xlabel('Epoch', size=15)
ax.tick_params(axis='both', which='major', labelsize=15)
plt.tight_layout()
#plt.savefig('ch13-cls-learning-curve.pdf')
plt.show()
###Output
_____no_output_____
###Markdown
Evaluating the trained model on the test dataset
###Code
results = iris_model.evaluate(ds_test.batch(50), verbose=0)
print('Test loss: {:.4f} Test Acc.: {:.4f}'.format(*results))
###Output
Test loss: 0.0692 Test Acc.: 0.9800
###Markdown
Saving and reloading the trained model
###Code
iris_model.save('iris-classifier.h5',
overwrite=True,
include_optimizer=True,
save_format='h5')
iris_model_new = tf.keras.models.load_model('iris-classifier.h5')
iris_model_new.summary()
results = iris_model_new.evaluate(ds_test.batch(50), verbose=0)
print('Test loss: {:.4f} Test Acc.: {:.4f}'.format(*results))
labels_train = []
for i,item in enumerate(ds_train_orig):
labels_train.append(item[1].numpy())
labels_test = []
for i,item in enumerate(ds_test):
labels_test.append(item[1].numpy())
print('Training Set: ',len(labels_train), 'Test Set: ', len(labels_test))
iris_model_new.to_json()
###Output
_____no_output_____
###Markdown
Choosing activation functions for multilayer neural networks Logistic function recap
###Code
import numpy as np
X = np.array([1, 1.4, 2.5]) ## first value must be 1
w = np.array([0.4, 0.3, 0.5])
def net_input(X, w):
return np.dot(X, w)
def logistic(z):
return 1.0 / (1.0 + np.exp(-z))
def logistic_activation(X, w):
z = net_input(X, w)
return logistic(z)
print('P(y=1|x) = %.3f' % logistic_activation(X, w))
# W : array with shape = (n_output_units, n_hidden_units+1)
# note that the first column are the bias units
W = np.array([[1.1, 1.2, 0.8, 0.4],
[0.2, 0.4, 1.0, 0.2],
[0.6, 1.5, 1.2, 0.7]])
# A : data array with shape = (n_hidden_units + 1, n_samples)
# note that the first column of this array must be 1
A = np.array([[1, 0.1, 0.4, 0.6]])
Z = np.dot(W, A[0])
y_probas = logistic(Z)
print('Net Input: \n', Z)
print('Output Units:\n', y_probas)
y_class = np.argmax(Z, axis=0)
print('Predicted class label: %d' % y_class)
###Output
Predicted class label: 0
###Markdown
Estimating class probabilities in multiclass classification via the softmax function
###Code
def softmax(z):
return np.exp(z) / np.sum(np.exp(z))
y_probas = softmax(Z)
print('Probabilities:\n', y_probas)
np.sum(y_probas)
import tensorflow as tf
Z_tensor = tf.expand_dims(Z, axis=0)
tf.keras.activations.softmax(Z_tensor)
###Output
_____no_output_____
###Markdown
Broadening the output spectrum using a hyperbolic tangent
###Code
import matplotlib.pyplot as plt
%matplotlib inline
def tanh(z):
e_p = np.exp(z)
e_m = np.exp(-z)
return (e_p - e_m) / (e_p + e_m)
z = np.arange(-5, 5, 0.005)
log_act = logistic(z)
tanh_act = tanh(z)
plt.ylim([-1.5, 1.5])
plt.xlabel('Net input $z$')
plt.ylabel('Activation $\phi(z)$')
plt.axhline(1, color='black', linestyle=':')
plt.axhline(0.5, color='black', linestyle=':')
plt.axhline(0, color='black', linestyle=':')
plt.axhline(-0.5, color='black', linestyle=':')
plt.axhline(-1, color='black', linestyle=':')
plt.plot(z, tanh_act,
linewidth=3, linestyle='--',
label='Tanh')
plt.plot(z, log_act,
linewidth=3,
label='Logistic')
plt.legend(loc='lower right')
plt.tight_layout()
plt.show()
np.tanh(z)
import tensorflow as tf
tf.keras.activations.tanh(z)
from scipy.special import expit
expit(z)
tf.keras.activations.sigmoid(z)
###Output
_____no_output_____
###Markdown
Rectified linear unit activation
###Code
import tensorflow as tf
tf.keras.activations.relu(z)
###Output
_____no_output_____
###Markdown
Summary Appendix Splitting a dataset: danger of mixing train/test examples
###Code
## the correct way:
ds = tf.data.Dataset.range(15)
ds = ds.shuffle(15, reshuffle_each_iteration=False)
ds_train = ds.take(10)
ds_test = ds.skip(10)
ds_train = ds_train.shuffle(10).repeat(10)
ds_test = ds_test.shuffle(5)
ds_test = ds_test.repeat(10)
set_train = set()
for i,item in enumerate(ds_train):
set_train.add(item.numpy())
set_test = set()
for i,item in enumerate(ds_test):
set_test.add(item.numpy())
print(set_train, set_test)
## The wrong way:
ds = tf.data.Dataset.range(15)
ds = ds.shuffle(15, reshuffle_each_iteration=True)
ds_train = ds.take(10)
ds_test = ds.skip(10)
ds_train = ds_train.shuffle(10).repeat(10)
ds_test = ds_test.shuffle(5)
ds_test = ds_test.repeat(10)
set_train = set()
for i,item in enumerate(ds_train):
set_train.add(item.numpy())
set_test = set()
for i,item in enumerate(ds_test):
set_test.add(item.numpy())
print(set_train, set_test)
###Output
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14} {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}
###Markdown
Splitting a dataset using `tfds.Split`
###Code
##--------------------------- Attention ------------------------##
## ##
## Note: currently, tfds.Split has a bug in TF 2.0.0 ##
## ##
## I.e., splitting [2, 1] is expected to result in ##
## 100 train and 50 test examples ##
## ##
## but instead, it results in 116 train and 34 test examples ##
## ##
##--------------------------------------------------------------##
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
## method 1: specifying percentage:
#first_67_percent = tfds.Split.TRAIN.subsplit(tfds.percent[:67])
#last_33_percent = tfds.Split.TRAIN.subsplit(tfds.percent[-33:])
#ds_train_orig = tfds.load('iris', split=first_67_percent)
#ds_test = tfds.load('iris', split=last_33_percent)
## method 2: specifying the weights
split_train, split_test = tfds.Split.TRAIN.subsplit([2, 1])
ds_train_orig = tfds.load('iris', split=split_train)
ds_test = tfds.load('iris', split=split_test)
print(next(iter(ds_train_orig)))
print()
print(next(iter(ds_test)))
ds_train_orig = ds_train_orig.shuffle(100, reshuffle_each_iteration=True)
ds_test = ds_test.shuffle(50, reshuffle_each_iteration=False)
ds_train_orig = ds_train_orig.map(
lambda x: (x['features'], x['label']))
ds_test = ds_test.map(
lambda x: (x['features'], x['label']))
print(next(iter(ds_train_orig)))
for j in range(5):
labels_train = []
for i,item in enumerate(ds_train_orig):
labels_train.append(item[1].numpy())
labels_test = []
for i,item in enumerate(ds_test):
labels_test.append(item[1].numpy())
print('Training Set: ',len(labels_train), 'Test Set: ', len(labels_test))
labels_test = np.array(labels_test)
print(np.sum(labels_test == 0), np.sum(labels_test == 1), np.sum(labels_test == 2))
###Output
{'features': <tf.Tensor: id=33147, shape=(4,), dtype=float32, numpy=array([6.1, 2.8, 4.7, 1.2], dtype=float32)>, 'label': <tf.Tensor: id=33148, shape=(), dtype=int64, numpy=1>}
{'features': <tf.Tensor: id=33154, shape=(4,), dtype=float32, numpy=array([5.7, 3. , 4.2, 1.2], dtype=float32)>, 'label': <tf.Tensor: id=33155, shape=(), dtype=int64, numpy=1>}
(<tf.Tensor: id=33199, shape=(4,), dtype=float32, numpy=array([6.9, 3.1, 5.1, 2.3], dtype=float32)>, <tf.Tensor: id=33200, shape=(), dtype=int64, numpy=2>)
Training Set: 116 Test Set: 34
10 12 12
Training Set: 116 Test Set: 34
10 12 12
Training Set: 116 Test Set: 34
10 12 12
Training Set: 116 Test Set: 34
10 12 12
Training Set: 116 Test Set: 34
10 12 12
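###Markdown
As a side note on the bug above: in newer releases of `tensorflow_datasets`, the `tfds.Split.TRAIN.subsplit` API was removed in favor of slicing strings passed directly to `split`. A minimal sketch, assuming a recent `tensorflow_datasets` version is installed (exact counts may differ by one example due to rounding):
###Code
import tensorflow_datasets as tfds

## method 3: slicing strings (newer tensorflow_datasets API)
ds_train_orig = tfds.load('iris', split='train[:67%]')
ds_test = tfds.load('iris', split='train[67%:]')

print('Training Set: ', len(list(ds_train_orig)),
      'Test Set: ', len(list(ds_test)))
###Output
_____no_output_____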
###Markdown
---Readers may ignore the next cell.
###Code
! python ../.convert_notebook_to_script.py --input ch13_part2.ipynb --output ch13_part2.py
###Output
[NbConvertApp] Converting notebook ch13_part2.ipynb to script
[NbConvertApp] Writing 14023 bytes to ch13_part2.py
|
Submission1_notebooks/Figure7-Across_the_gene-PlaB-insulation.ipynb | ###Markdown
Let's merge Gene Expression level information into our gene annotation DataFrame
###Code
rsem_df = pd.read_csv("merged_TPM_genes.tsv",sep="\t")[["Id","rsem.out.S442_NT"]]
# genes_Df overlap rsem
# ~4_300 ~23_200 ~2_500
genes_exp_df = pd.merge(
rsem_df,
genes_df,
how = 'inner',
left_on="Id",
right_on="geneName"
)
genes_exp_df.head(3)
# columns needed for TSS/TTS bed files output:
tx_cols = ["chr", "txStart", "txEnd", "strand", "rsem.out.S442_NT"]
tx_cols_rename = {"chr":"chrom",
"txStart":"start",
"txEnd":"end",
"rsem.out.S442_NT":"exp"}
gdf = genes_exp_df[tx_cols].reset_index(drop=True).rename(tx_cols_rename,axis=1)
gdf["size"] = gdf["end"] - gdf["start"]
gdf = gdf.sort_values(["chrom","start"])
gdf = gdf[gdf["chrom"].isin(autosomal_chroms)]
gdf = gdf.reset_index(drop=True)
gdf.head(3)
bins = np.r_[0,np.geomspace(100,1_000_000),10_000_000]
gdf[gdf["strand"]=="+"]["size"].plot.hist(bins=bins,log=True,label="+")
gdf[gdf["strand"]=="-"]["size"].plot.hist(bins=bins,log=True,alpha=0.7,label="-")
ax = plt.gca()
ax.set_xscale("log")
ax.set_xlabel("transcript size, bp")
ax.legend(frameon=False)
# actually - negative polarity genes are less expressed than the positive strand genes ...
bins = np.r_[0,np.geomspace(1,12000)]
gdf[gdf["strand"]=="+"]["exp"].plot.hist(bins=bins,log=True,label="+")
gdf[gdf["strand"]=="-"]["exp"].plot.hist(bins=bins,log=True,alpha=0.7,label="-")
ax = plt.gca()
ax.set_xscale("log")
ax.set_xlabel("RPKM")
ax.legend(frameon=False)
exp_bin_edges = [0,1,10,np.inf]
exp_labels = ("no","low","high")
sizes_bin_edges = [0,50_000,np.inf]
sizes_labels = ("short","long")
gdf["gexp"] = pd.cut(
gdf["exp"],
bins = exp_bin_edges,
labels = exp_labels,
include_lowest = True
)
gdf["gsize"] = pd.cut(
gdf["size"],
bins = sizes_bin_edges,
labels = sizes_labels,
include_lowest = True
)
display(gdf.head())
display( gdf.groupby(["gexp","gsize"]).size().unstack(fill_value=0) )
# extend the stacking region by the gene length on each side !
gdf["start"] = gdf["start"] - gdf["size"]
gdf["end"] = gdf["end"] + gdf["size"]
def extract_ins(df,
fname,
bins=3000
):
"""
for a BED-like DF, extract 'signal' from BW (fname) for every interval
use "strand" information to flip "-" strand genes ...
"""
return np.asarray([
bbi.fetch
(
fname,
chrom,
start,
end,
bins=bins,
missing=0.0,
oob=np.nan,
summary='mean',
)[:: (1 if strand=="+" else -1) ] # flip in place for "-" genes
for chrom, start, end, strand, *_ in df.itertuples(index=False)
])
def extract_ins_noflip(df,
fname,
bins=3000
):
"""
for a BED-like DF, extract 'signal' from BW (fname) for every interval
no flipping ...
"""
return np.asarray([
bbi.fetch
(
fname,
chrom,
start,
end,
bins=bins,
missing=0.0,
oob=np.nan,
summary='mean',
)
for chrom, start, end, *_ in df.itertuples(index=False)
])
ctcf_df = bioframe.read_table("intersect-all-NT-CTCF-NO-G4-centered-RAD21",schema="bed3",index_col=False)
# # ctcf_df = pd.read_table("intersect-all-NT-CTCF-NO-G4-centered-RAD21",header=None)
ctcf_df = ctcf_df.sort_values(["chrom","start"])
ctcf_df = ctcf_df[ctcf_df["chrom"].isin(autosomal_chroms)]
ctcf_df = ctcf_df.reset_index(drop=True)
ctcf_df["start"] = ctcf_df["start"] - 200_000
ctcf_df["end"] = ctcf_df["end"] + 200_000
ctcf_df.head(3)
###Output
_____no_output_____
###Markdown
TSS to TTS scaled profiles ...
###Code
# key features/signals that we want to extract for our genes ...
key_features = ["ins_CtrPlaB_CTCF",
"ins_CtrPlaB_noCTCF"]
# "ins_PlaB_CTCF",
# "ins_PlaB_noCTCF",
# "ev1_CtrPlaB_CTCF",
# "ev1_CtrPlaB_noCTCF",
# "ev1_PlaB_CTCF",
# "ev1_PlaB_noCTCF"]
###Output
_____no_output_____
###Markdown
Extracting Meta-gene profiles of the key features (insulation, ChIP-seq data, etc.) Here we extract data for + and - genes separately and flip "-" genes to orient them the same way as the "+" genes ... using the `extract_ins` function...
###Code
profiles_dict = {}
# group genes by expression and size before extracting corresponding profiles of a list of features :
gene_groups = gdf.groupby(["gexp","gsize"])
for feature_name in key_features:
print(f"extracting {feature_name} for the genes ...")
feature_fname = samples[feature_name]["fname"]
profiles_dict[feature_name] = gene_groups.apply( lambda sub_df: extract_ins(sub_df, feature_fname) )
ins_stack = profiles_dict["ins_CtrPlaB_CTCF"][("high","long")]
l,w = ins_stack.shape
flank_size = int(w*0.25)
# get shoulders of the stack
shoulder_idxs = np.r_[:flank_size,w-flank_size:w]
ins_stack = ins_stack - np.nanmean(ins_stack[:,shoulder_idxs],axis=1,keepdims=True)
# for every stack yield - bottom of the corridor, upper level and mean ...
_x = np.arange(w)
_up = np.nanquantile(ins_stack,0.45,axis=0)
_down = np.nanquantile(ins_stack,0.55,axis=0)
_profile = np.nanmean(ins_stack,axis=0)
# ins_stack = profiles_dict["ins_CtrPlaB_CTCF"][("high","long")]
plt.imshow(ins_stack - np.nanmean(ins_stack[:,shoulder_idxs],axis=1,keepdims=True),interpolation="bilinear",vmin=-.1,vmax=.1)
###Output
_____no_output_____
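###Markdown
The corridor arrays computed above (`_x`, `_up`, `_down`, `_profile`) are not drawn in that cell; below is a minimal sketch of how they could be overlaid on the mean profile. It assumes the previous cell has been run so those variables are in scope and that matplotlib is available as `plt`, as elsewhere in this notebook.
###Code
import matplotlib.pyplot as plt

# mean flank-normalized insulation with the 45-55% quantile corridor from the cell above
fig, ax = plt.subplots(figsize=(8, 3))
ax.fill_between(_x, _down, _up, alpha=0.3, label="45-55% quantile corridor")
ax.plot(_x, _profile, color="k", label="mean insulation")
ax.set_xlim(0, len(_x))
ax.set_xticks([len(_x)//3, 2*len(_x)//3])
ax.set_xticklabels(["TSS", "TTS"])
ax.set_ylabel("insulation (flank-normalized)")
ax.legend(frameon=False)
###Output
_____no_output_____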
###Markdown
CTCF insulation/R-loop profiles to match the scaled TSS/TTS profiles ... In this case we are not flipping profiles according to orientation (yet), so we have to use the `extract_ins_noflip` function
###Code
CTCF_profiles_dict = {}
for feature_name in key_features:
print(f"extracting {feature_name} for the CTCF peaks ...")
feature_fname = samples[feature_name]["fname"]
CTCF_profiles_dict[feature_name] = extract_ins_noflip(ctcf_df, feature_fname, bins=300)
###Output
extracting ins_CtrPlaB_CTCF for the CTCF peaks ...
extracting ins_CtrPlaB_noCTCF for the CTCF peaks ...
###Markdown
Normalize insulation by subtracting average flanking signal
###Code
ins_keys = [k for k in key_features if k.startswith("ins") ]
for feature_name in ins_keys:
print(f"normalizing insulation {feature_name} for CTCF-peak profiles ...")
ins_profile = CTCF_profiles_dict[feature_name]
flank_size = int(len(ins_profile)*0.25)
# average left and right shoulders of `flank_size` ...
average_signal_across_flank = np.nanmean(np.r_[ins_profile[:flank_size], ins_profile[-flank_size:]])
# subtract that average flank ...
CTCF_profiles_dict[feature_name] = CTCF_profiles_dict[feature_name] - average_signal_across_flank
for feature_name in ins_keys:
print(f"normalizing insulation {feature_name} for gene profiles ...")
for gexp_gsize_key, ins_profile in profiles_dict[feature_name].iteritems():
flank_size = int(len(ins_profile)*0.25)
# average left and right shoulders of `flank_size` ...
average_signal_across_flank = np.nanmean(np.r_[ins_profile[:flank_size], ins_profile[-flank_size:]])
# subtract that average flank ...
profiles_dict[feature_name][gexp_gsize_key] = ins_profile - average_signal_across_flank
###Output
normalizing insulation ins_CtrPlaB_CTCF for CTCF-peak profiles ...
normalizing insulation ins_CtrPlaB_noCTCF for CTCF-peak profiles ...
normalizing insulation ins_PlaB_CTCF for CTCF-peak profiles ...
normalizing insulation ins_PlaB_noCTCF for CTCF-peak profiles ...
normalizing insulation ins_CtrPlaB_CTCF for gene profiles ...
normalizing insulation ins_CtrPlaB_noCTCF for gene profiles ...
normalizing insulation ins_PlaB_CTCF for gene profiles ...
normalizing insulation ins_PlaB_noCTCF for gene profiles ...
###Markdown
Plotting the profiles ...
###Code
glens=("short","long")
gexps=("no","low","high")
mutant_keys = [k for k in key_features if k.startswith("ins") ]
fig = plt.figure(figsize=(12,8),constrained_layout=True)
gs = fig.add_gridspec(len(glens),len(gexps))
for i,glen in enumerate(glens):
for j,gexp in enumerate(gexps):
ax = fig.add_subplot(gs[i,j])
for feature_name in mutant_keys:
ins_profiles = profiles_dict[feature_name]
ax.plot(ins_profiles[(gexp,glen)],label=feature_name)
ax.set_title(f"{gexp}-{glen}")
ax.legend(frameon=False)
ax.set_ylim(-0.17,.15)
ax.set_xlim(0,3000)
ax.set_xticks([1000,2000])
ax.set_xticklabels(["TSS","TTS"])
glens=("short","long")
gexps=("no","low","high")
mutant_keys = [k for k in key_features if k.startswith("ev1") ]
fig = plt.figure(figsize=(12,8),constrained_layout=True)
gs = fig.add_gridspec(len(glens),len(gexps))
for i,glen in enumerate(glens):
for j,gexp in enumerate(gexps):
ax = fig.add_subplot(gs[i,j])
for feature_name in mutant_keys:
ins_profiles = profiles_dict[feature_name]
ax.plot(ins_profiles[(gexp,glen)],label=feature_name)
ax.set_title(f"{gexp}-{glen}")
ax.legend(frameon=False)
ax.set_ylim(-0.3,.3)
ax.set_xlim(0,3000)
ax.set_xticks([1000,2000])
ax.set_xticklabels(["TSS","TTS"])
###Output
_____no_output_____
###Markdown
Figure 3C itself ! comparing average insulation
###Code
fig = plt.figure(figsize=(12,7),constrained_layout=True)
gs = fig.add_gridspec(2,3)
ins_ylim = (-0.27,.1)
rloop_ylim = (.7,4.5)
ins_keys_figure = key_features
# insulation profiles around CTCF-peaks
ax = fig.add_subplot(gs[0,0])
for feature_name in ins_keys_figure:
ins_profile = CTCF_profiles_dict[feature_name]
ax.plot(ins_profile,label=feature_name)
ax.set_title(f"CTCF sites (w RAD21, wo G4)")
ax.legend(frameon=False)
ax.set_ylim(ins_ylim)
ax.set_xlim(0,300)
ax.set_xticks([])
ax.set_xticklabels([])
# insulation profiles around genes ...
glen="long"
gexps=("high","no")
for j, gexp in enumerate(gexps):
ax = fig.add_subplot(gs[0,j+1])
for feature_name in ins_keys_figure:
ins_profiles = profiles_dict[feature_name]
ax.plot(ins_profiles[(gexp,glen)],label=feature_name)
ax.set_title(f"{gexp}-{glen}")
ax.legend(frameon=False)
ax.set_ylim(ins_ylim)
ax.set_xlim(0,3000)
ax.set_xticks([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_yticklabels([])
fig = plt.figure(figsize=(12,7),constrained_layout=True)
gs = fig.add_gridspec(2,3)
ins_ylim = (-0.27,.1)
rloop_ylim = (.7,4.5)
ins_keys_figure = [k for k in key_features if (k.startswith("ins_siCtr") or k.startswith("ins_siDDX")) ]
# insulation profiles around CTCF-peaks
ax = fig.add_subplot(gs[0,0])
for feature_name in ins_keys_figure:
ins_profile = CTCF_profiles_dict[feature_name]
ax.plot(ins_profile,label=feature_name)
ax.set_title(f"CTCF sites (w RAD21, wo G4)")
ax.legend(frameon=False)
ax.set_ylim(ins_ylim)
ax.set_xlim(0,300)
ax.set_xticks([])
ax.set_xticklabels([])
# insulation profiles around genes ...
glen="long"
gexps=("high","no")
for j, gexp in enumerate(gexps):
ax = fig.add_subplot(gs[0,j+1])
for feature_name in ins_keys_figure:
ins_profiles = profiles_dict[feature_name]
ax.plot(ins_profiles[(gexp,glen)],label=feature_name)
ax.set_title(f"{gexp}-{glen}")
ax.legend(frameon=False)
ax.set_ylim(ins_ylim)
ax.set_xlim(0,3000)
ax.set_xticks([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_yticklabels([])
ins_keys_figure = [k for k in key_features if (k.startswith("ins_siCtr") or k.startswith("ins_siTAF")) ]
# insulation profiles around CTCF-peaks
ax = fig.add_subplot(gs[1,0])
for feature_name in ins_keys_figure:
ins_profile = CTCF_profiles_dict[feature_name]
ax.plot(ins_profile,label=feature_name)
ax.set_title(f"CTCF sites (w RAD21, wo G4)")
ax.legend(frameon=False)
ax.set_ylim(ins_ylim)
ax.set_xlim(0,300)
ax.set_xticks([0,300])
ax.set_xticklabels(["-200kb","200kb"])
# insulation profiles around genes ...
glen="long"
gexps=("high","no")
for j, gexp in enumerate(gexps):
ax = fig.add_subplot(gs[1,j+1])
for feature_name in ins_keys_figure:
ins_profiles = profiles_dict[feature_name]
ax.plot(ins_profiles[(gexp,glen)],label=feature_name)
ax.set_title(f"{gexp}-{glen}")
ax.legend(frameon=False)
ax.set_ylim(ins_ylim)
ax.set_xlim(0,3000)
ax.set_xticks([1000,2000])
ax.set_xticklabels(["TSS","TTS"])
ax.set_yticks([])
ax.set_yticklabels([])
plt.savefig("Figure6-siRNA-metaGene-insulation.pdf")
fig = plt.figure(figsize=(12,7),constrained_layout=True)
gs = fig.add_gridspec(2,3)
ins_ylim = (-0.27,.1)
rloop_ylim = (.7,4.5)
ins_keys_figure = ["ins_rad21","ins_norad21"]
# insulation profiles around CTCF-peaks
ax = fig.add_subplot(gs[1,0])
for feature_name in ins_keys_figure:
ins_profile = CTCF_profiles_dict[feature_name]
ax.plot(ins_profile,label=feature_name)
ax.set_title(f"CTCF sites (w RAD21, wo G4)")
ax.legend(frameon=False)
ax.set_ylim(ins_ylim)
ax.set_xlim(0,300)
ax.set_xticks([0,300])
ax.set_xticklabels(["-200kb","200kb"])
# insulation profiles around genes ...
glen="long"
gexps=("high","no")
for j, gexp in enumerate(gexps):
ax = fig.add_subplot(gs[1,j+1])
for feature_name in ins_keys_figure:
ins_profiles = profiles_dict[feature_name]
ax.plot(ins_profiles[(gexp,glen)],label=feature_name)
ax.set_title(f"{gexp}-{glen}")
ax.legend(frameon=False)
ax.set_ylim(ins_ylim)
ax.set_xlim(0,3000)
ax.set_xticks([1000,2000])
ax.set_xticklabels(["TSS","TTS"])
ax.set_yticks([])
ax.set_yticklabels([])
plt.savefig("SuppFig3H-RAD21degron-metaGene-insulation-profile.pdf")
! scp Figure6*pdf ghpcc:/nl/umw_job_dekker/users/sv49w/ALV2021/
! scp SuppFig3H-RAD21degron-metaGene-insulation-profile.pdf ghpcc:/nl/umw_job_dekker/users/sv49w/ALV2021/
## PlaB (splicing inhibition) two replicates pooled together
# ! scp ghpcc:/nl/umw_job_dekker/users/av90w/cooler/20210525_PlaB_same/NT-hg19-combined-90000000.mcool ./
# ! scp ghpcc:/nl/umw_job_dekker/users/av90w/cooler/20210525_PlaB_same/IAA-hg19-combined-90000000.mcool ./
# ! scp ghpcc:/nl/umw_job_dekker/users/av90w/cooler/20210525_PlaB_same/NT-PlaB-hg19-combined-90000000.mcool ./
# ! scp ghpcc:/nl/umw_job_dekker/users/av90w/cooler/20210525_PlaB_same/IAA-PlaB-hg19-combined-90000000.mcool ./
###Output
_____no_output_____ |
notebooks/feature_analysis.ipynb | ###Markdown
Feature Analysis with YellowbrickFeature analysis visualizers are designed to visualize instances in data space in order to detect features or targets that might impact downstream fitting. Because ML operates on high-dimensional data sets (usually several dozen!), the visualizers focus on aggregation, optimization, and other techniques to give overviews of the data. It is our intent that the steering process will allow the data scientist to zoom and filter and explore the relationships between their instances and between dimensions.**Note: If you haven't already downloaded the data, check out the instructions in the notebook called `get_the_data.ipynb` first!**
###Code
import numpy as np
import yellowbrick as yb
###Output
_____no_output_____
###Markdown
Look for separability Radviz`RadViz` is a multivariate data visualization algorithm that plots each feature dimension uniformly around the circumference of a circle then plots points on the interior of the circle such that the point normalizes its values on the axes from the center to each arc. This mechanism allows as many dimensions as will easily fit on a circle, greatly expanding the dimensionality of the visualization.
###Code
from yellowbrick.datasets import load_occupancy
from yellowbrick.features import RadViz
# Load the classification data set
X, y = load_occupancy()
# Specify the target classes
classes = ["unoccupied", "occupied"]
# Instantiate the visualizer
visualizer = RadViz(classes=classes, size=(1080, 720))
visualizer.fit(X, y) # Fit the data to the visualizer
visualizer.transform(X) # Transform the data
visualizer.poof() # Draw/show/poof the data
###Output
_____no_output_____
###Markdown
Data scientists use this method to detect separability between classes. Is there an opportunity to learn from the feature set or is there just too much noise? Parallel CoordinatesParallel coordinates is a multi-dimensional feature visualization technique where the vertical axis is duplicated horizontally for each feature. Instances are displayed as a single line segment drawn from each vertical axis to the location representing their value for that feature. This allows many dimensions to be visualized at once; in fact, given infinite horizontal space (e.g. a scrolling window), technically an infinite number of dimensions can be displayed!
###Code
from yellowbrick.features import ParallelCoordinates
# Load data set (don't need to do again, included for completeness)
X, y = load_occupancy()
# Specify the features of interest and the classes of the target
features = [
"temperature", "relative humidity", "light", "CO2", "humidity"
]
classes = ["unoccupied", "occupied"]
# Instantiate the visualizer
visualizer = ParallelCoordinates(
classes=classes, features=features, sample=0.05,
shuffle=True, size=(1080, 720)
)
# Fit and transform the data to the visualizer
visualizer.fit_transform(X, y)
# Finalize the title and axes then display the visualization
visualizer.poof()
###Output
_____no_output_____
###Markdown
Data scientists use this method to detect clusters of instances that have similar classes, and to note features that have high variance or different distributions.By inspecting the visualization closely, we can see that the combination of transparency and overlap gives us the sense of groups of similar instances, sometimes referred to as "braids". If there are distinct braids of different classes, it suggests that there is enough separability that a classification algorithm might be able to discern between each class.However, as we inspect this class, we can see that the domain of each feature may make the visualization hard to interpret. In the above visualization, the domain of the `light` feature is in `[0, 1600]`, far larger than the range of temperature in `[50, 96]`. To solve this problem, each feature should be scaled or normalized so they are approximately in the same domain.Normalization techniques can be directly applied to the visualizer without pre-transforming the data (though you could also do this) by using the `normalize` parameter:
###Code
# Instantiate the visualizer
visualizer = ParallelCoordinates(
classes=classes, features=features,
normalize='standard', # This time we'll specify a normalizer
sample=0.05, shuffle=True, size=(1080, 720)
)
# Fit the visualizer and display it
visualizer.fit_transform(X, y)
visualizer.poof()
###Output
_____no_output_____
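###Markdown
As noted above, you could also pre-transform the data yourself instead of relying on the `normalize` parameter. A minimal sketch with scikit-learn's `StandardScaler`, assuming `X` loads as a pandas DataFrame here (the scaled array is wrapped back into a DataFrame so the feature names are preserved):
###Code
import pandas as pd
from sklearn.preprocessing import StandardScaler

# Scale the features ourselves, then hand the scaled frame to the visualizer
X_scaled = pd.DataFrame(StandardScaler().fit_transform(X), columns=X.columns)

visualizer = ParallelCoordinates(
    classes=classes, features=features, sample=0.05,
    shuffle=True, size=(1080, 720)
)
visualizer.fit_transform(X_scaled, y)
visualizer.poof()
###Output
_____no_output_____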
###Markdown
Try using `minmax`, `minabs`, `standard`, `l1`, or `l2` normalization above to change perspectives! Look for correlations Feature ImportancesThe feature engineering process involves selecting the minimum required features to produce a valid model because the more features a model contains, the more complex it is (and the more sparse the data), therefore the more sensitive the model is to errors due to variance. A common approach to eliminating features is to describe their relative importance to a model, then eliminate weak features or combinations of features and re-evaluate to see if the model fares better during cross-validation.Many model forms describe the underlying impact of features relative to each other. In scikit-learn, Decision Tree models and ensembles of trees such as Random Forest, Gradient Boosting, and Ada Boost provide a `feature_importances_` attribute when fitted. The Yellowbrick `FeatureImportances` visualizer utilizes this attribute to rank and plot relative importances.
###Code
from sklearn.ensemble import RandomForestClassifier
from yellowbrick.features import FeatureImportances
# Load data set (don't need to do again, included for completeness)
X, y = load_occupancy()
model = RandomForestClassifier(n_estimators=10)
viz = FeatureImportances(model, size=(1080, 720))
viz.fit(X, y)
viz.poof()
###Output
_____no_output_____
###Markdown
The above figure shows the features ranked according to the explained variance each feature contributes to the model. In this case the features are plotted against their *relative importance*, that is the percent importance of the most important feature. The visualizer also contains `features_` and `feature_importances_` attributes to get the ranked numeric values.For models that do not support a `feature_importances_` attribute, the `FeatureImportances` visualizer will also draw a bar plot for the `coef_` attribute that many linear models provide.When using a model with a `coef_` attribute, it is better to set `relative=False` to draw the true magnitude of the coefficient (which may be negative). We can also specify our own set of labels if the dataset does not have column names or to print better titles. Rank 2DA two-dimensional ranking of features utilizes a ranking algorithm that takes into account pairs of features at a time (e.g. joint plot analysis). The pairs of features are then ranked by score and visualized using the lower left triangle of a feature co-occurrence matrix.
###Code
from yellowbrick.datasets import load_credit
from yellowbrick.features import Rank2D
# Load the credit dataset
X, y = load_credit()
# Instantiate the visualizer with the Pearson ranking algorithm
visualizer = Rank2D(algorithm='pearson', size=(1080, 720))
visualizer.fit(X, y)
visualizer.transform(X)
visualizer.poof()
###Output
_____no_output_____
###Markdown
By default, the `Rank2D` visualizer utilizes the Pearson correlation score to detect colinear relationships.Alternatively, we can utilize the covariance ranking algorithm, which attempts to compute the mean value of the product of deviations of variates from their respective means. Covariance loosely attempts to detect a colinear relationship between features. Try substituting `algorithm='covariance'` above to see the difference! Look at the distribution PCA ProjectionThe PCA Decomposition visualizer utilizes principal component analysis to decompose high dimensional data into two or three dimensions so that each instance can be plotted in a scatter plot. The use of PCA means that the projected dataset can be analyzed along axes of principal variation and can be interpreted to determine if spherical distance metrics can be utilized.
###Code
from yellowbrick.features.pca import PCADecomposition
# Load data set (don't need to do again, included for completeness)
X, y = load_credit()
# Create a list of colors to assign to points in the plot
colors = np.array(['r' if yi else 'b' for yi in y])
visualizer = PCADecomposition(
scale=True, color=colors, size=(1080, 720)
)
visualizer.fit_transform(X, y)
visualizer.poof()
###Output
_____no_output_____
###Markdown
The PCA projection can also be plotted in three dimensions to attempt to visualize more principal components and get a better sense of the distribution in high dimensions. Try substituting in `visualizer = PCADecomposition(scale=True, color=colors, proj_dim=3)` above to see! ManifoldThe `Manifold` visualizer provides high dimensional visualization using [manifold learning](https://scikit-learn.org/stable/modules/manifold.html) to embed instances described by many dimensions into 2, thus allowing the creation of a scatter plot that shows latent structures in data. Unlike decomposition methods such as PCA and SVD, manifolds generally use nearest-neighbors approaches to embedding, allowing them to capture non-linear structures that would be otherwise lost. The projections that are produced can then be analyzed for noise or separability to determine if it is possible to create a decision space in the data.
###Code
from yellowbrick.datasets import load_concrete
from yellowbrick.features.manifold import Manifold
# Load the concrete data set
X, y = load_concrete()
visualizer = Manifold(
manifold='isomap', target='continuous', size=(1080, 720)
)
visualizer.fit_transform(X,y)
visualizer.poof()
###Output
_____no_output_____
###Markdown
Stochastic Neighbor EmbeddingOne very popular method for visualizing document similarity is to use t-distributed stochastic neighbor embedding, t-SNE. Scikit-learn implements this decomposition method as the `sklearn.manifold.TSNE` transformer. By decomposing high-dimensional document vectors into 2 dimensions using probability distributions from both the original dimensionality and the decomposed dimensionality, t-SNE is able to effectively cluster similar documents. By decomposing to 2 or 3 dimensions, the documents can be visualized with a scatter plot.Unfortunately, TSNE is very expensive, so typically a simpler decomposition method such as SVD or PCA is applied ahead of time. The `TSNEVisualizer` creates an inner transformer pipeline that applies such a decomposition first (SVD with 50 components by default), then performs the t-SNE embedding. The visualizer then plots the scatter plot, coloring by cluster or by class, or neither if a structural analysis is required.
###Code
from yellowbrick.text import TSNEVisualizer
from yellowbrick.datasets import load_hobbies
from sklearn.feature_extraction.text import TfidfVectorizer
# Load the hobbies corpus (don't need to do again, included for completeness)
corpus = load_hobbies()
tfidf = TfidfVectorizer()
docs = tfidf.fit_transform(corpus.data)
labels = corpus.target
# Create the visualizer and draw the vectors
tsne = TSNEVisualizer(size=(1080, 720))
tsne.fit(docs, labels)
tsne.poof()
###Output
_____no_output_____
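###Markdown
For intuition, the decompose-then-embed pipeline that `TSNEVisualizer` builds internally looks roughly like the plain scikit-learn sketch below (an approximation of the idea, not the visualizer's exact implementation):
###Code
from sklearn.decomposition import TruncatedSVD
from sklearn.manifold import TSNE
from sklearn.pipeline import Pipeline

# Reduce the sparse tf-idf vectors to 50 dimensions with SVD, then embed into 2D with t-SNE
decompose_then_embed = Pipeline([
    ("svd", TruncatedSVD(n_components=50)),
    ("tsne", TSNE(n_components=2)),
])
embedding = decompose_then_embed.fit_transform(docs)
print(embedding.shape)  # (number of documents, 2)
###Output
_____no_output_____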
###Markdown
Token FrequencyOne method for visualizing the frequency of tokens within and across corpora is frequency distribution. A frequency distribution tells us the frequency of each vocabulary item in the text. In general, it could count any kind of observable event. It is a distribution because it tells us how the total number of word tokens in the text are distributed across the vocabulary items.
###Code
from yellowbrick.text import FreqDistVisualizer
from sklearn.feature_extraction.text import CountVectorizer
# Load the hobbies corpus
corpus = load_hobbies()
vectorizer = CountVectorizer()
docs = vectorizer.fit_transform(corpus.data)
features = vectorizer.get_feature_names()
visualizer = FreqDistVisualizer(features=features, size=(1080, 720))
visualizer.fit(docs)
visualizer.poof()
###Output
_____no_output_____
###Markdown
Feature Analysis for Automatic Event Detection
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy as sc
import pandas as pd
from utils import read_feature_csv
pd.options.display.float_format = '{:.5f}'.format
pd.options.display.max_columns = None
# pandas 0.20 feature for much faster DataFrame computations
# bottleneck and numexpr libraries should be installed via pip
# They aren't essential, comment out if you don't want them.
pd.set_option('compute.use_bottleneck', True)
pd.set_option('compute.use_numexpr', True)
match_id = 20077
feature_df = pd.read_csv('../data/feature/{}_feature.csv'.format(match_id))
event_df = pd.read_csv('../data/event/{}_event.csv'.format(match_id))
display(feature_df.tail())
display(event_df.tail())
###Output
_____no_output_____
###Markdown
Introduction
In this document we will be analyzing various features obtained from players' $(x, y)$ coordinates via visualizations to see which features may be useful in automatically predicting events in a match. Some example events that we are interested in are
1. Foul, freekick, corner, throw-in, penalty, $\dots$
2. Attack, counter-attack, $\dots$
3. Goal, offside, $\dots$

We need to extract these events by observing how the features of interest evolve with time; thus, we can categorize our aim as a time series analysis task. Furthermore, if our predictions are to be used in real time, we should be able to predict them in a time frame of 2-3 seconds before the event to 2-3 seconds after, at the latest. In addition, we may use certain events that are easier to infer as prerequisites for other, more complex events. Attacks may be prerequisite events for corners, offsides, penalties, etc. This may make the event prediction system more robust. Some example data with the features we will analyze are as follows:
###Code
feature_df.head(5)
###Output
_____no_output_____
###Markdown
If we assume that majority of the match has **the same structure** in a **predefined set of features**, then we can find outliers to predict some events. We extend these definitions:
1. **The same structure:** We may assume that in majority of the match, average position of all players are close to the middle of the pitch. (Maybe a multivariate Gaussian with a diagonal covariance matrix). Then, outliers according to this model may indicate attacks, and specific events related to attacks such as corners, freekicks, penalties, etc.
2. **Predefined set of features:** Average position of all players is a combination of two of the features we have. We may find relations between other feature(s), or set of features, and events. Examples may be
 * Referee having close to $0$ speed for several seconds indicating free-kicks, etc.
 * Very high speeds by multiple players from both teams indicating a counter-attack or a key position.

Ideas and Notes on How to Detect Certain Events
Notes about how to detect events and event characteristics.
* Italic terms are to be defined numerically.

1. Corner
* One player waiting at the corner kick spot for _some time_ with _low speed_.
* Fairly _steady positions_ and _low speeds_ **into** sprint or HIR or _high acceleration_ for a _short period_.
* Can be done rule based?

2. Free Kick
* One (or two, three) player(s) waiting _close_ and _steady_.
* Other players are not breaking the _structure_, they move within the structure.
* Fairly _steady positions_ and _low speeds_ **into** sprint or HIR or _high acceleration_ for a _short period_.
* Can be done rule based?

3. Not Getting Data For A Player
 i. For a _Short Period_
* Cannot go too far. Use previous position?
* Can infer throw-ins if a player gets lost and comes back within a _short period_ of time.
 ii. For a _Long Period_
* Injury? Maybe he is getting treatment.

4. Goal
 i. Goal Celebration
* Goal scoring team has _very low_ convex area and player-to-player distance for a _long period_ of time.
* After goal celebration, both teams meet in the middle in full formation.
* Not always the mentioned pattern.

5. Player Change
* From either one of the teams, one player goes off, a new player comes in.

Histograms About Various Features
We plot histograms about some features to have a general idea about the data we have.

Average Player Position
Average player position is concentrated on the middle of the pitch, as expected. Very high or very low average player position may indicate attacks and events related to attacks, such as corners, freekicks, etc.
###Code
width = 8
plt.figure(figsize=(width, width*(68/105))) # 105mx68m football pitch
plt.hist2d(feature_df['homeAvgX'], feature_df['homeAvgY'], cmap='hot', bins=16)
plt.xlabel('Horizontal (meters)')
plt.ylabel('Vertical (meters)')
plt.colorbar();
###Output
_____no_output_____
###Markdown
Spread in $x$ and $y$Spread in $y$ is much more uniform than spread in $x$, which is very concentrated around specific values. Outliers in these statistics may prove useful in predicting events related to team spread in $x$ or $y$.
###Code
spreadX = feature_df['homeMaxX'] - feature_df['homeMinX']
spreadY = feature_df['homeMaxY'] - feature_df['homeMinY']
plt.hist(spreadX, label='spreadX', alpha=0.8)
plt.hist(spreadY, label='spreadY', alpha=0.8)
plt.legend()
plt.title('Spread in $x$ and $y$');
###Output
_____no_output_____
###Markdown
Maximum $x$ and $y$Viewing histograms of maximum and minimum $x$ and $y$ values can give us an idea about the general positional structure in the match.
###Code
plt.figure(figsize=(12, 6))
cols = ['homeMaxX', 'homeMinX', 'homeMaxY', 'homeMinY']
num_c = len(cols)
for i in range(num_c):
plt.subplot(num_c//2, num_c//2, i + 1)
plt.hist(feature_df[cols[i]])
plt.title(cols[i])
plt.tight_layout()
###Output
_____no_output_____
###Markdown
The distributions of **playerMaxY** and **playerMinY** may indicate that the vertical distance between the top and the bottom player does not change significantly; teams tend to keep their vertical structure. Time Analysis of Various FeaturesWe try to find patterns in some features that may indicate certain events of interest. Team Inner DistanceDistance sum of players in a team is stored in two features, **homeInnerDistance** and **awayInnerDistance**, and calculated as:$$D_{team_i} = \sum_{j=0}^{10}\sum_{k=j+1}^{10} dist(j, k).$$Significant decreases in inner distance may indicate some events. One such example is goal celebration:
###Code
# get goal minutes
goal_event_ids = [80, 96]
goal_event_mask = np.in1d(event_df['eventId'].values, goal_event_ids)
goal_events = event_df[goal_event_mask]
goal_mins = (goal_events['minute'] + goal_events['second']/60).values
# feature data
dist = feature_df['homeInnerDistance']
mins = feature_df['minute'] + feature_df['second']/60
threshold = 400
# plot minutes vs. distance
plt.plot(mins, dist, '.', markersize='2.25')
# horizontal line to see easier
plt.axhline(threshold, color='red')
plt.xlabel('Time (minutes)')
plt.ylabel('Inner Distance (meters)')
print('Minutes that have inner distance less than {}: {}'
.format(threshold, np.unique(mins[dist < threshold].astype(int))))
print('Goal minutes: {}'.format(goal_mins.astype(int)))
###Output
Minutes that have inner distance less than 400: [15 16 22 23 24 25 35 36 37 43 50 63 81 87]
Goal minutes: [23 25 62 80 87]
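###Markdown
For reference, the double sum above is simply the sum of all pairwise distances within a team. Below is a minimal sketch of computing it from an 11x2 array of player coordinates; the array is made up for illustration, since the feature file already provides the precomputed **homeInnerDistance** and **awayInnerDistance** columns.
###Code
import numpy as np
from scipy.spatial.distance import pdist

# hypothetical (x, y) positions of 11 players on a 105m x 68m pitch
team_xy = np.random.uniform(low=(0, 0), high=(105, 68), size=(11, 2))

# sum over all unordered player pairs, i.e. the D_team double sum above
inner_distance = pdist(team_xy).sum()
print('Inner distance: {:.1f} meters'.format(inner_distance))
###Output
_____no_output_____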
###Markdown
Yet, this statistic is not indicative of goals by itself.
###Code
dist = feature_df['awayInnerDistance']
plt.plot(mins, dist, '.');
plt.axhline(threshold, color='red')
print('Unique minutes that have inner distance less than {}: {}'\
.format(threshold, np.unique(mins[dist < threshold].astype(int))))
print('Goal minutes: {}'.format(goal_mins.astype(int)))
###Output
Unique minutes that have inner distance less than 400: [ 3 17 35 37 44 58 59]
Goal minutes: [23 25 62 80 87]
###Markdown
Difference of Inner Distances
Difference of inner distance sums can be used in predicting which team has ball possession, and specifically which team is on the attack. Teams that have the ball generally have a higher inner distance sum since they tend to
1. have players on the top and bottom wings away from the other players
2. have attacking players close to the opponent's goalpost while their defence players wait close to the middle of the pitch

Below we compare the difference of inner distances and actual **hasBall** event data. Positive event data means the home team has the ball. Thus, we expect the signs of the event data and the inner distance differences to be the same.
###Code
# get home team id
hasball_df = pd.read_csv('../data/hasball/{}_hasball.csv'.format(match_id))
home_id = (hasball_df[hasball_df['teamPoss'] == 1]['hasballTeam'].values[0])
# hasball events
hb_df = event_df.loc[event_df['eventId'] == 2].copy()
# combine minutes and seconds to floats
hb_df['minute'] += hb_df['second']/60
hb_df = hb_df[['teamId', 'minute']]
hb_df.reset_index(drop=True, inplace=True)
teamId, minute = hb_df.values.T # get underlying columns as np arrays
inner_diff = feature_df['homeInnerDistance'] - feature_df['awayInnerDistance']
max_diff = np.percentile(inner_diff, 99)
min_diff = np.percentile(inner_diff, 1)
# set team ids to found percentiles for visualization
teamId[teamId == home_id] = max_diff
teamId[teamId != max_diff] = min_diff
# plot
plt.figure(figsize=(12, 6))
plt.xlim([0, 20])
plt.plot(mins, inner_diff, '.', label='Inner distance difference')
plt.plot(minute, teamId, 'o', label='Event data')
plt.axhline(0, color='r')
plt.grid()
plt.legend();
###Output
_____no_output_____
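###Markdown
To put a number on the agreement suggested above, a minimal sketch follows. It assumes the variables from the previous cell are still in scope, that the feature rows are ordered in time, and that `max_diff > 0 > min_diff` so the sign of `teamId` encodes the team.
###Code
import numpy as np

# match each hasball event to the nearest-or-next feature timestamp and compare signs
home_has_ball = teamId > 0
idx = np.clip(np.searchsorted(np.asarray(mins), minute), 0, len(mins) - 1)
home_spread_larger = inner_diff.values[idx] > 0
print('Sign agreement: {:.1%}'.format(np.mean(home_has_ball == home_spread_larger)))
###Output
_____no_output_____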
###Markdown
Maximum and Minimum $x$ ThresholdingSignificantly low **maximum $x$** values may indicate an attack, specifically corner kicks or throw-ins very close to the outline, by one team whereas significantly high **minimum $x$** values may indicate an attack by the other.Below we see playerMaxX and playerMinX values.
###Code
attack_threshold = 40
plt.figure(figsize=(12, 3))
cols = {
'homeMaxX' : ([14, 24], attack_threshold),
'homeMinX' : ([20, 30], 105 - attack_threshold),
}
i = 1
for col, (lim, threshold) in cols.items():
plt.subplot(1, 2, i)
plt.xlim(lim) # limits to zoom in
plt.title(col)
plt.plot(mins, feature_df[col], '.')
plt.axhline(threshold, color='red');
i += 1
###Output
_____no_output_____
###Markdown
DifferentialSignificant decreases or increases on playerMaxX or playerMinX may indicate counter attacks or very fast-developing attacks and key positions.
###Code
def plot_differential(mins, cols, *args, dt=1, **kwargs):
'''
Plot the differentials of the columns of a pandas DataFrame or Series.
Data points should be one second apart.
Parameters
----------
cols: pandas DataFrame or Series object containing arrays in its columns
whose differentials will be plotted.
*args: Forwarded to matplotlib.pyplot.plot
dt: Time interval between two data points in the differential, in seconds.
Must be a positive integer.
**kwargs: Forwarded to matplotlib.pyplot.plot
'''
cols = pd.DataFrame(cols)
not_int = dt != int(dt)
if not_int or dt <= 0:
raise ValueError('dt must be a positive integer')
for cname in cols:
arr = cols[cname].values
differential = (arr[dt::dt] - arr[0:-dt:dt])
time_scale = mins[dt::dt]
plt.plot(time_scale, differential, *args, **kwargs, label=cname)
plt.legend();
plot_differential(mins, feature_df[['homeMaxX', 'homeMaxY']], dt=1)
###Output
_____no_output_____
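###Markdown
A minimal sketch of turning "significant decreases or increases" into a flag: mark the seconds where **homeMaxX** changes by more than some amount within a short window. The 15 m and 5 s values below are arbitrary illustration choices.
###Code
import numpy as np

dt = 5          # seconds between compared samples; arbitrary
jump_size = 15  # meters; arbitrary

arr = feature_df['homeMaxX'].values
jumps = arr[dt:] - arr[:-dt]
flagged_minutes = np.asarray(mins)[dt:][np.abs(jumps) > jump_size]
print(np.unique(flagged_minutes.astype(int)))
###Output
_____no_output_____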
###Markdown
Referee Position for Penalty Detection
Referee position, more so the $x$ coordinate, can be used for penalty detection. Referees tend not to stay in one position, such as the predetermined "penalty position", for _prolonged periods_. Referee position may also be combined with referee speed.
###Code
penalty_events = event_df[event_df['eventId'] == 93]
if penalty_events.empty:
print('No penalty event in this match')
else:
penalty_min = int(penalty_events['minute'].iloc[0])
print('Penalty at {}'.format(penalty_min))
max_min = feature_df['minute'].max()
cols = {
'refX' : [0, 100],
'refY' : [0, 100],
'refSpeed': [0, 10],
}
plt.figure(figsize=(10, 7))
i = 1
for col, lim in cols.items():
plt.subplot(2, 2, i)
plt.title(col)
plt.xlim([max(penalty_min - 5, 0), min(penalty_min + 5, max_min)])
plt.ylim(lim)
plt.plot(mins, feature_df[col], '.')
plt.axvline(penalty_min - 2, color='red')
plt.axvline(penalty_min + 2, color='red')
i += 1
plt.tight_layout()
###Output
Penalty at 25
###Markdown
convexFarDistance
Low **convexFarDistance** values can be used to determine when the players pack together and the whole game is played on a very small area. This may in turn be useful in predicting
1. important fouls
2. goal kicks

However, low **convexFarDistance** values may also point to parts of the game where both teams are trying to keep hold of the ball in the middle of the pitch and not many important events are happening. Thus, it may not be an indicative feature by itself.
###Code
plt.plot(mins, feature_df['playerConvexFarDistance']);
plt.grid();
###Output
_____no_output_____
###Markdown
convexClosestDistance Using Along with convexFarDistanceWe can check where $convexFarDistance + convexClosestDistance$ takes low values to see where both **convexFarDistance** and **convexClosestDistance** takes low values. This may allow us to identify parts where* furthest player is close to the convex center* closest player is very close to the convex center
###Code
plt.plot(
mins,
feature_df['playerConvexFarDistance'] + feature_df['playerConvexClosestDistance'],
label='convexFarDistance + convexClosestDistance'
)
plt.plot(mins, feature_df['playerConvexFarDistance'])
plt.legend()
plt.grid()
plt.xlim([75, 85]);
###Output
_____no_output_____
###Markdown
convexMaxSpeed
_Prolonged_ periods of _very low_ **convexMaxSpeed** may indicate events that stop the game or events that happen after a game stop, such as
1. out, goal kick
2. foul
3. freekick, penalty, corner, etc.
4. player switching
###Code
plt.figure(figsize=(12, 3))
plt.plot(mins, feature_df['playerConvexMaxSpeed'], '.', markersize='1.5')
plt.axhline(2, color='r');
plt.legend();
###Output
_____no_output_____
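###Markdown
One way to make "_prolonged_ periods of _very low_ **convexMaxSpeed**" concrete is to extract contiguous runs below a threshold and keep only the long ones. The 2 m/s threshold matches the red line above; the 10-second minimum duration is an arbitrary illustration value (data points are one second apart, as noted earlier).
###Code
import numpy as np

speed = feature_df['playerConvexMaxSpeed'].values
low = (speed < 2).astype(int)

# run starts where the low-mask flips 0 -> 1, run ends where it flips 1 -> 0
edges = np.diff(np.r_[0, low, 0])
starts = np.where(edges == 1)[0]
ends = np.where(edges == -1)[0]

min_duration = 10  # seconds; arbitrary illustration value
for s, e in zip(starts, ends):
    if e - s >= min_duration:
        print('low convexMaxSpeed from minute {:.2f} for {} seconds'.format(mins.iloc[s], e - s))
###Output
_____no_output_____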
###Markdown
Bitcoin Battle Royale - Getting started (Python 2 notebook) Welcome to Bitcoin Battle Royale, a contest where you have to predict the probability of bitcoin moving more than 0.25% over the next 30min.This notebook aims to provide a few code snippets you can use to download data from s3, and also a few ways to interact with the feature set.You don't have to use any of this code, this should only be considered as a quick start guide...If you have any questions, ask Eben or Mike :) How to download data from S3 This notebook references a config.ini file in the config folder. You should enter your AWS access key and secret key in the config.ini file. If you wanted to use ConfigParser to load your credentials, this is what it would look like:
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import statsmodels.graphics.tsaplots as tsa
import configparser, os
Config = configparser.ConfigParser(allow_no_value=True)
Config.read(os.path.join(os.path.dirname(os.getcwd()), 'config', 'config.ini'))
aws_access_key_id = Config.get("aws_credentials", "aws_access_key_id")
aws_secret_access_key = Config.get("aws_credentials", "aws_secret_access_key")
bucket_name_value = Config.get("aws_credentials", "bucket_name_value")
###Output
_____no_output_____
###Markdown
To download data, we're going to create a download folder on our local machine...
###Code
download_folder = os.path.join(os.path.dirname(os.getcwd()), 'data')
if not os.path.exists(download_folder):
os.makedirs(download_folder)
print("Created download folder", download_folder)
else:
print("Download folder already exists", download_folder)
###Output
Download folder already exists C:\Users\Brad Deutsch\Documents\Projects\btc_battle\data
###Markdown
Great! Now we're ready to download data from s3.The following function makes it easy to list all the bitcoin files available for download in our s3 bucket...
###Code
import boto
from boto.s3.key import Key
from boto.s3.connection import OrdinaryCallingFormat
def download_most_recent_tick_data_files(download_destination=download_folder,
number_of_files_to_download=5):
# establish a connection using our access keys...
conn = boto.connect_s3(aws_access_key_id,
aws_secret_access_key,
calling_format=OrdinaryCallingFormat())
# connect to our bucket...
bucket = conn.get_bucket(bucket_name_value)
# connect to the folder that contains all the bitcoin trading data...
most_recent_files = sorted([k.key for k in boto.s3.bucketlistresultset.bucket_lister(bucket,
prefix="featuredata/GDAX/")],
reverse=True)[0:int(number_of_files_to_download)]
for most_recent_file_key in most_recent_files:
k = Key(bucket, most_recent_file_key)
k.get_contents_to_filename(os.path.join(download_destination,
most_recent_file_key.split("/")[-1]))
print("Downloaded to local...", most_recent_file_key)
# call the function...
download_most_recent_tick_data_files()
###Output
('Downloaded to local...', u'featuredata/GDAX/2018-04-25-GDAX.finalfeatures.csv')
('Downloaded to local...', u'featuredata/GDAX/2018-04-24-GDAX.finalfeatures.csv')
('Downloaded to local...', u'featuredata/GDAX/2018-04-23-GDAX.finalfeatures.csv')
('Downloaded to local...', u'featuredata/GDAX/2018-04-22-GDAX.finalfeatures.csv')
('Downloaded to local...', u'featuredata/GDAX/2018-04-21-GDAX.finalfeatures.csv')
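###Markdown
The helper above uses the legacy `boto` (version 2) package. If only `boto3` is available in your environment, a roughly equivalent sketch would look like this (same bucket and key prefix assumed; note that `list_objects_v2` returns at most 1000 keys per call):
###Code
import os
import boto3

def download_most_recent_tick_data_files_boto3(download_destination=download_folder,
                                               number_of_files_to_download=5):
    # boto3 equivalent of the boto2 helper above
    s3 = boto3.client('s3',
                      aws_access_key_id=aws_access_key_id,
                      aws_secret_access_key=aws_secret_access_key)
    response = s3.list_objects_v2(Bucket=bucket_name_value, Prefix="featuredata/GDAX/")
    keys = sorted((obj['Key'] for obj in response.get('Contents', [])), reverse=True)
    for key in keys[:int(number_of_files_to_download)]:
        s3.download_file(bucket_name_value, key,
                         os.path.join(download_destination, key.split("/")[-1]))
        print("Downloaded to local...", key)
###Output
_____no_output_____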
###Markdown
Quick description of features...
###Code
import pandas as pd
# pick the most recent file for analysis...
most_recent_tick_data_file = os.path.join(download_folder,
sorted(os.listdir(download_folder), reverse=True)[0])
data_df = pd.read_csv(most_recent_tick_data_file)
print(most_recent_tick_data_file, "df loaded, shape", data_df.shape)
###Output
C:\Users\Brad Deutsch\Documents\Projects\btc_battle\data\2017-12-16-GDAX.finalfeatures.csv df loaded, shape (60761, 115)
###Markdown
Plot of prices
###Code
# convert times to datetimes
data_df['time'] = pd.to_datetime(data_df['time'])
from datetime import timedelta
def hhmax(row):
ser = data_df['price'][(data_df['time'] > row) &
(data_df['time'] <= row + timedelta(minutes=30))]
return ser.max()
data_df['MaxY'] = data_df['time'].apply(hhmax)
from datetime import timedelta
import numpy as np

def label_30min_move(row):
    """Label each tick by whether price moves by at least 0.25% (down, up, both, or neither)
    over the following 30 minutes."""
    ser = data_df['price'][(data_df['time'] > row) &
                           (data_df['time'] <= row + timedelta(minutes=30))]
    if ser.empty:                      # no ticks in the next 30 minutes (end of the file)
        return np.nan
    minY = np.min(ser)
    maxY = np.max(ser)
    currentY = ser.iloc[0]             # first price inside the 30-minute window
    thresh_low = currentY * (1 - 0.0025)
    thresh_hi = currentY * (1 + 0.0025)
    exceeds_low = minY <= thresh_low   # dropped by at least 0.25%
    exceeds_hi = maxY >= thresh_hi     # rose by at least 0.25%
    if (not exceeds_low) and (not exceeds_hi):
        target = 0                     # stayed within +/- 0.25%
    elif exceeds_low and (not exceeds_hi):
        target = 1                     # only moved down
    elif (not exceeds_low) and exceeds_hi:
        target = 2                     # only moved up
    else:
        target = 3                     # moved both ways
    return target

data_df['target'] = data_df['time'].apply(label_30min_move)
if 1==0:
a = 1
elif 1 == 1:
a = 2
# define plotting function
def tsplot2(y, title, lags=None, figsize=(12, 8)):
"""
Credit to Jeffrey Yau, PyData NYC 2017, "Time Series Forecasting using Statistical and Machine Learning Models"
Examine the patterns of ACF and PACF, along with the time series plot and histogram.
:param y:
:param title:
:param lags:
:param figsize:
:return:
"""
fig = plt.figure(figsize=figsize)
layout = (2,2)
ts_ax = plt.subplot2grid(layout, (0,0))
hist_ax = plt.subplot2grid(layout, (0, 1))
acf_ax = plt.subplot2grid(layout, (1, 0))
pacf_ax = plt.subplot2grid(layout, (1, 1))
y.plot(ax=ts_ax)
ts_ax.set_title(title, fontsize=14, fontweight='bold')
y.plot(ax=hist_ax, kind='hist', bins=25)
hist_ax.set_title('Histogram')
tsa.plot_acf(y, lags=lags, ax=acf_ax)
tsa.plot_pacf(y, lags=lags, ax=pacf_ax)
[ax.set_xlim(0) for ax in [acf_ax, pacf_ax]]
sns.despine()
plt.tight_layout()
return ts_ax, acf_ax, pacf_ax
data = pd.read_csv('target_test.csv')
layout = (1,2)
price_ax = plt.subplot2grid(layout, (0,0))
target_ax = plt.subplot2grid(layout, (0,1))
data['price'][:10000].plot(ax=price_ax)
data['target'][:10000].plot(ax=target_ax)
# for each series, plot a time series, histogram, autocorr, and partial autocorr
tsplot2(data_df['target'].dropna(), title='MaxY', lags=100)
# we could just dump all the feature names in alphabetical order...
for feature in sorted(data_df.columns.values):
print(feature)
###Output
0th_larg_order
0th_larg_order_1000_rar
0th_larg_order_100_rar
0th_larg_order_10_rar
1th_larg_order
1th_larg_order_1000_rar
1th_larg_order_100_rar
1th_larg_order_10_rar
2th_larg_order
2th_larg_order_1000_rar
2th_larg_order_100_rar
2th_larg_order_10_rar
3th_larg_order
3th_larg_order_1000_rar
3th_larg_order_100_rar
3th_larg_order_10_rar
4th_larg_order
4th_larg_order_1000_rar
4th_larg_order_100_rar
4th_larg_order_10_rar
5th_larg_order
5th_larg_order_1000_rar
5th_larg_order_100_rar
5th_larg_order_10_rar
6th_larg_order
6th_larg_order_1000_rar
6th_larg_order_100_rar
6th_larg_order_10_rar
7th_larg_order
7th_larg_order_1000_rar
7th_larg_order_100_rar
7th_larg_order_10_rar
8th_larg_order
8th_larg_order_1000_rar
8th_larg_order_100_rar
8th_larg_order_10_rar
9th_larg_order
9th_larg_order_1000_rar
9th_larg_order_100_rar
9th_larg_order_10_rar
Unnamed: 0
last_size
last_size_1000_rar
last_size_100_rar
last_size_10_rar
max_price
max_price_1000_rar
max_price_100_rar
max_price_10_rar
max_volume
max_volume_1000_rar
max_volume_100_rar
max_volume_10_rar
mean_price
mean_price_1000_rar
mean_price_100_rar
mean_price_10_rar
mean_volume
mean_volume_1000_rar
mean_volume_100_rar
mean_volume_10_rar
median_price
median_price_1000_rar
median_price_100_rar
median_price_10_rar
median_volume
median_volume_1000_rar
median_volume_100_rar
median_volume_10_rar
min_price
min_price_1000_rar
min_price_100_rar
min_price_10_rar
min_volume
min_volume_1000_rar
min_volume_100_rar
min_volume_10_rar
price
price_1000_rar
price_100_rar
price_10_rar
price_index
price_index_1000_rar
price_index_100_rar
price_index_10_rar
price_spread
price_spread_1000_rar
price_spread_100_rar
price_spread_10_rar
side
std_price
std_price_1000_rar
std_price_100_rar
std_price_10_rar
std_volume
std_volume_1000_rar
std_volume_100_rar
std_volume_10_rar
time
volume_over_price
volume_over_price_1000_rar
volume_over_price_100_rar
volume_over_price_10_rar
volume_spread
volume_spread_1000_rar
volume_spread_100_rar
volume_spread_10_rar
volume_under_price
volume_under_price_1000_rar
volume_under_price_100_rar
volume_under_price_10_rar
vwp
vwp_1000_rar
vwp_100_rar
vwp_10_rar
###Markdown
Key points:
1. Whenever you see a _rar value, remember that it refers to rolling average return by time, e.g. 10m = 10 minutes, 1m = 1 minute, 60m = an hour...
2. "larg_order" features refer to the orderbook on GDAX, largest orders, already normalized
3. "last_size" refers to the size of the most recent trade in BTC
4. "max_price", "min_price", "std_price", "max_volume", "mean_price" etc. all refer to statistical properties of recent price and volume trends
5. "side" is a variable that shows if the recent trade was a buy order or a sell order...
6. "time" is the UTC timestamp of the trade
7. "volume_over_price" and "volume_under_price" - ?
8. "vwp" refers to volume weighted price

Display plots for feature interaction
###Code
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = [10, 10]
feature_categories_to_plot = ["_larg_order_10_rar", "_price_10_rar", "_volume_10_rar"]
for feature_category in feature_categories_to_plot:
features_to_scan = sorted([h for h in data_df.columns.values if h.find(feature_category) != -1])[0:4]
data_df[features_to_scan].hist()
###Output
_____no_output_____
###Markdown
...notice how very few of these features look like they have a "normal" distribution...this is likely due to the impact of outliers...One quick and easy way to filter for outliers is to simply remove the bottom x% and the top x% of a feature set...
###Code
import numpy as np
import warnings
warnings.filterwarnings('ignore')
feature_categories_to_plot = ["_larg_order_10_rar", "_price_10_rar", "_volume_10_rar"]
for feature_category in feature_categories_to_plot:
features_to_scan = sorted([h for h in data_df.columns.values if h.find(feature_category) != -1])[0:4]
outliers_removed_df = data_df[features_to_scan].copy()
outliers_removed_df.dropna(inplace=True)
for feature_to_scan in features_to_scan:
lower_bound = np.percentile(outliers_removed_df[feature_to_scan], 10)
upper_bound = np.percentile(outliers_removed_df[feature_to_scan], 90)
print(feature_to_scan, "lower_bound", lower_bound, "upper_bound", upper_bound)
outliers_removed_df[feature_to_scan] = outliers_removed_df[feature_to_scan].map(lambda x: lower_bound if x < lower_bound
else upper_bound if x > upper_bound
else x)
outliers_removed_df[features_to_scan].hist()
# some of the feature sets have very high correlations with other feature sets...
# this is a function that takes a feature set as a string tag and generates correlation matrices
import seaborn as sns
def generate_correlation_plot(feature_category_to_plot):
correlation_df = data_df.copy()
headers_to_plot = sorted([h for h in correlation_df.columns.values if h.find(feature_category_to_plot) != -1])
correlation_df = correlation_df[headers_to_plot]
correlation_df.dropna(inplace=True)
corr = correlation_df.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
generate_correlation_plot("_volume_")
generate_correlation_plot("_price_")
generate_correlation_plot("_larg_order_10_")
###Output
_____no_output_____
###Markdown
Feature Selection
###Code
def sort_desc_print(tuple_list):
s_list = sorted([(feature, score) for feature, score in tuple_list], key=lambda tup: tup[1], reverse=True)
print('\n'.join(map(lambda v: v[0] + ': ' + str(v[1]), s_list)))
###Output
_____no_output_____
###Markdown
Pearson Correlation
###Code
from sklearn.feature_selection import f_regression

f_test, p_value = f_regression(ind_df, dep_df)
sort_desc_print(zip(ind_df.columns, f_test))
###Output
Sex: 372.405723602
Pclass_3: 103.057599394
Cabin_nan: 99.2531423426
Pclass_1: 79.1364033091
Fare: 63.030764228
Cabin_B: 28.1172283504
Embarked_C: 25.8959869956
Embarked_S: 22.0754685855
Cabin_D: 20.6631861322
Cabin_E: 20.6631861322
Cabin_C: 11.8416605341
Pclass_2: 7.8148047228
Embarked_Q: 0.0118463439904
###Markdown
Mutual Information (MIC)
###Code
from sklearn.feature_selection import mutual_info_regression

mi = mutual_info_regression(ind_df, dep_df)
mi /= np.max(mi)
sort_desc_print(zip(ind_df.columns, mi))
###Output
Sex: 1.0
Fare: 0.813183007255
Cabin_nan: 0.652875428308
Pclass_3: 0.306936307782
Pclass_1: 0.291382846285
Cabin_D: 0.239848945013
Cabin_C: 0.172495119254
Cabin_B: 0.14044352665
Pclass_2: 0.0661308728842
Cabin_E: 0.0403480347598
Embarked_Q: 0.0129620396289
Embarked_C: 0.0
Embarked_S: 0.0
###Markdown
Random Forests
###Code
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor(n_estimators=1000, max_depth=100, n_jobs=4)
model.fit(ind_df, dep_df)
sort_desc_print(zip(ind_df.columns, model.feature_importances_))
###Output
Fare: 0.401696507844
Sex: 0.368575265298
Pclass_3: 0.100095863165
Cabin_nan: 0.0395412781757
Embarked_S: 0.0210514895722
Embarked_C: 0.0163977530968
Cabin_E: 0.0101863231028
Cabin_C: 0.00966006947128
Pclass_1: 0.00720195636071
Pclass_2: 0.00705191613979
Embarked_Q: 0.00697178649388
Cabin_D: 0.00590087636242
Cabin_B: 0.00566891491751
###Markdown
Look for Collinearity
###Code
import seaborn as sns
%matplotlib inline
# calculate the correlation matrix
corr = ind_df.corr()
# plot the heatmap
fig, ax = plt.subplots()
# the size of A4 paper
fig.set_size_inches(11.7, 8.27)
sns.heatmap(corr,
xticklabels=corr.columns,
yticklabels=corr.columns,
ax=ax)
sns.despine()
corr_df = ind_df.corr()
import itertools
corr_tups = dict()
for colA, colB in itertools.product(corr_df.columns, corr_df.index):
if colA == colB or (colA, colB) in corr_tups or (colB, colA) in corr_tups:
continue
corr_tups[(colA, colB)] = corr_df.loc[colA, colB]
sort_desc_print([(':'.join(key), val) for key, val in corr_tups.items() if abs(val) > 0.5])
###Output
Fare:Pclass_1: 0.591710718884
Pclass_3:Cabin_nan: 0.539290725364
Pclass_2:Pclass_3: -0.565210153554
Pclass_1:Pclass_3: -0.62673758464
Embarked_C:Embarked_S: -0.778358996113
Pclass_1:Cabin_nan: -0.788773006897
|
netflix-movies-tv-shows/netflix-eda.ipynb | ###Markdown
[Read up to find out more about missing values](https://www.kaggle.com/meikegw/filling-up-missing-values)
###Code
# Handling the missing data for director and the cast as this data is not of much relavence
df.drop(['director', 'cast'], axis=1, inplace=True)
# Handling the missing coutry data
df['country'].fillna('United States', inplace=True)
# Added the one missing value with most freq date
df['date_added'].fillna('January 1, 2020', inplace=True)
# Done handling all the missing values
missingno.matrix(df)
# Individual attributes
# Country
top_10_country = df['country'].value_counts().head(10)
sns.barplot(top_10_country.values, top_10_country.index)
# Types
fig = plt.figure(figsize=(20,1))
sns.countplot(y='type', data=df)
print(df['type'].value_counts())
# Rating
fig = plt.figure(figsize=(20,8))
sns.countplot(y='rating', data=df)
print(df['rating'].value_counts().head())
# Getting a view on the release year
fig = plt.figure(figsize=(13, 6))
fig = sns.countplot(x='release_year', data=df)
fig.set_xticklabels(fig.get_xticklabels(), rotation=90);
fig = plt.figure(figsize=(8,8))
sns.countplot(data=df, x='rating', hue='type')
# Percentage of ratings given to each rating
df['rating'].value_counts().plot.pie(autopct='%1.1f%%', figsize=(10,10));
###Output
_____no_output_____ |
notebooks/analysis/2_metro_by_metro_regression.ipynb | ###Markdown
1. Import Data HMDA Data that contains all dummy variables
###Code
hmda19_df = pd.read_csv('../../data/hmda_lar/cleaned_data/3_hmda2019_regressiondata_210823.csv',
dtype = {'app_credit_model': str, 'metro_code': str})
hmda19_df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 2433071 entries, 0 to 2433070
Data columns (total 63 columns):
# Column Dtype
--- ------ -----
0 black float64
1 latino float64
2 asian_cb float64
3 native float64
4 race_na float64
5 no_coapplicant float64
6 na_coapplicant float64
7 female float64
8 sex_na float64
9 less_than25 float64
10 between25_34 float64
11 between45_54 float64
12 between55_64 float64
13 older_than65 float64
14 age_na float64
15 income_log float64
16 loan_log float64
17 property_value_ratio float64
18 less30yrs_mortgage float64
19 more30yrs_mortgage float64
20 equifax float64
21 experian float64
22 other_model float64
23 more_than_one float64
24 model_na float64
25 dti_manageable float64
26 dti_unmanageable float64
27 dti_struggling float64
28 combined_loan_to_value_ratio float64
29 moderate_lmi float64
30 middle_lmi float64
31 low_lmi float64
32 credit_union float64
33 independent float64
34 lender_na float64
35 lar_count int64
36 non_desktop float64
37 aus_na float64
38 white_cat2 float64
39 white_cat3 float64
40 white_cat4 float64
41 metro_90th float64
42 metro_80th float64
43 metro_70th float64
44 metro_60th float64
45 metro_50th float64
46 metro_40th float64
47 metro_30th float64
48 metro_20th float64
49 metro_10th float64
50 metro_less10th float64
51 micro_area float64
52 metro_none float64
53 denied float64
54 loan_outcome int64
55 younger_than_34 float64
56 older_than_55 float64
57 not30yr_mortgage float64
58 metro_code object
59 lei object
60 app_race_ethnicity int64
61 app_credit_model object
62 property_value_ratio.1 float64
dtypes: float64(57), int64(3), object(3)
memory usage: 1.1+ GB
###Markdown
Metro Data
###Code
metros_df = pd.read_csv('../../data/census_data/county_to_metro_crosswalk/clean/all_counties_210804.csv',
dtype = str)
metros_df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 3225 entries, 0 to 3224
Data columns (total 14 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 fips_state_code 3224 non-null object
1 fips_county_code 3224 non-null object
2 county_name 3220 non-null object
3 state_name 3220 non-null object
4 cbsa_code 1916 non-null object
5 cbsa_title 1916 non-null object
6 csa_code 1256 non-null object
7 csa_title 1256 non-null object
8 metro_type 3225 non-null object
9 metro_code 1916 non-null object
10 metro_name 1916 non-null object
11 metro_pop 1915 non-null object
12 metro_percentile 3225 non-null object
13 metro_type_def 3225 non-null object
dtypes: object(14)
memory usage: 352.9+ KB
###Markdown
2. Reshape metro data Data is at the county level, grouping by metros
###Code
metros_df2 = pd.DataFrame(metros_df.groupby(by = ['metro_code', 'metro_name', 'metro_type', 'metro_pop'],
dropna = False).size()).reset_index().rename(columns = {0: 'count'}).\
drop(columns = {'count'})
metros_df2['metro_pop'] = pd.to_numeric(metros_df2['metro_pop'])
###Output
_____no_output_____
###Markdown
3. Filter HMDA data Filtering those NA's that are less than one percent of the column. These values break at the metro level- 0: Yes- 1: No
###Code
hmda19_df['na_coapplicant'].value_counts(dropna = False, normalize = True) * 100
hmda19_df['age_na'].value_counts(dropna = False, normalize = True) * 100
hmda19_df['lender_na'].value_counts(dropna = False, normalize = True) * 100
hmda19_df2 = hmda19_df[(hmda19_df['na_coapplicant'] != 0) & (hmda19_df['age_na'] != 0) &\
(hmda19_df['lender_na'] != 0)]
###Output
_____no_output_____
###Markdown
Filter out Metros with no code
###Code
hmda19_df3 = hmda19_df2[(hmda19_df2['metro_code'].notnull())]
print(len(hmda19_df3))
###Output
2357940
###Markdown
4. Setup independent variables
###Code
independent_vars = ['black', 'latino', 'native', 'asian_cb', 'race_na',
'female', 'sex_na',
'no_coapplicant',
'younger_than_34', 'older_than_55',
'income_log',
'loan_log',
'property_value_ratio',
'not30yr_mortgage',
'equifax', 'experian', 'other_model', 'more_than_one', 'model_na',
'dti_manageable', 'dti_unmanageable', 'dti_struggling',
'combined_loan_to_value_ratio',
'low_lmi', 'moderate_lmi', 'middle_lmi',
'credit_union', 'independent',
'lar_count',
'non_desktop', 'aus_na',
'white_cat2', 'white_cat3', 'white_cat4']
continuous_vars = ['income_log', 'loan_log', 'combined_loan_to_value_ratio', 'lar_count', 'prop_zscore']
###Output
_____no_output_____
###Markdown
5. Get variable count for each metro
###Code
metros = hmda19_df3['metro_code'].unique()
print('Number of metros: ' + str(len(metros)))
###Output
Number of metros: 959
###Markdown
Count all the independent variables for each metro
###Code
### Excluding continous variables from the counting
independent_vars2 = [var for var in independent_vars if var not in continuous_vars]
metro_var_holder = []
for independent_var in independent_vars2:
index_values = []
index_values.extend(('metro_code', independent_var))
metro_var_df = pd.pivot_table(hmda19_df3, index = index_values, columns = ['loan_outcome'],
values = ['denied'], aggfunc = 'count', fill_value = 0).reset_index()
metro_var_df.columns = metro_var_df.columns.droplevel(0)
metro_var_df.columns.name = None
metro_var_df.columns = ['metro_code', 'variable_flag', 'loan', 'denied']
metro_var_df['variable_name'] = independent_var
metro_var_holder.append(metro_var_df)
metro_varcount_df = pd.concat(metro_var_holder)
metro_varcount_df['metro_code'].nunique()
###Output
_____no_output_____
###Markdown
Add missing records to the variable count dataframe
###Code
metro_varcount_df2 = metro_varcount_df[(metro_varcount_df['variable_flag'] == 0)]
missing_rows_list = []
for metro in metros:
metro_vars_df = metro_varcount_df2[(metro_varcount_df2['metro_code'] == metro)]
metro_vars = metro_vars_df['variable_name'].unique()
### including the continous variables
for reference_var in independent_vars:
if reference_var not in metro_vars:
missing_row = pd.DataFrame([[metro, 0, 0, 0, reference_var]], columns = ['metro_code',
'variable_flag', 'loan', 'denied', 'variable_name'])
missing_rows_list.append(missing_row)
missing_rows_df = pd.concat(missing_rows_list)
metro_varcount_df3 = metro_varcount_df2.append(missing_rows_df)
###Output
_____no_output_____
###Markdown
Find variable total count and percentages
###Code
metro_varcount_df3['total_count'] = metro_varcount_df3['loan'] + metro_varcount_df3['denied']
metro_varcount_df3['loan_pct'] = metro_varcount_df3['loan'].div(metro_varcount_df3['total_count']).multiply(100)
metro_varcount_df3['denied_pct'] = metro_varcount_df3['denied'].\
div(metro_varcount_df3['total_count']).multiply(100)
###Output
_____no_output_____
###Markdown
6. Run regression for all metros
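The loop below relies on three helpers (`create_formula`, `run_regression`, `convert_results_to_df`) that are defined earlier in the notebook and not shown here. A minimal sketch of what they might look like, assuming a statsmodels logit on the binary `denied` outcome, is:
```python
# Hedged sketch only -- the notebook's real helpers live in an earlier, unshown cell.
# Assumes statsmodels' formula API and a 0/1 `denied` outcome column.
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

def create_formula(independent_vars, outcome='denied'):
    # Build an R-style formula string, e.g. "denied ~ black + latino + ..."
    return outcome + ' ~ ' + ' + '.join(independent_vars)

def run_regression(data, formula):
    # Return an unfitted logit model; the loop below calls .fit() itself
    return smf.logit(formula=formula, data=data)

def convert_results_to_df(results):
    # One row per variable: coefficient, standard error, z, p, and odds ratio
    return pd.DataFrame({
        'variable_name': results.params.index,
        'coefficient': results.params.values,
        'standard_error': results.bse.values,
        'z_value': results.tvalues.values,
        'p_value': results.pvalues.values,
        'odds_ratio': np.exp(results.params.values),
    })
```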
###Code
metro_analysis = []
i = 0
for metro in metros:
print(str(i) + ': Metro: ' + metro)
metro_df = hmda19_df3[(hmda19_df3['metro_code'] == metro)]
metro_apps = len(metro_df)
regression_formula = create_formula(independent_vars)
model = run_regression(data = metro_df, formula = regression_formula)
try:
results = model.fit()
info = results.mle_retvals['converged']
results_df = convert_results_to_df(results)
results_df.insert(0, 'metro_code', metro)
results_df.insert(1, 'metro_apps', metro_apps)
results_df.insert(2, 'psuedo_rsquare', results.prsquared)
results_df['iteration_flag'] = info
except:
independent_nan_list = []
for regression_var in independent_vars:
results_dict = {'metro_code': metro, 'metro_apps': metro_apps, 'variable_name': regression_var,
'standard_error': np.nan, 'z_value': np.nan, 'p_value': np.nan, 'odds_ratio': np.nan,
'iteration_flag': np.nan, 'psuedo_rsquare': np.nan}
non_results_df = pd.DataFrame([results_dict], columns = results_dict.keys())
independent_nan_list.append(non_results_df)
results_df = pd.concat(independent_nan_list)
metro_analysis.append(results_df)
i += 1
results_df2 = pd.concat(metro_analysis)
###Output
0: Metro: 35084
Optimization terminated successfully.
Current function value: 0.203528
Iterations 8
1: Metro: 35614
Optimization terminated successfully.
Current function value: 0.260659
Iterations 7
2: Metro: 35154
Optimization terminated successfully.
Current function value: 0.212554
Iterations 8
3: Metro: 39100
Optimization terminated successfully.
Current function value: 0.191431
Iterations 8
4: Metro: 45940
Warning: Maximum number of iterations has been exceeded.
Current function value: 0.218257
Iterations: 35
5: Metro: 20700
###Markdown
7. Join results with metros data Is every variable accounted for:- If a variable has fewer than 959, then it's missing in certain metros
###Code
var_used_check = pd.DataFrame(results_df2['variable_name'].value_counts(dropna = False)).reset_index()
var_used_check[(var_used_check['variable_name'] < 959)]
###Output
_____no_output_____
###Markdown
Join with metro names and var count dataframes
###Code
results_df3 = pd.merge(results_df2, metros_df2, how = 'left', on = ['metro_code'])
results_df4 = pd.merge(results_df3, metro_varcount_df3, how = 'left', on = ['metro_code', 'variable_name'])
###Output
_____no_output_____
###Markdown
8. Categorize Results Filter for metros that don't produce any results- Results Flag 1: No meaningful results, all columns show up as NaN
###Code
results_df4.loc[((results_df4['psuedo_rsquare'].isnull()) & (results_df4['coefficient'].isnull()) & \
(results_df4['standard_error'] .isnull()) & (results_df4['z_value'].isnull()) & \
(results_df4['p_value'].isnull()) & (results_df4['odds_ratio'].isnull())),
'results_flag'] = '1'
metros_no_results = results_df4[(results_df4['results_flag'] == '1')]['metro_code'].nunique()
all_metros = results_df4['metro_code'].nunique()
###Output
_____no_output_____
###Markdown
Metro Results Breakdown:- 709 metros don't produce results- 250 metros produce results
###Code
print('Percent of metros with no results: ' + str(((metros_no_results/all_metros) * 100)))
print('Number of metros that DON\'T produce results: ' + str(metros_no_results))
print('Number of metros that produce results: ' + str(results_df4[(results_df4['results_flag'] != '1')] \
['metro_code'].nunique()))
metros_size_df = pd.DataFrame(results_df4[(results_df4['results_flag'] == '1')].\
groupby(by = ['metro_code', 'metro_apps']).size()).reset_index().rename(columns = {0: 'count'})
metros_size_df['metro_apps'].describe()
###Output
_____no_output_____
###Markdown
Filter for metros that produce results, but where the equation needs work- Results Flag 2: no meaningful results because of the equation
###Code
results_df4.loc[(results_df4['psuedo_rsquare'] < .1) | (results_df4['iteration_flag'] == False),
'results_flag'] = '2'
broken_results_metros = results_df4[(results_df4['results_flag'] == '2')]['metro_code'].nunique()
###Output
_____no_output_____
###Markdown
Metro Results Breakdown:- 122 metros with unreliable results, not enough variance in variables.
###Code
print('Number of metros with broken results: ' + str(broken_results_metros))
print('Percentage of metros with no results: ' + str((broken_results_metros/all_metros) * 100))
###Output
Number of metros with broken results: 122
Percentage of metros with no results: 12.721584984358708
###Markdown
Filter for metros where the equation is valid- Results flag 3: valid results
###Code
results_df4.loc[(results_df4['psuedo_rsquare'] >= .1) & (results_df4['iteration_flag'] == True),
'results_flag'] = '3'
valid_results_metros = results_df4[(results_df4['results_flag'] == '3')]['metro_code'].nunique()
###Output
_____no_output_____
###Markdown
Metro Results Breakdown:- 128 metros with reliable results.
###Code
print('Number of metros with valid results: ' + str(valid_results_metros))
print('Percentage of metros with valid results: ' + str((valid_results_metros/all_metros) * 100))
###Output
Number of metros with valid results: 128
Percentage of metros with valid results: 13.347236704900938
###Markdown
Overall metros breakdown- 1: No Results- 2: Variable Issues- 3: Results
###Code
metro_results_df = pd.DataFrame(results_df4.groupby(by = ['results_flag', 'metro_code']).size()).reset_index().\
rename(columns = {0: 'count'})
metro_results_df['results_flag'].value_counts(dropna = False)
###Output
_____no_output_____
###Markdown
9. Assessing the variables of valid metros Variables where the p-value and z-value are missing- 63 records
###Code
results_df4.loc[(results_df4['results_flag'] == '3') & (results_df4['z_value'].isnull()) &\
(results_df4['p_value'].isnull()), 'variable_check'] = '1'
results_df4['variable_check'].value_counts(dropna = False)
###Output
_____no_output_____
###Markdown
Metros with a variable that has a missing p-value- 17 metros that are valid but have missing p-values in them
###Code
missing_pvalues_metros_df = results_df4[(results_df4['results_flag'] == '3') & \
(results_df4['variable_check'] == '1')]
print(missing_pvalues_metros_df['metro_code'].nunique())
missing_pvalues_metros_df['metro_name'].value_counts(dropna = False)
###Output
17
###Markdown
Variables that are not statistically significant- 2693 records
###Code
results_df4.loc[(results_df4['results_flag'] == '3') & (results_df4['p_value'] >= .05),
'variable_check'] = '2'
results_df4['variable_check'].value_counts(dropna = False)
###Output
_____no_output_____
###Markdown
Filter for variables that are statistically significant but need more applications- Variable Check 3: 448 records
###Code
results_df4.loc[(results_df4['results_flag'] == '3') & (results_df4['p_value'] < .05) &\
(results_df4['total_count'] < 75), 'variable_check'] = '3'
results_df4['variable_check'].value_counts(dropna = False)
###Output
_____no_output_____
###Markdown
Filter those records that are statistically significant but show no disparity- Variable Check 4: 503 records
###Code
results_df4.loc[(results_df4['results_flag'] == '3') & (results_df4['p_value'] < .05) &\
(results_df4['total_count'] >= 75) & (results_df4['odds_ratio'] < 1.45),
'variable_check'] = '4'
results_df4['variable_check'].value_counts(dropna = False)
###Output
_____no_output_____
###Markdown
Filter those records that are statistically significant with a disparity- Variable Check 5: 677 records
###Code
results_df4.loc[(results_df4['results_flag'] == '3') & (results_df4['p_value'] < .05) &\
(results_df4['total_count'] >= 75) & (results_df4['odds_ratio'] >= 1.45),
'variable_check'] = '5'
results_df4['variable_check'].value_counts(dropna = False)
###Output
_____no_output_____
###Markdown
10. Breakdown of statistically valid metros for race and ethnicity- 96 NaN are all Intercept variables
###Code
results_df4[(results_df4['results_flag'] == '3')]['variable_check'].value_counts(dropna = False)
###Output
_____no_output_____
###Markdown
Statistically Valid Metros Race and Ethnicity Breakdown:- 2: Not statistically significant -- 295 records- 5: Statistically significant disparity -- 166 records- 3: Not enough applications -- 25 records- 4: Small disparities -- 23 records- 1: Missing p-values -- 3 records
###Code
races = ['black', 'latino', 'asian_cb', 'native']
results_df5 = results_df4[(results_df4['results_flag'] == '3') & (results_df4['variable_name'].isin(races))]
results_df5['variable_check'].value_counts(dropna = False)
results_df5['metro_code'].nunique()
###Output
_____no_output_____
###Markdown
Breakdown of 128 Metros
###Code
metro_results = pd.DataFrame(results_df5.groupby(by = ['metro_code', 'variable_check']).size()).reset_index().\
rename(columns = {0: 'count'})
###Output
_____no_output_____
###Markdown
Number of metros with no reliable results:
###Code
reliable_metros_df = metro_results[(metro_results['variable_check'] == '5') |\
(metro_results['variable_check'] == '4')]
reliable_metros = reliable_metros_df['metro_code'].unique()
###Output
_____no_output_____
###Markdown
Number of metros where all racial and ethnic variables are not statistically significant- 21 metros
###Code
print(metro_results[~(metro_results['metro_code'].isin(reliable_metros)) & \
(metro_results['count'] == 4)]['metro_code'].nunique())
metro_results[~(metro_results['metro_code'].isin(reliable_metros)) & \
(metro_results['count'] == 4)]['variable_check'].unique()
###Output
21
###Markdown
Number of metros where all racial and ethnic variables are not reliable- 16 metros- Because not statistically significant, not enough applications, or missing a p-value
###Code
print(metro_results[~(metro_results['metro_code'].isin(reliable_metros)) & \
(metro_results['count'] < 4)]['metro_code'].nunique())
metro_results[~(metro_results['metro_code'].isin(reliable_metros)) & \
(metro_results['count'] < 4)]['variable_check'].unique()
###Output
16
###Markdown
Number of metros with valid results- 91 metros
###Code
valid_results_df = metro_results[(metro_results['variable_check'] == '5') |\
(metro_results['variable_check'] == '4')]
valid_results_df['metro_code'].nunique()
###Output
_____no_output_____
###Markdown
Metros with at least one disparity:- 89 metros
###Code
valid_results_df[(valid_results_df['variable_check'] == '5')]['metro_code'].nunique()
###Output
_____no_output_____
###Markdown
Metros where the only valid result is a small disparity- 2 metros
###Code
disparity_metro = valid_results_df[(valid_results_df['variable_check'] == '5')]['metro_code'].unique()
valid_results_df[~(valid_results_df['metro_code'].isin(disparity_metro))]['metro_code'].nunique()
###Output
_____no_output_____
###Markdown
11. Places with small disparities
###Code
print(results_df5[(results_df5['variable_check'] == '4')]['metro_name'].nunique())
results_df5[(results_df5['variable_check'] == '4')]['metro_name'].value_counts(dropna = False)
###Output
18
###Markdown
Places where the only reliable results are small disparities
###Code
small_disparities = valid_results_df[~(valid_results_df['metro_code'].isin(disparity_metro))]['metro_code']
results_df5[(results_df5['metro_code'].isin(small_disparities)) & (results_df5['variable_check'] == '4')]\
[['metro_name', 'variable_name', 'p_value', 'odds_ratio', 'variable_check']]
###Output
_____no_output_____
###Markdown
Smallest disparities overall
###Code
results_df5[(results_df5['variable_check'] == '4')][['metro_name', 'variable_name', 'p_value', 'odds_ratio',
'variable_check']].sort_values(by = ['odds_ratio']).head(5)
###Output
_____no_output_____
###Markdown
Largest disparities overall
###Code
results_df5[(results_df5['variable_check'] == '5')][['metro_name', 'variable_name', 'p_value', 'odds_ratio',
'variable_check']].sort_values(by = ['odds_ratio'],
ascending = False).head(5)
###Output
_____no_output_____
###Markdown
12. Breakdown of metros with disparities Looking at 10 most populous metros
###Code
top10_metros = metros_df2.sort_values(by = ['metro_pop'], ascending = False).head(10)
top10_metros
###Output
_____no_output_____
###Markdown
Of the largest metros, Chicago has the worst disparity for Black applicants. Lenders are 2.5 times more likely to deny Black applicants than similarly qualified White applicants.
###Code
results_df5[(results_df5['metro_code'].isin(top10_metros['metro_code'])) & \
(results_df5['variable_check'] == '5')]\
[['metro_code', 'metro_name', 'metro_pop', 'psuedo_rsquare', 'variable_name', 'p_value', 'odds_ratio']].\
sort_values(by = ['metro_pop', 'odds_ratio'], ascending = False)
###Output
_____no_output_____
###Markdown
Minneapolis is the only metro where all four racial and ethnic groups are more likely to be denied.
###Code
results_df5[(results_df5['variable_check'] == '5')]['metro_name'].value_counts(dropna = False).head(5)
###Output
_____no_output_____
###Markdown
Key Metros Chicago Results
###Code
results_df5[(results_df5['metro_code'] == '16984')][['metro_code', 'metro_name', 'metro_pop', 'metro_apps',
'psuedo_rsquare', 'variable_name', 'p_value', 'odds_ratio']]
###Output
_____no_output_____
###Markdown
Minneapolis Results
###Code
results_df5[(results_df5['metro_code'] == '33460') & ((results_df5['variable_check'] == '4') |\
(results_df5['variable_check'] == '5'))][['metro_code', 'metro_name', 'metro_pop', 'metro_apps',
'psuedo_rsquare', 'variable_name', 'p_value', 'odds_ratio']]
###Output
_____no_output_____
###Markdown
Charlotte Results
###Code
results_df5[(results_df5['metro_code'] == '16740') & ((results_df5['variable_check'] == '4') |\
(results_df5['variable_check'] == '5'))][['metro_code', 'metro_name', 'metro_pop',
'psuedo_rsquare', 'variable_name', 'p_value', 'odds_ratio']]
###Output
_____no_output_____
###Markdown
Largest disparities for Latinos
###Code
results_df5[(results_df5['variable_check'] == '5') & (results_df5['variable_name'] == 'latino')]\
[['metro_code', 'metro_name', 'metro_pop', 'psuedo_rsquare', 'variable_name', 'p_value', 'odds_ratio']].\
sort_values(by = ['odds_ratio'], ascending = False).head(5)
###Output
_____no_output_____
###Markdown
Largest disparities for AAPI
###Code
results_df5[(results_df5['variable_check'] == '5') & (results_df5['variable_name'] == 'asian_cb')]\
[['metro_code', 'metro_name', 'metro_pop', 'psuedo_rsquare', 'variable_name', 'p_value', 'odds_ratio']].\
sort_values(by = ['odds_ratio'], ascending = False).head(5)
###Output
_____no_output_____
###Markdown
Largest disparities for Native American applicants
###Code
results_df5[(results_df5['variable_check'] == '5') & (results_df5['variable_name'] == 'native')]\
[['metro_code', 'metro_name', 'metro_pop', 'psuedo_rsquare', 'variable_name', 'p_value', 'odds_ratio']].\
sort_values(by = ['odds_ratio'], ascending = False).head(5)
###Output
_____no_output_____
###Markdown
Largest disparities for Black applicants
###Code
results_df5[(results_df5['variable_check'] == '5') & (results_df5['variable_name'] == 'black')]\
[['metro_code', 'metro_name', 'metro_pop', 'psuedo_rsquare', 'variable_name', 'p_value', 'odds_ratio']].\
sort_values(by = ['odds_ratio'], ascending = False).head(5)
###Output
_____no_output_____
###Markdown
Summary of findings by race and ethnicity- Black applicants are more likely to be denied in 71 metros- Latinos in 39 metros- Asian/Pacific Islander in 55- Native American in 1 metro
###Code
metro_race_bd = pd.pivot_table(results_df5, index = ['variable_name'], columns = ['variable_check'],
values = ['metro_name'], aggfunc = 'count', fill_value = 0).reset_index()
metro_race_bd.columns = metro_race_bd.columns.droplevel(0)
metro_race_bd.columns.name = None
metro_race_bd.columns = ['Race/Ethnicity', 'Missing', 'Not Sig', 'More Apps', 'Small Disparity', 'Disparity']
metro_race_bd
###Output
_____no_output_____
###Markdown
13. Write out results Add definitions for output
###Code
lookup_dict = {'results_flag': ['1', '2', '3', '3', '3', '3', '3'],
'variable_check': [np.nan, np.nan, '1', '2', '3', '4', '5'],
'reliable_note': ['No results', 'Not enough variance in variables',
'Not statistically significant', 'Not statistically significant',
'Not enough applications', 'Doesn\'t meet level of disparity',
'Statistically significant disparity']}
lookup_df = pd.DataFrame(data=lookup_dict)
lookup_df
results_df6 = pd.merge(results_df4, lookup_df, how = 'left', on = ['results_flag', 'variable_check'])
results_df6.loc[(results_df6['variable_check'] == '4') | (results_df6['variable_check'] == '5'),
'is_reliable'] = True
results_df6.loc[(results_df6['variable_check'] != '4') & (results_df6['variable_check'] != '5'),
'is_reliable'] = False
results_df6['odds_ratio_rd'] = results_df6['odds_ratio'].round(1)
###Output
_____no_output_____
###Markdown
Clean results for all metros:
###Code
races_replace = {'black': 'Black', 'latino': 'Latino', 'native': 'Native American', 'asian_cb': 'AAPI'}
results_df7 = results_df6[(results_df6['variable_name'].isin(races))]\
[['metro_code', 'metro_name', 'metro_pop', 'metro_apps', 'metro_type', 'variable_name', 'total_count', 'loan',
'denied', 'is_reliable', 'reliable_note', 'odds_ratio_rd']].rename(columns = {'odds_ratio_rd': 'odds_ratio'})
results_df8 = results_df7.replace(races_replace)
results_df8.sample(5, random_state = 303)
results_df8.to_csv('../../findings/metro_findings/1_metro_findings_200823.csv', index = False)
###Output
_____no_output_____ |
ensembles/ensembles.ipynb | ###Markdown
Ensemble Models Classification task In this practical assignment you will learn how to work with ensemble models. We will start with the task of classifying Italian wine according to which of three varieties it belongs to. Load the `Wine Data Database` dataset using the `load_wine` function from the `sklearn.datasets` module.
###Code
from sklearn.datasets import load_wine
X, y = load_wine(return_X_y=True)
###Output
_____no_output_____
###Markdown
The random forest model for classification is provided by the `RandomForestClassifier` class from the `sklearn.ensemble` module. The constructor of this class takes an `n_estimators` argument, which corresponds to the number of base estimators in the random forest. The goal of this task is to tune this parameter. Compare random forest models with different numbers of base estimators `{1, 5, 10, 20}`. What happens to the quality of the random forest on the test data as this number increases? The answer `answer1` to this task is the best model quality score, rounded to three decimal places. Use `accuracy` as the quality metric and cross-validation via `cross_val_score` to evaluate model quality. Set the parameter `cv = StratifiedKFold(4)`. Take the mean of the quality scores. For each random forest model, use `random_state=42` when creating the new instance. *SOLUTION*
###Code
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score, StratifiedKFold
n_es = [1, 5, 10, 20]
cv = StratifiedKFold(4)
results = {}
for n in n_es:
forest = RandomForestClassifier(n_estimators=n, random_state=42)
cv_score = round(cross_val_score(forest, X, y, cv=cv, scoring='accuracy').mean(), 3)
results[n] = cv_score
answer1 = max(results.values())
answer1
###Output
_____no_output_____
###Markdown
Next, compare the gradient boosting model `GradientBoostingClassifier` from `sklearn.ensemble` with logistic regression `LogisticRegression` from `sklearn.linear_model` on this dataset. Use the parameter `random_state=42` when creating the class instances. Which of the models performs better? Provide the best score, rounded to three decimal places, as the answer `answer2` to this task. What conclusions can be drawn from this? *SOLUTION*
###Code
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
gbcl = GradientBoostingClassifier(random_state=42)
reg = LogisticRegression(random_state=42, solver='liblinear')
gbcl_score = cross_val_score(gbcl, X, y, cv=cv, scoring='accuracy').mean()
reg_score = cross_val_score(reg, X, y, cv=cv, scoring='accuracy').mean()
answer2 = max(gbcl_score, reg_score)
answer2
###Output
_____no_output_____
###Markdown
Regression task Load the already familiar `Boston House Prices` dataset and split it randomly into training and test sets. To do this, use the `train_test_split` function with the parameters `random_state=54` and `test_size=0.33`. We will compare 4 models: `RandomForestRegressor`, `GradientBoostingRegressor` from `sklearn.ensemble`, as well as ridge regression and LASSO (`Ridge`, `Lasso` from `sklearn.linear_model`). Train each model on the training set with the parameter `random_state=42` in the constructor. Which of the models shows the lowest mean squared error on the test data? As the answer `answer3`, provide this value rounded to two decimal places. *SOLUTION*
###Code
import numpy as np
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.linear_model import Ridge, Lasso
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
X, y = load_boston(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=54, test_size=0.33)
rfr = RandomForestRegressor(random_state=42)
gbr = GradientBoostingRegressor(random_state=42)
ridge = Ridge(random_state=42)
lasso = Lasso(random_state=42)
rfr.fit(X_train, y_train)
gbr.fit(X_train, y_train)
ridge.fit(X_train, y_train)
lasso.fit(X_train, y_train)
y_pred_rfr = rfr.predict(X_test)
y_pred_gbr = gbr.predict(X_test)
y_pred_ridge = ridge.predict(X_test)
y_pred_lasso = lasso.predict(X_test)
mse1 = mean_squared_error(y_test, y_pred_rfr)
mse2 = mean_squared_error(y_test, y_pred_gbr)
mse3 = mean_squared_error(y_test, y_pred_ridge)
mse4 = mean_squared_error(y_test, y_pred_lasso)
answer3 = round(min(mse1, mse2, mse3, mse4), 2)
answer3
###Output
_____no_output_____
###Markdown
Output string with the answers
###Code
output = """Best score (random forest) {0:.3f}
Best score (other algorithms) {1:.3f}
Best score (regression) {2:.2f}"""
print(output.format(answer1, answer2, answer3))
###Output
Best score (random forest) 0.972
Best score (other algorithms) 0.961
Best score (regression) 8.54
###Markdown
Ensemble Learning Models we have discussed previously:- Linear regression. - Logistic regression. - Decision trees. - k-Nearest neighbors. - Support vector machines. Sometimes these models are too simple (even tuned) to provide a model accurate enough to be worth putting into production, especially given compute and infrastructure requirements. Another approach is ensemble learning - instead of trying to make a single model that has sufficient performance, maybe using multiple models and combining the results in some way will lead to better overall performance. This will generally follow Python Machine Learning 3rd Edition, Chapter 7.[Image source: SAS Blog](https://blogs.sas.com/content/subconsciousmusings/2017/05/18/stacked-ensemble-models-win-data-science-competitions/)Why bother? A set of classifiers will often have better predictive performance than the individual members. Think of a chorus of young students. Many will sing too low or too high, but on average they sound better together than individually. Goal> Combine different classifiers into a meta-classifier that has better generalization performance than each individual classifier alone. Machine Learning with Python 3rd Edition, Page 223. Ask a panel of medical experts about health issues. For the common topics, they will have a tendency to agree. For rarer topics, the specialists, of whom there are fewer, will be more knowledgeable. The non-specialists' opinions for those rare topics will probably be all over the place, but the specialist opinions will likely be in agreement. Sometimes these are also referred to as **meta-models.** The most common underlying models for these are weak decision trees, sometimes stumps, sometimes referred to as **weak learners**. - Computationally pretty fast. - Trees aren't identical and will be better than random guessing. - Can obtain high accuracy with many of these independent trees, similar in concept to random forests. - We aren't restricted to trees though, any classifier (regression) can be used to create ensembles. Common methods- Majority voting. - Bagging. - Boosting. Majority Voting> Really talking about the plurality or mode. Majority doesn't generalize for multi-class problems.[Image source: Machine Learning with Python 3rd Edition, Figure 7.1](https://github.com/rasbt/python-machine-learning-book-3rd-edition/tree/master/ch07) Random Forests: We've already encountered an ensemble model, random forests. Recall a random forest is a collection of individual decision trees, with the prediction being the plurality class predicted from the $C$ individual trees. [Image source: Machine Learning with Python 3rd Edition, Figure 7.2](https://github.com/rasbt/python-machine-learning-book-3rd-edition/tree/master/ch07)- $C$ different classification algorithms can be fit. - These can include different models, different parameter settings, different feature processing, ... Warning: While these can be useful, for larger datasets and models that are computationally intensive by themselves, ensembles can be very expensive. Predicting the label ($mode=plurality$)$$\hat{y} = mode(C_1(x), C_2(x), \dots, C_m(x))$$ Rationale Assuming we have $n$ binary classifiers that are independent and the error rates are uncorrelated, the errors can be expressed as a probability mass function of a binomial distribution.$$P(y\geq k) = \sum_k^n {n \choose k}\epsilon^k(1-\epsilon)^{n-k}=\epsilon_{ensemble}$$i.e., compute the probability that the prediction is wrong. 
For 11 classifiers ($n$) with error rates of 0.25, with a majority being 6 ($k$):$$P(y\geq k) = \sum_{k=6}^{11} {11 \choose k}0.25^k(1-0.25)^{11-k}=0.034$$> Assuming the classifiers are (1) independent with (2) uncorrelated errors; the error rate with the ensemble is much lower than the individual error rates ($0.034 \lt 0.25$). Does it always work? No. In the class of binary classifiers, the classifiers have to do better than random guessing. [The below code is from page 226-227 of Machine Learning with Python](https://github.com/rasbt/python-machine-learning-book-3rd-edition/tree/master/ch07).
###Code
from scipy.special import comb
import math
def ensemble_errors(n_classifier, error):
k_start = int(math.ceil(n_classifier / 2.))
probs = [comb(n_classifier, k) * (error ** k) * (1 - error)**(n_classifier - k)
for k in range(k_start, n_classifier+1)
]
return sum(probs)
example_ensemble_error = ensemble_errors(n_classifier=11, error=0.25)
print(f'Error rate: {example_ensemble_error:.2%}')
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
error_range = np.arange(0.0, 1.01, 0.01)
ens_errors = [ensemble_errors(n_classifier=11, error=error) for error in error_range]
plt.plot(error_range, ens_errors, label='Ensemble Error', color='blue', linewidth=2)
plt.plot(error_range, error_range, label='Base Error', linestyle='--', color='orange')
plt.fill_between(error_range[:51], ens_errors[:51], error_range[:51], color='green', alpha=0.5)
plt.xlabel('Base Error')
plt.ylabel('Base/Ensemble Error')
plt.legend(['Ensemble Errors', 'Base Estimator Errors', 'Area of Benefit'])
plt.show()
###Output
_____no_output_____
###Markdown
Weighting Classifiers There is an extension that is implemented in [scikit-learn](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.VotingClassifier.html) that allows you to weight each classifier with an arbitrary weight.$$\hat{y}=argmax_i \sum_j^k w_j\chi_A(C_j(x)=i)$$$w_j$ is a weight associated with the base classifier, $C_j$; $\hat{y}$ is the predicted label; $A$ is the set of unique class labels; $\chi_A$ is the indicator function that determines the class within the classifier.If you had $3$ classifiers and weights $w_j \in (0.6, 0.2, 0.2)$, you would effectively be counting the first classifier's label $3$ times, the second classifier once, and the third classifier once.$$\{C_1=1, C_2=0, C_3=0 \} \rightarrow (1,1,1,0,0)$$For the probabilities (`predict_proba`) it is the weighted average of the individual probabilities:$$\hat{y}=argmax_i \sum_j^k w_jp_{i,j}$$Assuming we have $3$ binary classifiers, returning the following probabilities for an example:$$C_1(x) \rightarrow [0.9, 0.1], C_2(x) \rightarrow [0.8, 0.2], C_3(x) \rightarrow [0.4, 0.6]$$Using the same weights, we would have:$$p(i_0|x) = 0.6\times0.9 + 0.2\times0.8 + 0.2\times0.4 = 0.78$$$$p(i_1|x) = 0.6\times0.1 + 0.2\times0.2 + 0.2\times0.6 = 0.22$$And the overall result is:$$\hat{y} = argmax_i[p(i_0|x),p(i_1|x)] = argmax_i[0.78, 0.22]= 0$$(A short numeric check of this example follows below.) Not all probabilities are created equal: Although we can weight the probabilities, they may not be calculated the same way. Recall decision tree probabilities are essentially the class distributions on the leaves and other models produce probabilities from probability mass functions. Regression For regression, it is a bit simpler. Weight the individual $\hat{y}$ values to obtain a weighted average estimate and recalculate the $r^2$ or other scoring metrics. Classification with Majority Voting on `iris`
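Before moving on to the `iris` example, a quick numeric check of the weighted soft vote above (plain NumPy; the weights and probabilities are the ones from the worked example, not scikit-learn's internals):
```python
# Numeric check of the weighted soft-vote example above (illustration only).
import numpy as np

weights = np.array([0.6, 0.2, 0.2])      # w_j for C_1, C_2, C_3
probas = np.array([[0.9, 0.1],           # C_1(x)
                   [0.8, 0.2],           # C_2(x)
                   [0.4, 0.6]])          # C_3(x)

weighted_avg = np.average(probas, axis=0, weights=weights)
print(weighted_avg)                      # [0.78 0.22]
print(np.argmax(weighted_avg))           # 0 -> predicted class
```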
###Code
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
iris = datasets.load_iris()
X, y = iris.data[50:, [1,2]], iris.target[50:,]
le = LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=1)
print(f'Training examples: {X_train.shape[0]:,}')
print(f'Test examples: {X_test.shape[0]:,}')
###Output
Training examples: 60
Test examples: 40
###Markdown
- Logistic regression, - Decision tree, and - k-nearest neighbors
###Code
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
import numpy as np
clf1 = LogisticRegression(penalty='l2', C=0.001, solver='lbfgs', random_state=0)
clf2 = DecisionTreeClassifier(max_depth=1, criterion='entropy', random_state=1)
clf3 = KNeighborsClassifier(n_neighbors=1, p=2, metric='minkowski')
pipe1 = Pipeline([('scaler', StandardScaler()),('logreg', clf1)])
pipe2 = Pipeline([('scaler', StandardScaler()),('tree', clf2)])
pipe3 = Pipeline([('scaler', StandardScaler()),('knn', clf3)])
labs = ['Logistic Regression', 'Decision Tree', 'k-Nearest Neighbors']
clfs = [pipe1, pipe2, pipe3]
clfs = zip(labs, clfs)
for lab, clf in clfs:
scores = cross_val_score(estimator=clf, X=X_train, y=y_train, cv=10, scoring='roc_auc')
print(f'ROC AUC {scores.mean():.2f} (+/- {scores.std():.2f}) [{lab}]')
###Output
ROC AUC 0.95 (+/- 0.08) [Logistic Regression]
ROC AUC 0.93 (+/- 0.08) [Decision Tree]
ROC AUC 0.93 (+/- 0.11) [k-Nearest Neighbors]
###Markdown
VotingClassifier from scikit-learn.- Set `voting=soft` to use the probabilities to inform the class prediction. `voting=hard` will use the mode of the predicted classes.
###Code
from sklearn.ensemble import VotingClassifier
ems = [('lr', pipe1),('dt', pipe2),('knn', pipe3)]
clf4 = VotingClassifier(estimators= ems, weights=None, voting='soft')
scores = cross_val_score(estimator=clf4, X=X_train, y=y_train, cv=10, scoring='roc_auc')
print(f'ROC AUC {scores.mean():.2f} (+/- {scores.std():.2f}) [Ensemble]')
###Output
ROC AUC 0.99 (+/- 0.03) [Ensemble]
###Markdown
Higher area under the curve, with smaller variations between the folds! That was on the training though, need to evaluate on the test data. Recall [GridSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html)- We can tune a VotingClassifier the same way we tune individual ones.
###Code
clf4
from sklearn.model_selection import GridSearchCV
params = {'lr__logreg__C':[0.001, 0.1, 1, 10],
'dt__tree__max_depth': [1,2,3],
'knn__knn__n_neighbors': [1,2,3]
}
vc_gs = GridSearchCV(estimator=clf4, param_grid=params, scoring='roc_auc', refit=True)
vc_gs = vc_gs.fit(X_train, y_train)
vc_gs_score = vc_gs.score(X_test, y_test)
print(f'VotingClassifier Test ROC AUC: {vc_gs_score:.2%}')
###Output
VotingClassifier Test ROC AUC: 96.21%
###Markdown
Bagging (also known as Bootstrap Aggregating)- Another ensemble learning technique, similar to the VotingClassifier. - Instead of using the same dataset for training, bootstrap samples are drawn with replacement for each base learner (a short sketch of this resampling step appears before the bagging fit below). - Aggregation should reduce the variance of our results. [Image source: Machine Learning with Python 3rd Edition, Figure 7.6](https://github.com/rasbt/python-machine-learning-book-3rd-edition/tree/master/ch07)> This can improve accuracy and decrease overfitting when models are unstable. [See Breiman's paper on bagging.](https://www.stat.berkeley.edu/~breiman/bagging.pdf)> Bagging won't help with bias or underfitting models. These are commonly used with unpruned decision trees, which by themselves are very prone to overfitting. Logistic Regression as Benchmark
###Code
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
clf1 = LogisticRegression(penalty='l2', C=0.001, solver='lbfgs', random_state=0)
pipe1 = Pipeline([('scaler', StandardScaler()),('logreg', clf1)])
params = {'logreg__C':[0.001, 0.1, 1, 10]}
lrcv = GridSearchCV(pipe1, param_grid=params, cv=10, scoring='roc_auc')
lrcv = lrcv.fit(X_train, y_train)
lrcv.score(X_test, y_test)
lrcv.best_estimator_
###Output
_____no_output_____
###Markdown
Average deviation on test scores:
###Code
lrcv.cv_results_['std_test_score'].mean()
###Output
_____no_output_____
###Markdown
Bagging:
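As a reminder of the resampling step described above (a sketch only, not `BaggingClassifier`'s internals), each base learner is fit on a bootstrap sample drawn with replacement from the training set:
```python
# Sketch of the bootstrap-sampling step behind bagging (illustration only).
# Assumes the iris X_train / y_train numpy arrays from the split above.
import numpy as np

rng = np.random.default_rng(42)
n_samples = len(X_train)
for i in range(3):                                    # e.g., 3 base learners
    idx = rng.integers(0, n_samples, size=n_samples)  # sample with replacement
    X_boot, y_boot = X_train[idx], y_train[idx]
    # each base estimator would then be fit on (X_boot, y_boot)
```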
###Code
from sklearn.ensemble import BaggingClassifier
dt = DecisionTreeClassifier()
bclf = BaggingClassifier(dt)
params = {'base_estimator__max_depth':[1, 2, 4, 6], 'max_samples':[0.1, 0.2, 0.3, 0.4, 0.5]}
blrcv = GridSearchCV(bclf, param_grid=params, cv=10, scoring='roc_auc')
blrcv = blrcv.fit(X_train, y_train)
blrcv.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
Average deviation of test scores:
###Code
blrcv.cv_results_['std_test_score'].mean()
###Output
_____no_output_____
###Markdown
> `iris` is pretty stable, so no benefit, but you can try bagging if overfitting seems to be an issue. However, we do see lower standard deviations in the test scores across the folds - less variance in bagging. Takeaways- Performance was about the same with bagging. - Variance likely lower in our estimates. Adaptive Boosting (AdaBoost)- Boosting refers to ensembles that combine weak learners into a strong learner. - Generally, weak decision trees are used as the learner. - Generally, we sequentially train weak models and the subsequent ones try to correct the prior model's mistakes. - AdaBoost and Gradient Boosting are the most popular. - Since the learners are dependent on previous learners, this is not able to be parallelized and won't scale as well as bagging. Original Process- Draw a random subset of examples, $d_1$, without replacement, and train a weak learner, $C_1$. - Draw a second sample, $d_2$, and add 50% of the samples that were misclassified by $C_1$ to train a second weak learner, $C_2$. - Find examples, $d_3$, that $C_1$ and $C_2$ disagreed on and train another weak learner, $C_3$. - Combine $C_1$, $C_2$, and $C_3$ via majority voting. New Process The newer process works much in the same way as above, except all examples are used in each learner and those misclassified are weighted more heavily.In the image below:1. Represents the initial learner with all examples, equal-weighted. 2. Assign larger weight to misclassified examples from the first learner (bigger circles) and train another learner. 3. Assign yet larger weights to misclassified examples from the second learner and train another learner. 4. Combine the three learners by weighted majority vote. [Image source: Machine Learning with Python 3rd Edition, Figure 7.9](https://github.com/rasbt/python-machine-learning-book-3rd-edition/tree/master/ch07) Pseudo Code- Set weight vector, $w$, to uniform weights, $\sum_i w_i=1$. - For $j$ in $m$ boosting rounds: - Train a weak learner: $C_j=train(X,y,w)$. - Predict labels: $\hat{y}=predict(C_j,X)$. - Compute weighted error rate: $\epsilon=w\cdot(\hat{y}\ne y)$. - Compute coefficient: $\alpha_j=0.5\log\frac{1-\epsilon}{\epsilon}$ - Update weights: $w:=w\times \exp(-\alpha_j \times \hat{y} \times y)$ - Normalize weights: $w:=w/\sum_i w_i$ - Compute final predictions:$$\hat{y}=\sum_{j=1}^{m}(\alpha_j \times predict(C_j,X)>0)$$ (A from-scratch sketch of these steps follows below.) Wine Example Data from [UCI's Machine Learning Repository](http://archive.ics.uci.edu/ml/datasets/Wine).
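Before the wine example, here is a compact from-scratch sketch of the pseudo code above (illustration only, assuming labels encoded as -1/+1 and decision stumps as the weak learners; the real example below uses scikit-learn's `AdaBoostClassifier`):
```python
# Illustrative from-scratch AdaBoost sketch following the pseudo code above.
# Assumes X is a numpy feature array and y is encoded as -1/+1.
import numpy as np
from sklearn.tree import DecisionTreeClassifier

def adaboost_sketch(X, y, m_rounds=10):
    n = len(y)
    w = np.full(n, 1.0 / n)                      # uniform weights summing to 1
    stumps, alphas = [], []
    for _ in range(m_rounds):
        stump = DecisionTreeClassifier(max_depth=1)
        stump.fit(X, y, sample_weight=w)         # train weak learner on weighted data
        y_hat = stump.predict(X)
        eps = max(np.sum(w[y_hat != y]), 1e-10)  # weighted error rate (guard against 0)
        alpha = 0.5 * np.log((1 - eps) / eps)    # learner coefficient
        w = w * np.exp(-alpha * y_hat * y)       # up-weight misclassified examples
        w = w / w.sum()                          # normalize
        stumps.append(stump)
        alphas.append(alpha)

    def predict(X_new):
        agg = sum(a * s.predict(X_new) for a, s in zip(alphas, stumps))
        return np.sign(agg)                      # weighted vote of the weak learners
    return predict
```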
###Code
import pandas as pd
wine = pd.read_csv('data/wine.data', header=None)
wineCols = ['Class label', 'Alcohol', 'Malic Acid', 'Ash', 'Alcalinity of Ash',
'Magnesium', 'Total Phenols', 'Flavanoid', 'Nonflavanoid Phenols',
'Proanthocyanins', 'Color Intensity', 'Hue', 'OD280-OD315', 'Proline']
wine.columns = wineCols
wine.head()
wine['Class label'].value_counts()
from sklearn.model_selection import train_test_split
wineFeatures = [x for x in wineCols if x != 'Class label']
wineX = wine[wineFeatures]
winey = wine['Class label']
wX_train, wX_test, wy_train, wy_test = train_test_split(wineX, winey, test_size=0.3)
print(f'Training examples: {wX_train.shape[0]}')
print(f'Test examples: {wX_test.shape[0]}')
###Output
Training examples: 124
Test examples: 54
###Markdown
Determine performance on Decision Tree Stumps ($depth=1$)
###Code
from sklearn.metrics import accuracy_score
tree = DecisionTreeClassifier(criterion='entropy', random_state=1, max_depth=1)
tree = tree.fit(wX_train, wy_train)
tree_train_predict = tree.predict(wX_train)
tree_test_predict = tree.predict(wX_test)
tree_train_score = accuracy_score(wy_train, tree_train_predict)
tree_test_score = accuracy_score(wy_test, tree_test_predict)
print(f'Tree Training Score: {tree_train_score:.2%}')
print(f'Tree Test Score: {tree_test_score:.2%}')
###Output
Tree Training Score: 63.71%
Tree Test Score: 50.00%
###Markdown
Use AdaBoost, with the Stump Decision Tree as the base estimator
###Code
from sklearn.ensemble import AdaBoostClassifier
ada = AdaBoostClassifier(base_estimator=tree, n_estimators=500, learning_rate=0.1, random_state=1)
ada = ada.fit(wX_train.values, wy_train)
ada_train_predict = ada.predict(wX_train.values)
ada_test_predict = ada.predict(wX_test.values)
ada_train_score = accuracy_score(wy_train, ada_train_predict)
ada_test_score = accuracy_score(wy_test, ada_test_predict)
print(f'AdaBoost Training Score: {ada_train_score:.2%}')
print(f'AdaBoost Test Score: {ada_test_score:.2%}')
###Output
AdaBoost Training Score: 98.39%
AdaBoost Test Score: 83.33%
###Markdown
> AdaBoost perfectly classifies all the training data. Probably overfitting - see the drop in the test score. We would want to run it through cross-validation to ensure stability of those high scores. The absolute gap is bigger in the AdaBoost classifier, so we introduced additional model bias.**AdaBoost Hyperparameters to Tune:** - Number of estimators. - Learning rate. Gradient Boosting Similar to AdaBoost, in that it sequentially adds learners to an ensemble. Instead of changing weights after each iteration, this tries to fit the new predictor to the residual errors made by the previous learner. Example from [Hands on Machine Learning with Scikit-Learn, Keras & TensorFlow, pages 204-205](https://github.com/ageron/handson-ml2/blob/master/07_ensemble_learning_and_random_forests.ipynb)Generate quadratic data with a little noise:
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed(42)
X = np.random.rand(100, 1) - 0.5
y = 3*X[:, 0]**2 + 0.05 * np.random.randn(100)
plt.plot(X,y, 'ro')
plt.show()
###Output
_____no_output_____
###Markdown
Train an initial weak Regression Tree
###Code
from sklearn.tree import DecisionTreeRegressor
tree_reg1 = DecisionTreeRegressor(max_depth=2, random_state=42)
tree_reg1.fit(X, y)
###Output
_____no_output_____
###Markdown
Calculate residuals and use those as the target variable in another weak Regression Tree
###Code
y2 = y - tree_reg1.predict(X)
tree_reg2 = DecisionTreeRegressor(max_depth=2, random_state=42)
tree_reg2.fit(X, y2)
###Output
_____no_output_____
###Markdown
Calculate residuals and use those as the target variable in another weak Regression Tree
###Code
y3 = y2 - tree_reg2.predict(X)
tree_reg3 = DecisionTreeRegressor(max_depth=2, random_state=42)
tree_reg3.fit(X, y3)
###Output
_____no_output_____
###Markdown
New predictions can be made by summing up the predictions from the three trees
###Code
X_new = np.array([[0.8]])
y_pred = sum(tree.predict(X_new) for tree in (tree_reg1, tree_reg2, tree_reg3))
def plot_predictions(regressors, X, y, axes, label=None, style="r-", data_style="b.", data_label=None):
x1 = np.linspace(axes[0], axes[1], 500)
y_pred = sum(regressor.predict(x1.reshape(-1, 1)) for regressor in regressors)
plt.plot(X[:, 0], y, data_style, label=data_label)
plt.plot(x1, y_pred, style, linewidth=2, label=label)
if label or data_label:
plt.legend(loc="upper center", fontsize=16)
plt.axis(axes)
plt.figure(figsize=(11,11))
plt.subplot(321)
plot_predictions([tree_reg1], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label="$h_1(x_1)$", style="g-", data_label="Training set")
plt.ylabel("$y$", fontsize=16, rotation=0)
plt.title("Residuals and tree predictions", fontsize=16)
plt.subplot(322)
plot_predictions([tree_reg1], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label="$h(x_1) = h_1(x_1)$", data_label="Training set")
plt.ylabel("$y$", fontsize=16, rotation=0)
plt.title("Ensemble predictions", fontsize=16)
plt.subplot(323)
plot_predictions([tree_reg2], X, y2, axes=[-0.5, 0.5, -0.5, 0.5], label="$h_2(x_1)$", style="g-", data_style="k+", data_label="Residuals")
plt.ylabel("$y - h_1(x_1)$", fontsize=16)
plt.subplot(324)
plot_predictions([tree_reg1, tree_reg2], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label="$h(x_1) = h_1(x_1) + h_2(x_1)$")
plt.ylabel("$y$", fontsize=16, rotation=0)
plt.subplot(325)
plot_predictions([tree_reg3], X, y3, axes=[-0.5, 0.5, -0.5, 0.5], label="$h_3(x_1)$", style="g-", data_style="k+")
plt.ylabel("$y - h_1(x_1) - h_2(x_1)$", fontsize=16)
plt.xlabel("$x_1$", fontsize=16)
plt.subplot(326)
plot_predictions([tree_reg1, tree_reg2, tree_reg3], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label="$h(x_1) = h_1(x_1) + h_2(x_1) + h_3(x_1)$")
plt.xlabel("$x_1$", fontsize=16)
plt.ylabel("$y$", fontsize=16, rotation=0)
plt.show()
###Output
_____no_output_____
###Markdown
scikit-learn Equivalent:
###Code
from sklearn.ensemble import GradientBoostingRegressor
gbrt = GradientBoostingRegressor(max_depth=2, n_estimators=3, learning_rate=1.0)
gbrt = gbrt.fit(X, y)
plot_predictions([gbrt], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label="$h(x_1) = h_1(x_1)$", data_label="Training set")
plt.ylabel("$y$", fontsize=16, rotation=0)
plt.title("Gradient Boost from scikit-learn", fontsize=16)
plt.show()
###Output
_____no_output_____
###Markdown
Sensitivity of Learning Rates and Estimators
###Code
from sklearn.ensemble import GradientBoostingRegressor
gbrt = GradientBoostingRegressor(max_depth=2, n_estimators=3, learning_rate=0.01)
gbrt = gbrt.fit(X, y)
plot_predictions([gbrt], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label="$h(x_1) = h_1(x_1)$", data_label="Training set")
plt.ylabel("$y$", fontsize=16, rotation=0)
plt.title("Gradient Boost from scikit-learn ($\\alpha=0.01$, $estimators=3$)", fontsize=16)
plt.show()
gbrt = GradientBoostingRegressor(max_depth=2, n_estimators=3, learning_rate=0.1)
gbrt = gbrt.fit(X, y)
plot_predictions([gbrt], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label="$h(x_1) = h_1(x_1)$", data_label="Training set")
plt.ylabel("$y$", fontsize=16, rotation=0)
plt.title("Gradient Boost from scikit-learn ($\\alpha=0.1$, $estimators=3$)", fontsize=16)
plt.show()
gbrt = GradientBoostingRegressor(max_depth=2, n_estimators=300, learning_rate=0.01)
gbrt = gbrt.fit(X, y)
plot_predictions([gbrt], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label="$h(x_1) = h_1(x_1)$", data_label="Training set")
plt.ylabel("$y$", fontsize=16, rotation=0)
plt.title("Gradient Boost from scikit-learn ($\\alpha=0.01$, $estimators=300$)", fontsize=16)
plt.show()
gbrt = GradientBoostingRegressor(max_depth=2, n_estimators=300, learning_rate=0.1)
gbrt = gbrt.fit(X, y)
plot_predictions([gbrt], X, y, axes=[-0.5, 0.5, -0.1, 0.8], label="$h(x_1) = h_1(x_1)$", data_label="Training set")
plt.ylabel("$y$", fontsize=16, rotation=0)
plt.title("Gradient Boost from scikit-learn ($\\alpha=0.1$, $estimators=300$)", fontsize=16)
plt.show()
###Output
_____no_output_____
###Markdown
- Learning rate affects the contributions from each tree. Lower values will require more trees, but generalization will be better (shrinkage). - Increasing the number of trees can lead to overfitting. Use early stopping to determine an optimal number of trees. Early stopping looks at the validation errors as the number of trees increases.
###Code
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
X_train, X_val, y_train, y_val = train_test_split(X, y, random_state=49)
gbrt = GradientBoostingRegressor(max_depth=2, n_estimators=120, random_state=42)
gbrt.fit(X_train, y_train)
errors = [mean_squared_error(y_val, y_pred)
for y_pred in gbrt.staged_predict(X_val)]
bst_n_estimators = np.argmin(errors) + 1
gbrt_best = GradientBoostingRegressor(max_depth=2, n_estimators=bst_n_estimators, random_state=42)
gbrt_best.fit(X_train, y_train)
min_error = np.min(errors)
plt.figure(figsize=(10, 4))
plt.subplot(121)
plt.plot(errors, "b.-")
plt.plot([bst_n_estimators, bst_n_estimators], [0, min_error], "k--")
plt.plot([0, 120], [min_error, min_error], "k--")
plt.plot(bst_n_estimators, min_error, "ko")
plt.text(bst_n_estimators, min_error*1.2, "Minimum", ha="center", fontsize=14)
plt.axis([0, 120, 0, 0.01])
plt.xlabel("Number of trees")
plt.ylabel("Error", fontsize=16)
plt.title("Validation error", fontsize=14)
plt.subplot(122)
plot_predictions([gbrt_best], X, y, axes=[-0.5, 0.5, -0.1, 0.8])
plt.title("Best model (%d trees)" % bst_n_estimators, fontsize=14)
plt.ylabel("$y$", fontsize=16, rotation=0)
plt.xlabel("$x_1$", fontsize=16)
plt.show()
###Output
_____no_output_____
###Markdown
More tangible example using customer churn data
###Code
tel = pd.read_csv('data/telco.csv')
tel.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 7043 entries, 0 to 7042
Data columns (total 21 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 customerID 7043 non-null object
1 gender 7043 non-null object
2 SeniorCitizen 7043 non-null int64
3 Partner 7043 non-null object
4 Dependents 7043 non-null object
5 tenure 7043 non-null int64
6 PhoneService 7043 non-null object
7 MultipleLines 7043 non-null object
8 InternetService 7043 non-null object
9 OnlineSecurity 7043 non-null object
10 OnlineBackup 7043 non-null object
11 DeviceProtection 7043 non-null object
12 TechSupport 7043 non-null object
13 StreamingTV 7043 non-null object
14 StreamingMovies 7043 non-null object
15 Contract 7043 non-null object
16 PaperlessBilling 7043 non-null object
17 PaymentMethod 7043 non-null object
18 MonthlyCharges 7043 non-null float64
19 TotalCharges 7043 non-null object
20 Churn 7043 non-null object
dtypes: float64(1), int64(2), object(18)
memory usage: 1.1+ MB
###Markdown
Churn distribution
###Code
tel.Churn.value_counts().plot.barh()
plt.title('Customer Churn')
plt.show()
###Output
_____no_output_____
###Markdown
- Unbalanced, but not as severe as what we've seen before. Cardinality Check
###Code
tel.select_dtypes('object').nunique().sort_values().plot.barh()
plt.title('Unique Values of Objects')
plt.show()
tel.select_dtypes('object').nunique().sort_values()
###Output
_____no_output_____
###Markdown
- customerID is something we won't want to use for modeling. - TotalCharges might have an incorrect type.
###Code
tel.TotalCharges.head()
tel['TotalCharges'] = pd.to_numeric(tel['TotalCharges'], errors='coerce')
tel['TotalCharges'].describe()
###Output
_____no_output_____
###Markdown
Numerical Columns
###Code
tel.select_dtypes(['int64','float64']).hist()
plt.show()
###Output
_____no_output_____
###Markdown
- SeniorCitizen looks like a dummy. - tenure and MonthlyCharges seem to be truncated to some degree. Missing value check
###Code
tel.isna().sum()
###Output
_____no_output_____
###Markdown
- Will need an imputer; the missing values were likely caused by the type conversion. - If there were a substantial number of NAs due to the type conversion, we may have to handle it more elegantly, especially if the data generation process will allow this field to come in as objects. Plan- Baseline with logistic regression. - Create a straight decision tree. - AdaBoost. - GradientBoost. We'll use `accuracy` as a metric, but you could make the argument recall may be what we'd want to optimize for, since we could proactively reach out to those customers and try to get them to stay on the service with discounts or other incentives.
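If recall were the metric we cared about, the only change in the grid searches below would be the `scoring` argument (e.g. `scoring='recall'` instead of `scoring='accuracy'`). A toy check of what recall measures:
```python
# Toy illustration of recall (true positives found / all actual positives).
from sklearn.metrics import recall_score

recall_score([0, 1, 1, 0], [0, 1, 0, 0])   # 0.5: one of the two churners was caught
```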
###Code
nums = tel.dtypes[tel.dtypes != 'object'].index.tolist()
cats = tel.dtypes[tel.dtypes == 'object'].index.tolist()
cats = [x for x in cats if x not in ['Churn', 'customerID']]
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
num_pipeline = Pipeline([('impute_missing', SimpleImputer(strategy='median')),
('standardize_num', StandardScaler())
])
cat_pipeline = Pipeline([('impute_missing_cats', SimpleImputer(strategy='most_frequent')),
('create_dummies_cats', OneHotEncoder(handle_unknown='ignore', drop='first'))])
processing_pipeline = ColumnTransformer(transformers=[('proc_numeric', num_pipeline, nums),
('create_dummies', cat_pipeline, cats)])
print('Pipeline Created')
###Output
Pipeline Created
###Markdown
Split Data
###Code
from sklearn.model_selection import train_test_split
y = np.where(tel.Churn == 'Yes', 1, 0)
tX_train, tX_test, ty_train, ty_test = train_test_split(tel[nums+cats],
y,
test_size=0.2
)
print(f'Training examples: {tX_train.shape[0]:,}')
print(f'Test examples: {tX_test.shape[0]:,}')
###Output
Training examples: 5,634
Test examples: 1,409
###Markdown
Logistic Regression
###Code
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
p1 = Pipeline([('processing', processing_pipeline),
('lr', LogisticRegression(solver='liblinear'))])
params = {'lr__C': [0.01, 0.1, 1, 10]}
lr_gscv = GridSearchCV(p1, param_grid=params, cv=10, scoring='accuracy', refit=True)
lr_gscv = lr_gscv.fit(tX_train, ty_train)
print(f'Validation score: {lr_gscv.best_score_:.2%}')
lr_pred = lr_gscv.predict(tX_test)
print(f'Test score: {lr_gscv.score(tX_test, ty_test):.2%}')
###Output
Validation score: 80.80%
Test score: 80.34%
###Markdown
Decision Tree
###Code
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
p2 = Pipeline([('processing', processing_pipeline),
('dt', DecisionTreeClassifier())])
params = {'dt__max_depth': [1, 5, 10, 15, 25],
'dt__min_samples_split': [3, 10, 15]}
dt_gscv = GridSearchCV(p2, param_grid=params, cv=10, scoring='accuracy', refit=True)
dt_gscv = dt_gscv.fit(tX_train, ty_train)
print(f'Validation score: {dt_gscv.best_score_:.2%}')
dt_pred = dt_gscv.predict(tX_test)
print(f'Test score: {dt_gscv.score(tX_test, ty_test):.2%}')
###Output
Validation score: 79.27%
Test score: 79.35%
###Markdown
AdaBoost
###Code
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
# multi-step pipelines don't play as nice with AdaBoost
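# so the features are transformed up front and AdaBoostClassifier is tuned directly;
# the 'base_estimator__max_depth' key below routes the grid values into the wrapped DecisionTreeClassifier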
xt = processing_pipeline.fit_transform(tX_train)
params = {'base_estimator__max_depth': [1,2],
'n_estimators': [50, 100, 200, 400]
}
#ABC = AdaBoostClassifier(base_estimator=p2)
ABC = AdaBoostClassifier(DecisionTreeClassifier())
ad_gscv = GridSearchCV(ABC, param_grid = params, cv=10, scoring='accuracy')
ad_gscv = ad_gscv.fit(xt, ty_train)
print(f'Validation score: {ad_gscv.best_score_:.2%}')
xtt = processing_pipeline.transform(tX_test)
ad_pred = ad_gscv.predict(xtt)
print(f'Test score: {ad_gscv.score(xtt, ty_test):.2%}')
###Output
Validation score: 80.48%
Test score: 80.70%
###Markdown
Gradient Boosting
###Code
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
p3 = Pipeline([('processing', processing_pipeline),
('gb', GradientBoostingClassifier())])
params = {'gb__max_depth': [1,2,3],
'gb__n_estimators': [50, 100, 200]
}
gb_gscv = GridSearchCV(p3, param_grid = params, cv=10, scoring='accuracy')
gb_gscv = gb_gscv.fit(tX_train, ty_train)
print(f'Validation score: {gb_gscv.best_score_:.2%}')
gb_pred = gb_gscv.predict(tX_test)
print(f'Test score: {gb_gscv.score(tX_test, ty_test):.2%}')
print(f'Logistic Regression: {lr_gscv.score(tX_test, ty_test):.2%}')
print(f'Decision Tree: {dt_gscv.score(tX_test, ty_test):.2%}')
print(f'AdaBoost: {ad_gscv.score(xtt, ty_test):.2%}')
print(f'Gradient Boosting: {gb_gscv.score(tX_test, ty_test):.2%}')
###Output
Logistic Regression: 80.34%
Decision Tree: 79.35%
AdaBoost: 80.70%
Gradient Boosting: 80.34%
|
Assignments/DS13_TL_BTR_LUIS_URENA_UNIT2_3_1.ipynb | ###Markdown
Lambda School Data Science*Unit 2, Sprint 3, Module 1*--- Define ML problemsYou will use your portfolio project dataset for all assignments this sprint. AssignmentComplete these tasks for your project, and document your decisions.- [ ] Choose your target. Which column in your tabular dataset will you predict?- [ ] Is your problem regression or classification?- [ ] How is your target distributed? - Classification: How many classes? Are the classes imbalanced? - Regression: Is the target right-skewed? If so, you may want to log transform the target.- [ ] Choose your evaluation metric(s). - Classification: Is your majority class frequency >= 50% and < 70% ? If so, you can just use accuracy if you want. Outside that range, accuracy could be misleading. What evaluation metric will you choose, in addition to or instead of accuracy? - Regression: Will you use mean absolute error, root mean squared error, R^2, or other regression metrics?- [ ] Choose which observations you will use to train, validate, and test your model. - Are some observations outliers? Will you exclude them? - Will you do a random split or a time-based split?- [ ] Begin to clean and explore your data.- [ ] Begin to choose which features, if any, to exclude. Would some features "leak" future information?If you haven't found a dataset yet, do that today. [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2) and choose your dataset.Some students worry, ***what if my model isn't “good”?*** Then, [produce a detailed tribute to your wrongness. That is science!](https://twitter.com/nathanwpyle/status/1176860147223867393)
###Code
import pandas as pd
import numpy as np
data = 'https://raw.githubusercontent.com/LuisFelipeUrena/DS-Unit-2-Applied-Modeling/master/data/apartments/renthop-nyc.csv'
df = pd.read_csv(data)
df.head()
df.tail()
df.isnull().sum()
df.describe(include='number')
df.describe(exclude='number')
df.shape
df['interest_level'].describe()
# majority class; the target I am trying to predict is interest_level
df['interest_level'].value_counts(normalize=True)
#irrelevant columns putting them here just in case i want to drop others
irrelevant = ['created','description','display_address','street_address','latitude','longitude']
df = df.drop(irrelevant,axis=1)
print('Shape',df.shape)
df.head()
#feature engineering the allowance of dogs and cats
cats_and_dogs = df['cats_allowed'] & df['dogs_allowed']
df['cats_and_dogs'] = cats_and_dogs
df['no_pets'] = cats_and_dogs == 0
df.head()
df.isnull().sum()
target = ['interest_level']
from sklearn.model_selection import train_test_split
#split the data into test train val, by random sampling
train , test = train_test_split(df,train_size=0.80,test_size=0.20,
random_state=42
)
train.shape , test.shape
train , val = train_test_split(train,train_size=0.80,test_size=0.20,
random_state=42)
# I ran this command to get a list of all columns to use as features for my set
#test.columns.drop(target)
features = ['bathrooms', 'bedrooms', 'price', 'elevator', 'cats_allowed',
'hardwood_floors', 'dogs_allowed', 'doorman', 'dishwasher', 'no_fee',
'laundry_in_building', 'fitness_center', 'pre-war', 'laundry_in_unit',
'roof_deck', 'outdoor_space', 'dining_room', 'high_speed_internet',
'balcony', 'swimming_pool', 'new_construction', 'terrace', 'exclusive',
'loft', 'garden_patio', 'wheelchair_access', 'common_outdoor_space',
'cats_and_dogs', 'no_pets']
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
y_test = test[target]
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
pipeline = make_pipeline(
RandomForestClassifier(max_depth=10)
)
pipeline.fit(X_train,y_train)
pipeline.score(X_val,y_val)
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
param_distribution = {'randomforestclassifier__max_depth':[10,100,300,400],
'randomforestclassifier__max_features':range(10,30),
'randomforestclassifier__criterion':['gini','entropy']}
search = RandomizedSearchCV(
pipeline,
param_distributions=param_distribution,
n_iter=5,
cv=3,
scoring='accuracy',
verbose=10,
return_train_score=True,
n_jobs=-1
)
search.fit(X_train,y_train)
pipeline = search.best_estimator_
pipeline.score(X_val,y_val)
import seaborn as sns
y = df['interest_level'].value_counts(normalize=True)
y
sns.distplot(y)
y_log = np.log1p(y)
sns.distplot(y_log)
search.best_estimator_
from sklearn.metrics import classification_report
y_tpred = pipeline.predict(X_test)
print(classification_report(y_test, y_tpred))
###Output
_____no_output_____ |
src/user_guide/path_environ.ipynb | ###Markdown
Title * **Difficulty level**: easy* **Time needed to learn**: 10 minutes or less* **Key points**: * a Change system $PATH There are cases where you would like to use a specific version of programs for your workflow but do not want to change the system `$PATH` because of its global effect. In this case you can prepend the paths of these executables to `$PATH` using option `-b`. The following example first creates an executable `ls` in `tmp` with an `echo` command. Using the option `-b tmp`, the `tmp` directory is prefixed to the system `$PATH` before the workflow is executed. The consequence is that this fake `ls` supersedes the system `ls` when `ls` is called in `step_10` of the workflow.
###Code
%sandbox
!mkdir tmp
!echo "#!/bin/bash" > tmp/ls
!echo "echo This is fake ls" >> tmp/ls
!chmod +x tmp/ls
%run -b tmp
[10]
sh:
ls
###Output
[95mWARNING[0m: [95mOption -b is deprecated. Please use option "-r host" with workflow_template instead (#1322).[0m
###Markdown
The `-b` option has a default value of `~/.sos/bin`, so any command under `~/.sos/bin` would be executed (before a system command with the same name) even if the executables are not under the system `$PATH`. This feature allows you to create commands that are only used inside SoS scripts and, more interestingly, allows you to create executables or install programs on the fly. For example, step 20 of the following workflow depends on an executable `lls` that is not a system executable.
###Code
%sandbox
!rm -f ~/.sos/bin/lls
[install_lls: provides=executable('lls')]
run:
echo "#!/bin/bash" > ~/.sos/bin/lls
echo "echo This is lls" >> ~/.sos/bin/lls
chmod +x ~/.sos/bin/lls
[20]
depends: executable('lls')
run:
lls
###Output
_____no_output_____
###Markdown
Title * **Difficulty level**: easy* **Time needed to learn**: 10 minutes or less* **Key points**: * a Change system $PATH There are cases where you would like to use a specific version of programs for your workflow but do not want to change the system `$PATH` because of its global effect. In this case you can prepend the paths of these executables to `$PATH` using option `-b`. The following example first creates an executable `ls` in `tmp` with an `echo` command. Using the option `-b tmp`, the `tmp` directory is prefixed to the system `$PATH` before the workflow is executed. The consequence is that this fake `ls` supersedes the system `ls` when `ls` is called in `step_10` of the workflow.
###Code
%sandbox
!mkdir tmp
!echo "#!/bin/bash" > tmp/ls
!echo "echo This is fake ls" >> tmp/ls
!chmod +x tmp/ls
%run -b tmp
[10]
sh:
ls
###Output
This is fake ls
###Markdown
The `-b` option has a default value of `~/.sos/bin`, so any command under `~/.sos/bin` would be executed (before a system command with the same name) even if the executables are not under the system `$PATH`. This feature allows you to create commands that are only used inside SoS scripts and, more interestingly, allows you to create executables or install programs on the fly. For example, step 20 of the following workflow depends on an executable `lls` that is not a system executable.
###Code
%sandbox
!rm -f ~/.sos/bin/lls
[install_lls: provides=executable('lls')]
run:
echo "#!/bin/bash" > ~/.sos/bin/lls
echo "echo This is lls" >> ~/.sos/bin/lls
chmod +x ~/.sos/bin/lls
[20]
depends: executable('lls')
run:
lls
###Output
_____no_output_____ |
{{cookiecutter.project_slug}}/4_analysis/test_1/notebooks/test_1.ipynb | ###Markdown
{{cookiecutter.project_name}}{{cookiecutter.project_short_description}} Data Sources- file1 : Description of where this file came from Changes- {% now 'utc', '%m-%d-%Y' %} : Started project
###Code
import pandas as pd
from pathlib import Path
from datetime import datetime
###Output
_____no_output_____
###Markdown
File Locations
###Code
today = datetime.today()
in_file = Path.cwd() / "data" / "raw" / "FILE1"
summary_file = Path.cwd() / "data" / "processed" / f"summary_{today:%b-%d-%Y}.pkl"
df = pd.read_csv(in_file)
###Output
_____no_output_____
###Markdown
Column Cleanup- Remove all leading and trailing spaces- Rename the columns for consistency.
###Code
# https://stackoverflow.com/questions/30763351/removing-space-in-dataframe-python
df.columns = [x.strip() for x in df.columns]
cols_to_rename = {'col1': 'New_Name'}
df.rename(columns=cols_to_rename, inplace=True)
###Output
_____no_output_____
###Markdown
Clean Up Data Types
###Code
df.dtypes
###Output
_____no_output_____
###Markdown
Data Manipulation Save output file into processed directorySave a file in the processed directory that is cleaned properly. It will be read in and used later for further analysis.Other options besides pickle include:- feather- msgpack- parquet
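As a hedged sketch of two of those alternatives (assuming `pyarrow` is installed; the file names below are just illustrative):
###Code
summary_parquet = Path.cwd() / "data" / "processed" / f"summary_{today:%b-%d-%Y}.parquet"
summary_feather = Path.cwd() / "data" / "processed" / f"summary_{today:%b-%d-%Y}.feather"
df.to_parquet(summary_parquet)
df.to_feather(summary_feather)
###Output
_____no_output_____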
###Code
df.to_pickle(summary_file)
###Output
_____no_output_____ |
scripts/exercises/01_Exercise_Basics_Solutions.ipynb | ###Markdown
Basics 1 Exercise 1a) Write some code to calculate the Fahrenheit value, given a Celsius value.b) Print out the results in a sentence.°F = °C * 9 / 5 + 32
###Code
degree = input("Enter a celsius number: ")
try:
degree = float(degree)
F = degree * 9/5 + 32
print("{} celsius are {} fahrenheit".format(degree,str(F)))
except ValueError:
print("Please enter a number!")
def calculate_fahrenheit(celsius):
fahrenheit = celsius * 9/5 + 32
return fahrenheit
calculate_fahrenheit(10)
###Output
_____no_output_____
###Markdown
Exercise 2a) Find out if the sequence is all alphabetic and upper case.b) Count all DNA nucleotides of a given sequence.c) Find out if the sequence contains a start codon and find the first position.d) Calculate the GC content and print the results.
###Code
seq = 'ATACAGTCGATCGTAGCTATGATACGATCGTAGCTACGTATACGTACGTCGGCGAT'\
'GCATGCTAACAAATCGATCGTACGTACTGATCGATCGTACGTACGTAGCTtgatcgtacgtcagttgac'
print(seq.isalpha())
print(seq.isupper())
#given sequence = any random sequence
#lets try "gat"
print(seq.count("gat"))
# but if we lowercase the whole string first, the count is different
print(seq.lower().count("gat"))
print("Index of first occurence of start codon: {}".format(seq.index("ATG")))
seq[18:21]
seqUPPER = seq.upper()
gs = seqUPPER.count('G')
cs = seqUPPER.count('C')
gc_content = (gs + cs )/len(seqUPPER)
print("The GC content of the sequence is: {}%".format(round(gc_content*100,2)))
###Output
The GC content of the sequence is: 46.4%
###Markdown
Exercise 3a) Transcribe the DNA sequence into an RNA sequence. ATG -> AUGb) Create the complementary strain of the DNA sequence. ATG -> TAC
###Code
seq
seqUPPER
seqRNA = seqUPPER.replace("T","U")
seqRNA
#using a loop over a seq as a list - a little bit more advanced
seq = seq.upper()
seq_list = list(seq)
for i in range(len(seq_list)):
if seq_list[i] == 'A':
seq_list[i] = 'T'
elif seq_list[i] == 'T':
seq_list[i] = 'A'
elif seq_list[i] == 'C':
seq_list[i] = 'G'
else:
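# note: 'G' (and any unexpected character) falls through to this branch and is mapped to 'C'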
seq_list[i] = 'C'
seq_comp = ''.join(seq_list)
###Output
_____no_output_____
###Markdown
Exercise 4a) Count the number of observations.b) Show the position of the Fuchs in the list. (Do some googling).c) Control this by printing the position you found out.d) Sort your observation list.e) Count the number of different animals.
###Code
obser = 'Fuchs,Elefant,Hund,Hund,Mensch,Fuchs,Glurak'
lst = obser.split(',')
print("First occurence of Fuchs: {}".format(lst.index("Fuchs")))
lst.reverse()
print("Second occurence of Fuchs: {}".format(len(lst) - 1 - lst.index("Fuchs")))
lst = obser.split(',')
lst = obser.split(",")
lstR = lst.copy()
print("First ocurrence of Fuchs: {} : {}".format(lstR.index("Fuchs"),lst[lstR.index("Fuchs")]))
lstR.remove("Fuchs")
print("Second occurence of Fuchs: {} : {}".format(lstR.index("Fuchs")+1,lst[lstR.index("Fuchs")+1]))
print(lst)
lstSorted = lst.copy()
lstSorted.sort()
print(lstSorted)
###Output
['Elefant', 'Fuchs', 'Fuchs', 'Glurak', 'Hund', 'Hund', 'Mensch']
###Markdown
Exercise 5a) Create a list called organisms and store the Arabidopsis, Yeast and Synechocystis.b) Create a second list called genes and store the 27500, 5700, 2500.c) Combine the two lists into a dictionary.d) Print each key:value pair.
###Code
organisms = ["Arabidopsis","Yeast","Synechocystis"]
genes = [27500,5700,2500]
for organism, gene in zip(organisms,genes):
print(organism, gene)
dic = dict(zip(organisms,genes))
dic.keys()
dic.values()
for item in dic.items():
print(item)
###Output
('Arabidopsis', 27500)
('Yeast', 5700)
('Synechocystis', 2500)
|
_archived/sstats/sstats-v0.7.ipynb | ###Markdown
Soccerstats Predictions v0.7 The changelog from v0.5:* Implement down-sampling.* Use a better ML algorithm and evaluation.* Experiment with single-class vs multi-class evaluation metrics. A. Data Cleaning & Preparation 1. Read csv file
###Code
# load and cache data
stat_df = sqlContext.read\
.format("com.databricks.spark.csv")\
.options(header = True)\
.load("data/teamFixtures.csv")\
.cache()
# count hyphen nulls ("-") per column
def count_hyphen_null(df, col):
return df.where(df[col] == "-").count()
# count cols with "-" ie. null
total_rows = stat_df.count()
hyphen_rows = count_hyphen_null(stat_df, "gameFtScore")
to_remove = total_rows - hyphen_rows
print("Total rows: {}".format(total_rows))
print("Hyphen nulls: {}".format(hyphen_rows))
print("Rows to remove: {}".format(to_remove))
###Output
Total rows: 7393
Hyphen nulls: 7095
Rows to remove: 298
###Markdown
2. Filter-out "gameFtScore" column values
###Code
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
# replace non-"-" values with null
nullify_ft_column = udf(
lambda row_value: None if row_value != "-" else row_value,
StringType()
)
stat_df = (stat_df.withColumn("gameFtScore", nullify_ft_column(stat_df.gameFtScore)))
# drop Null values
stat_df = stat_df.dropna()
stat_df.select("gameFtScore").show(5)
print("Total rows: {}".format(stat_df.count()))
###Output
+-----------+
|gameFtScore|
+-----------+
| -|
| -|
| -|
| -|
| -|
+-----------+
only showing top 5 rows
Total rows: 7095
###Markdown
4. Read fixtures Json to dataframe
###Code
fx_df = sqlContext.read.json('data/fixtures.json')
fx_df.printSchema()
###Output
root
|-- fixture_id: string (nullable = true)
|-- ft_score: string (nullable = true)
###Markdown
5. Encode "fixture_id" on stat_df dataframe
###Code
import hashlib
from pyspark.sql.functions import array
def encode_string(value):
return hashlib.sha1(
value.encode("utf-8")
).hexdigest()
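# the SHA-1 of league, division, date and team names acts as a deterministic key for joining with fixtures.json later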
# add an encoded col to "stat_df"; fixture_id
fxcol_df = udf(
lambda row_value: encode_string(u"".join([x for x in row_value])),
StringType()
)
stat_df = (stat_df.withColumn("fixture_id", fxcol_df(array(
"leagueName",
"leagueDivisionName",
"gamePlayDate",
"gameHomeTeamName",
"gameAwayTeamName"
))))
# display some encoded fixtures
stat_df.select("fixture_id").show(5, False)
###Output
+----------------------------------------+
|fixture_id |
+----------------------------------------+
|0e65a84b97ab619fa980ca05c17b9f164a2544ef|
|b09498e7f918713d576d3cf32bfd53b42d2a3c00|
|d95c97258cb1c7ef3060594a392d0a76b0025931|
|8f5afa57d6922cce3121215450ed21d16eaddbbb|
|60df5c61dde404e9b28f111b10f220ce9fc4bf0a|
+----------------------------------------+
only showing top 5 rows
###Markdown
6. Concat the two dataframes: "stat_df" and "fx_df"
###Code
from pyspark.sql.functions import col
# use "left-outer-join" to concat
full_df = stat_df.alias("a")\
.join(fx_df, stat_df.fixture_id == fx_df.fixture_id, "left_outer")\
.select(*[col("a."+c) for c in stat_df.columns] + [fx_df.ft_score])
full_df.select("leagueName", "leagueDivisionName", "gamePlayDate", "gameHomeTeamName", "gameAwayTeamName", "ft_score").show(5, False)
###Output
+----------+-----------------------+------------+----------------+----------------+--------+
|leagueName|leagueDivisionName |gamePlayDate|gameHomeTeamName|gameAwayTeamName|ft_score|
+----------+-----------------------+------------+----------------+----------------+--------+
|Paraguay |Primera Div. - Clausura|2018-04-23 |Deportivo Santa |Sol de America |2-0 |
|England |Southern league |2018-04-23 |Hitchin Town |Dorchester |0-0 |
|England |PL 2 Div. 2 |2018-04-23 |Southampton U23 |Middlesbr. U23 |2-1 |
|Italy |Serie A |2018-04-23 |Genoa |Hellas Verona |3-1 |
|Argentina |Primera B Nacional |2018-04-23 |Dep. Riestra |Aldosivi |1-0 |
+----------+-----------------------+------------+----------------+----------------+--------+
only showing top 5 rows
###Markdown
7. Assess damage on "ft_score " nulls
###Code
# count nulls per column
def count_null(df, col):
return df.where(df[col].isNull()).count()
print("Total rows: {}".format(full_df.count()))
print("Ft_score nulls: {}".format(count_null(full_df, "ft_score")))
# drop null values in ft_Score
full_df = full_df.dropna()
print("Total rows: {}".format(full_df.count()))
print("Ft_score nulls: {}".format(count_null(full_df, "ft_score")))
###Output
Total rows: 6352
Ft_score nulls: 0
###Markdown
B. Machine Learning
###Code
# print dataframe schema
# full_df.printSchema()
###Output
_____no_output_____
###Markdown
1. Clean data
###Code
# drop unnecessary columns
ml_df = full_df.drop(
"gameID", "gamePlayDate", "gamePlayTime", "gameHomeTeamName",
"gameAwayTeamName", "gameHomeTeamID","gameAwayTeamID", "leagueName",
"leagueDivisionName", "gameFtScore", "fixture_id"
)
# separate col types: double & string
# double type features
dtype_features = [
"leagueCompletion", "HTS_teamPosition", "HTS_teamGamesPlayed", "HTS_teamGamesWon",
"HTS_teamGamesDraw", "HTS_teamGamesLost", "HTS_teamGoalsScored", "HTS_teamGoalsConceded",
"HTS_teamPoints", "HTS_teamPointsPerGame", "HTS_teamPPGlast8", "HTS_homeGamesWon",
"HTS_homeGamesDraw", "HTS_homeGamesLost", "HTS_homeGamesPlayed", "HTS_awayGamesWon",
"HTS_awayGamesDraw", "HTS_awayGamesLost", "HTS_awayGamesPlayed", "HTS_teamPPGHome",
"HTS_teamPPGAway", "HTS_teamAvgOpponentPPG", "HTS_homeGoalMargin_by1_wins",
"HTS_homeGoalMargin_by1_losses", "HTS_homeGoalMargin_by2_wins", "HTS_homeGoalMargin_by2_losses",
"HTS_homeGoalMargin_by3_wins", "HTS_homeGoalMargin_by3_losses", "HTS_homeGoalMargin_by4p_wins",
"HTS_homeGoalMargin_by4p_losses", "HTS_awayGoalMargin_by1_wins", "HTS_awayGoalMargin_by1_losses",
"HTS_awayGoalMargin_by2_wins", "HTS_awayGoalMargin_by2_losses", "HTS_awayGoalMargin_by3_wins",
"HTS_awayGoalMargin_by3_losses", "HTS_awayGoalMargin_by4p_wins", "HTS_awayGoalMargin_by4p_losses",
"HTS_totalGoalMargin_by1_wins", "HTS_totalGoalMargin_by1_losses", "HTS_totalGoalMargin_by2_wins",
"HTS_totalGoalMargin_by2_losses", "HTS_totalGoalMargin_by3_wins", "HTS_totalGoalMargin_by3_losses",
"HTS_totalGoalMargin_by4p_wins", "HTS_totalGoalMargin_by4p_losses", "HTS_homeGoalsScored",
"HTS_homeGoalsConceded", "HTS_homeGoalsScoredPerMatch", "HTS_homeGoalsConcededPerMatch",
"HTS_homeScored_ConcededPerMatch", "HTS_awayGoalsScored", "HTS_awayGoalsConceded",
"HTS_awayGoalsScoredPerMatch", "HTS_awayGoalsConcededPerMatch", "HTS_awayScored_ConcededPerMatch",
"ATS_teamPosition", "ATS_teamGamesPlayed", "ATS_teamGamesWon", "ATS_teamGamesDraw", "ATS_teamGamesLost",
"ATS_teamGoalsScored", "ATS_teamGoalsConceded", "ATS_teamPoints", "ATS_teamPointsPerGame",
"ATS_teamPPGlast8", "ATS_homeGamesWon", "ATS_homeGamesDraw", "ATS_homeGamesLost",
"ATS_homeGamesPlayed", "ATS_awayGamesWon", "ATS_awayGamesDraw", "ATS_awayGamesLost",
"ATS_awayGamesPlayed", "ATS_teamPPGHome", "ATS_teamPPGAway", "ATS_teamAvgOpponentPPG",
"ATS_homeGoalMargin_by1_wins", "ATS_homeGoalMargin_by1_losses", "ATS_homeGoalMargin_by2_wins",
"ATS_homeGoalMargin_by2_losses", "ATS_homeGoalMargin_by3_wins", "ATS_homeGoalMargin_by3_losses",
"ATS_homeGoalMargin_by4p_wins", "ATS_homeGoalMargin_by4p_losses", "ATS_awayGoalMargin_by1_wins",
"ATS_awayGoalMargin_by1_losses", "ATS_awayGoalMargin_by2_wins", "ATS_awayGoalMargin_by2_losses",
"ATS_awayGoalMargin_by3_wins", "ATS_awayGoalMargin_by3_losses", "ATS_awayGoalMargin_by4p_wins",
"ATS_awayGoalMargin_by4p_losses", "ATS_totalGoalMargin_by1_wins", "ATS_totalGoalMargin_by1_losses",
"ATS_totalGoalMargin_by2_wins", "ATS_totalGoalMargin_by2_losses", "ATS_totalGoalMargin_by3_wins",
"ATS_totalGoalMargin_by3_losses", "ATS_totalGoalMargin_by4p_wins", "ATS_totalGoalMargin_by4p_losses",
"ATS_homeGoalsScored", "ATS_homeGoalsConceded", "ATS_homeGoalsScoredPerMatch", "ATS_homeGoalsConcededPerMatch",
"ATS_homeScored_ConcededPerMatch", "ATS_awayGoalsScored", "ATS_awayGoalsConceded", "ATS_awayGoalsScoredPerMatch",
"ATS_awayGoalsConcededPerMatch", "ATS_awayScored_ConcededPerMatch"
]
# string type features
stype_features = [
"HTS_teamCleanSheetPercent", "HTS_homeOver1_5GoalsPercent",
"HTS_homeOver2_5GoalsPercent", "HTS_homeOver3_5GoalsPercent", "HTS_homeOver4_5GoalsPercent",
"HTS_awayOver1_5GoalsPercent", "HTS_awayOver2_5GoalsPercent", "HTS_awayOver3_5GoalsPercent",
"HTS_awayOver4_5GoalsPercent", "HTS_homeCleanSheets", "HTS_homeWonToNil", "HTS_homeBothTeamsScored",
"HTS_homeFailedToScore", "HTS_homeLostToNil", "HTS_awayCleanSheets", "HTS_awayWonToNil",
"HTS_awayBothTeamsScored", "HTS_awayFailedToScore", "HTS_awayLostToNil", "HTS_homeScored_ConcededBy_0",
"HTS_homeScored_ConcededBy_1", "HTS_homeScored_ConcededBy_2", "HTS_homeScored_ConcededBy_3",
"HTS_homeScored_ConcededBy_4", "HTS_homeScored_ConcededBy_5p", "HTS_homeScored_ConcededBy_0_or_1",
"HTS_homeScored_ConcededBy_2_or_3", "HTS_homeScored_ConcededBy_4p", "HTS_awayScored_ConcededBy_0",
"HTS_awayScored_ConcededBy_1", "HTS_awayScored_ConcededBy_2", "HTS_awayScored_ConcededBy_3",
"HTS_awayScored_ConcededBy_4", "HTS_awayScored_ConcededBy_5p", "HTS_awayScored_ConcededBy_0_or_1",
"HTS_awayScored_ConcededBy_2_or_3", "HTS_awayScored_ConcededBy_4p",
"ATS_teamCleanSheetPercent", "ATS_homeOver1_5GoalsPercent", "ATS_homeOver2_5GoalsPercent",
"ATS_homeOver3_5GoalsPercent", "ATS_homeOver4_5GoalsPercent", "ATS_awayOver1_5GoalsPercent",
"ATS_awayOver2_5GoalsPercent", "ATS_awayOver3_5GoalsPercent", "ATS_awayOver4_5GoalsPercent",
"ATS_homeCleanSheets", "ATS_homeWonToNil", "ATS_homeBothTeamsScored", "ATS_homeFailedToScore",
"ATS_homeLostToNil", "ATS_awayCleanSheets", "ATS_awayWonToNil", "ATS_awayBothTeamsScored",
"ATS_awayFailedToScore", "ATS_awayLostToNil", "ATS_homeScored_ConcededBy_0", "ATS_homeScored_ConcededBy_1",
"ATS_homeScored_ConcededBy_2", "ATS_homeScored_ConcededBy_3", "ATS_homeScored_ConcededBy_4",
"ATS_homeScored_ConcededBy_5p", "ATS_homeScored_ConcededBy_0_or_1", "ATS_homeScored_ConcededBy_2_or_3",
"ATS_homeScored_ConcededBy_4p", "ATS_awayScored_ConcededBy_0", "ATS_awayScored_ConcededBy_1",
"ATS_awayScored_ConcededBy_2", "ATS_awayScored_ConcededBy_3", "ATS_awayScored_ConcededBy_4",
"ATS_awayScored_ConcededBy_5p", "ATS_awayScored_ConcededBy_0_or_1", "ATS_awayScored_ConcededBy_2_or_3",
"ATS_awayScored_ConcededBy_4p"
]
# integer type features
itype_features = ["HTS_teamGoalsDifference", "ATS_teamGoalsDifference"]
# define feature cols
feature_cols = dtype_features + stype_features + itype_features
from pyspark.sql.types import DoubleType, IntegerType
# cast types to columns: doubles
ml_df = ml_df.select(*[col(c).cast("double").alias(c) for c in dtype_features] + stype_features + itype_features + [ml_df.ft_score])
# convert "HTS_teamGoalsDifference" & "ATS_teamGoalsDifference" to integer
int_udf = udf(
lambda r: int(r),
IntegerType()
)
# cast types to columns: integers
ml_df = ml_df.select(*[int_udf(col(col_name)).name(col_name) for col_name in itype_features] + stype_features + dtype_features + [ml_df.ft_score])
# convert percent cols to float
percent_udf = udf(
lambda r: float(r.split("%")[0])/100,
DoubleType()
)
# cast types to columns: strings
ml_df = ml_df.select(*[percent_udf(col(col_name)).name(col_name) for col_name in stype_features] + itype_features + dtype_features + [ml_df.ft_score])
# add extra column; over/under
over_under_udf = udf(
lambda r: "over" if (int(r.split("-")[0]) + int(r.split("-")[1])) > 3 else "under",
StringType()
)
ml_df = (ml_df.withColumn("over_under", over_under_udf(ml_df.ft_score)))
ml_df.select("ft_score", "over_under").show(5)
# drop "ft_score"
ml_df = ml_df.drop("ft_score")
# def x12_convert(value):
# if int(value.split("-")[0]) > int(value.split("-")[1]):
# label = "home"
# elif int(value.split("-")[0]) < int(value.split("-")[1]):
# label = "away"
# else:
# label = "draw"
# return label
# # add extra column; 1x2
# over_under_udf = udf(
# lambda r: x12_convert(r),
# StringType()
# )
# ml_df = (ml_df.withColumn("over_under", over_under_udf(ml_df.ft_score)))
# ml_df.select("ft_score", "over_under").show(15)
# # drop "ft_score"
# ml_df = ml_df.drop("ft_score")
# import pandas as pd
# numeric_features = [t[0] for t in ml_df.dtypes if t[1] == 'int' or t[1] == 'double']
# sampled_data = ml_df.select(numeric_features).sample(False, 0.01).toPandas()
# axs = pd.plotting.scatter_matrix(sampled_data, figsize=(12, 12));
# # Rotate axis labels and remove axis ticks
# n = len(sampled_data.columns)
# for i in range(n):
# v = axs[i, 0]
# v.yaxis.label.set_rotation(0)
# v.yaxis.label.set_ha('right')
# v.set_yticks(())
# h = axs[n-1, i]
# h.xaxis.label.set_rotation(90)
# h.set_xticks(())
###Output
_____no_output_____
###Markdown
2. Some featurization
###Code
from pyspark.ml.feature import StringIndexer
from pyspark.sql import Row
# index the label; "over_under"
df_indexed = StringIndexer(
inputCol = "over_under",
outputCol = "over_under_indx")\
.fit(ml_df)\
.transform(ml_df)\
.drop("over_under")\
.withColumnRenamed("over_under_indx", "over_under")
# from pyspark.ml.feature import Normalizer
# from pyspark.sql.functions import mean, stddev
# # normalize feature columns; [(x - mean)/std_dev]
# def normalize_col(df, cols):
# # find mean & std for each column
# aggExpr = []
# aggStd = []
# for col in cols:
# aggExpr.append(mean(df[col]).alias(col))
# aggStd.append(stddev(df[col]).alias(col + "_stddev"))
# averages = df.agg(*aggExpr).collect()[0]
# std_devs = df.agg(*aggStd).collect()[0]
# # standardize dataframe
# for col in cols:
# df = df.withColumn(col + "_norm", ((df[col] - averages[col]) / std_devs[col + "_stddev"]))\
# .drop(col)\
# .withColumnRenamed(col+"_norm", col)
# return df, averages, std_devs
# # normalize dataframe
# df_indexed, averages, std_devs = normalize_col(df_indexed, feature_cols)
from pyspark.ml.linalg import Vectors
df_indexed = df_indexed[feature_cols + ["over_under"]]
# vectorize labels and features
row = Row("label", "features")
label_fts = df_indexed.rdd.map(
lambda r: (row(r[-1], Vectors.dense(r[:-1])))
).toDF()
label_fts.show(5)
# from pyspark.ml.feature import PCA
# # select 10 meaningful features
# label_fts = PCA(
# k=14,
# inputCol="features",
# outputCol="pcaFeatures")\
# .fit(label_fts)\
# .transform(label_fts)\
# .drop("features")\
# .withColumnRenamed("pcaFeatures", "features")
# from pyspark.ml.feature import ChiSqSelector
# # select 10 meaningful features
# label_fts = ChiSqSelector(
# numTopFeatures=10,
# featuresCol="features",
# outputCol="cqsFeatures",
# labelCol="label")\
# .fit(label_fts)\
# .transform(label_fts)\
# .drop("features")\
# .withColumnRenamed("cqsFeatures", "features")
from pyspark.ml.feature import StandardScaler
# apply feature normalization to transform a feature to have
# a mean of 0 and standard deviation 1.
label_fts = StandardScaler(
inputCol="features",
outputCol="features_norm",
withStd=True,
withMean=True)\
.fit(label_fts)\
.transform(label_fts)\
.drop("features")\
.withColumnRenamed("features_norm", "features")
# split train/test values
train, test = label_fts.randomSplit([0.8, 0.2])
print("Train values: '{}'".format(train.count()))
print("Test values: '{}'".format(test.count()))
train.groupBy("label").count().orderBy("label").show()
label_0 = train.groupBy("label").count().collect()[0]["count"]
label_1 = train.groupBy("label").count().collect()[1]["count"]
# label_2 = train.groupBy("label").count().collect()[2]["count"]
# perform some down-sampling
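# sampleBy keeps each class with the given fraction: class 0 is thinned to roughly the size of
# class 1 (label_1/label_0), while class 1 is kept in full (fraction 1.0)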
train = train.sampleBy(
'label',
fractions={
0: float(label_1)/label_0,
1: float(label_1)/label_1})
train.groupBy("label").count().orderBy("label").show()
###Output
+-----+-----+
|label|count|
+-----+-----+
| 0.0| 1484|
| 1.0| 1459|
+-----+-----+
###Markdown
3. Apply some ML models
###Code
from pyspark.ml.classification import RandomForestClassifier
# random forest model
r_forest = RandomForestClassifier(
numTrees = 100,
labelCol = "label"
)
from time import time
# start timer
start_time = time()
# fit model
rf_model = r_forest.fit(train)
print("Training time taken: {0:.4f}(min)".format((time() - start_time)/60))
from pyspark.ml.evaluation import BinaryClassificationEvaluator
predictions = rf_model.transform(test)
acc_one = BinaryClassificationEvaluator(
rawPredictionCol="rawPrediction",
labelCol="label",
metricName="areaUnderROC")\
.evaluate(predictions)
print("Accuracy (binary): '{0:.4f}%'".format(acc_one*100))
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
acc_two = MulticlassClassificationEvaluator(
predictionCol="prediction",
labelCol="label",
metricName="f1")\
.evaluate(predictions)
print("Accuracy (multi-class): '{0:.4f}%'".format(acc_two*100))
# from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
# from pyspark.ml.evaluation import BinaryClassificationEvaluator
# # tune best performing model: random forest
# paramGrid = ParamGridBuilder()\
# .addGrid(r_forest.maxDepth, [5,10,15,20,25,30])\
# .addGrid(r_forest.numTrees, [30, 60, 90, 120, 150, 180, 200])\
# .build()
# # define evaluation metric
# evaluator = BinaryClassificationEvaluator(
# rawPredictionCol="rawPrediction",
# metricName="areaUnderROC"
# )
# # start tuning
# cv = CrossValidator(
# estimator=r_forest,
# estimatorParamMaps=paramGrid,
# evaluator=evaluator,
# numFolds=5
# )
# # start timer
# cv_start_time = time()
# # fit tuned model
# cvModel = cv.fit(train)
# # calculate time taken to tune prameters
# print "Hyper-param tuning time taken (min): ", (time() - cv_start_time)/60
# # accuracy after tuning
# train_pred = cvModel.transform(train)
# test_pred = cvModel.transform(test)
# print("Random forest accuracy (train): {0:.4f}%".format((evaluator.evaluate(train_pred))*100))
# print("Random forest accuracy (test): {0:.4f}%".format((evaluator.evaluate(test_pred))*100))
###Output
Random forest accuracy (train): 77.3717%
Random forest accuracy (test): 63.8220%
|
LSTM Generator/Generate Anime Synopsis.ipynb | ###Markdown
Loading Packages
###Code
import pandas as pd
import random
import re
from tqdm import tqdm
import numpy as np
from spacy.tokenizer import Tokenizer
from spacy.lang.en import English
import torch
import spacy
nlp = English()
import torch.nn as nn
import nltk
pd.options.display.max_columns = 500
import warnings
warnings.filterwarnings(action='ignore')
###Output
_____no_output_____
###Markdown
Loading data
###Code
data = pd.read_csv('Data/eda-data.csv',index_col=0)
synopsis = data.synopsis
print('Number of Anime synopsis we have: ',len(synopsis))
###Output
Number of Anime synopsis we have: 16610
###Markdown
Viewing some random synopsis
###Code
i = random.randint(0,len(synopsis))
print('Synopsis example\n\nAnime:{} \nSynopsis:{}\n'.format(data['anime_name'].values[i],synopsis.values[i]))
###Output
Synopsis example
Anime:Two Tea Two
Synopsis:The woman does the decision to coexist with the past.Returning to one person was not an answer. It is a new image.(Source: Official You Tube channel)
###Markdown
Data Cleaning
###Code
def remove_source(text):
cln_text = text
if '(Source' in cln_text:
cln_text,_,_ = cln_text.partition('(Source')
elif '[Written ' in cln_text:
cln_text,_,_ = cln_text.partition('[Written')
return cln_text
def clean_synopsis(data):
# removing hentai and kids tags
data = data[(data.Hentai != 1) & (data.Kids != 1)]
synopsis = data.synopsis
# keep only synopses with more than 30 and at most 300 words (mark the rest for removal)
synopsis = synopsis.apply(lambda x: x if ((len(str(x).strip().split())<=300) and len(str(x).strip().split())>30 ) else -1)
synopsis = synopsis[synopsis!=-1]
# removing source text
synopsis = synopsis.apply(lambda x: remove_source(x))
# removing japanese characters
synopsis = synopsis.apply(lambda x: re.sub("([^\x00-\x7F])+"," ",x))
# remove symbols
rx = re.compile('^[&#/@`)(;<=\'"$%>]')
synopsis = synopsis.apply(lambda x: rx.sub('',x))
synopsis = synopsis.apply(lambda x: x.replace('>',""))
synopsis = synopsis.apply(lambda x: x.replace('`',""))
synopsis = synopsis.apply(lambda x: x.replace(')',""))
synopsis = synopsis.apply(lambda x: x.replace('(',""))
# removing adaptation animes (some relevant might get deleted but there aren`t a lot so we wont be affected as much)
synopsis = synopsis[synopsis.apply(lambda x: 'adaptation' not in str(x).lower())]
synopsis = synopsis[synopsis.apply(lambda x: 'music video' not in str(x).lower())]
synopsis = synopsis[synopsis.apply(lambda x: 'based on' not in str(x).lower())]
synopsis = synopsis[synopsis.apply(lambda x: 'spin-off' not in str(x).lower())]
return synopsis.reset_index(drop=True)
cleaned_synopsis = clean_synopsis(data)
print('Size: ',len(cleaned_synopsis))
###Output
Size: 7309
###Markdown
Configurations
###Code
class config:
tokenizer = nltk.word_tokenize
#data = AnimeDataset(cleaned_synopsis)
batch_size = 32
#vocab_size = data.vocab_size
seq_len = 30
emb_dim = 100
epochs = 15
hidden_dim = 512
model_path = 'lm_lrdecay_drop.bin'
###Output
_____no_output_____
###Markdown
Function to create batches
###Code
def create_dataset(synopsis,batch_size,seq_len):
np.random.seed(0)
synopsis = synopsis.apply(lambda x: str(x).lower()).values
synopsis_text = ' '.join(synopsis)
tokens = config.tokenizer(synopsis_text)
global num_batches
num_batches = int(len(tokens)/(seq_len*batch_size))
tokens = tokens[:num_batches*batch_size*seq_len]
words = sorted(set(tokens))
w2i = {w:i for i,w in enumerate(words)}
i2w = {i:w for i,w in enumerate(words)}
tokens = [w2i[tok] for tok in tokens]
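# next-word targets: the token stream shifted left by one, with the last target wrapping around to the first token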
target = np.zeros_like((tokens))
target[:-1] = tokens[1:]
target[-1] = tokens[0]
input_tok = np.reshape(tokens,(batch_size,-1))
target_tok = np.reshape(target,(batch_size,-1))
print(input_tok.shape)
print(target_tok.shape)
vocab_size = len(i2w)
return input_tok,target_tok,vocab_size,w2i,i2w
def create_batches(input_tok,target_tok,batch_size,seq_len):
num_batches = np.prod(input_tok.shape)//(batch_size*seq_len)
for i in range(0,num_batches*seq_len,seq_len):
yield input_tok[:,i:i+seq_len], target_tok[:,i:i+seq_len]
###Output
_____no_output_____
###Markdown
Defining model
###Code
class LSTMModel(nn.Module):
def __init__(self,hid_dim,emb_dim,vocab_size,num_layers=1):
super(LSTMModel,self).__init__()
self.hid_dim = hid_dim
self.emb_dim = emb_dim
self.num_layers = num_layers
self.vocab_size = vocab_size+1
self.embedding = nn.Embedding(self.vocab_size,self.emb_dim)
self.lstm = nn.LSTM(self.emb_dim,self.hid_dim,batch_first = True,num_layers = self.num_layers)
self.drop = nn.Dropout(0.3)
self.linear = nn.Linear(self.hid_dim,vocab_size) # from here we will randomly sample a word
def forward(self,x,prev_hid):
x = self.embedding(x)
x,hid = self.lstm(x,prev_hid)
x = self.drop(x)
x = self.linear(x)
return x,hid
def zero_state(self,batch_size):
return (torch.zeros(self.num_layers,batch_size,self.hid_dim),torch.zeros(self.num_layers,batch_size,self.hid_dim))
###Output
_____no_output_____
###Markdown
Utilities
###Code
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def loss_fn(predicted,target):
loss = nn.CrossEntropyLoss()
return loss(predicted,target)
###Output
_____no_output_____
###Markdown
Training Function
###Code
def train_fn(model,device,dataloader,optimizer):
model.train()
tk0 = tqdm(dataloader,position=0,leave=True,total = num_batches)
train_loss = AverageMeter()
hid_state,cell_state = model.zero_state(config.batch_size)
hid_state = hid_state.to(device)
cell_state = cell_state.to(device)
losses = []
for inp,target in tk0:
inp = torch.tensor(inp,dtype=torch.long).to(device)
target = torch.tensor(target,dtype=torch.long).to(device)
optimizer.zero_grad()
pred,(hid_state,cell_state) = model(inp,(hid_state,cell_state))
#print(pred.transpose(1,2).shape)
loss = loss_fn(pred.transpose(1,2),target)
hid_state = hid_state.detach()
cell_state = cell_state.detach()
loss.backward()
_ = torch.nn.utils.clip_grad_norm_(model.parameters(),max_norm=2) # to avoid gradient explosion
optimizer.step()
train_loss.update(loss.detach().item())
tk0.set_postfix(loss = train_loss.avg)
losses.append(loss.detach().item())
return np.mean(losses)
###Output
_____no_output_____
###Markdown
Creating the dataset
###Code
input_tok,target_tok,vocab_size,w2i,i2w = create_dataset(cleaned_synopsis,batch_size=config.batch_size,seq_len=config.seq_len)
###Output
(32, 25380)
(32, 25380)
###Markdown
Bringing it all together in the run function
###Code
def run():
device = 'cuda'
model = LSTMModel(vocab_size=vocab_size,emb_dim=config.emb_dim,hid_dim=config.hidden_dim,num_layers=3).to(device)
optimizer = torch.optim.Adam(model.parameters(),lr=0.001)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, mode = 'min', patience=2, verbose=True, factor=0.5)
epochs = config.epochs
best_loss = 999
for i in range(1,epochs+1):
train_dataloader = create_batches(batch_size=config.batch_size,input_tok=input_tok,seq_len=config.seq_len,target_tok=target_tok)
print('Epoch..',i)
loss = train_fn(model,device,train_dataloader,optimizer)
if loss<best_loss:
best_loss = loss
torch.save(model.state_dict(),config.model_path)
scheduler.step(loss)
torch.cuda.empty_cache()
return model
model = run()
###Output
0%| | 0/846 [00:00<?, ?it/s]
###Markdown
Generation step
###Code
def inference(model,input_text,device,top_k=5,length = 100):
output = ''
model.eval()
tokens = config.tokenizer(input_text)
h,c = model.zero_state(1)
h = h.to(device)
c = c.to(device)
for t in tokens:
output = output+t+' '
pred,(h,c) = model(torch.tensor(w2i[t.lower()]).view(1,-1).to(device),(h,c))
#print(pred.shape)
for i in range(length):
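# keep only the top_k most probable next tokens and pick one of them uniformly at random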
_,top_ix = torch.topk(pred[0],k = top_k)
choices = top_ix[0].tolist()
choice = np.random.choice(choices)
out = i2w[choice]
output = output + out + ' '
pred,(h,c) = model(torch.tensor(choice,dtype=torch.long).view(1,-1).to(device),(h,c))
return output
device = 'cpu'
mod = LSTMModel(emb_dim=config.emb_dim,hid_dim=config.hidden_dim,vocab_size=vocab_size,num_layers=3).to(device)
mod.load_state_dict(torch.load(config.model_path))
print('AI generated Anime synopsis:')
inference(model = mod, input_text = 'In the ', top_k = 30, length = 100, device = device)
###Output
AI generated Anime synopsis:
|
monte_carlo_estimation.ipynb | ###Markdown
Monte Carlo Estimation[Monte Carlo Estimation](http://en.wikipedia.org/wiki/Monte_Carlo_method) is a method of numerically estimating quantities that we can't (or don't want to) calculate analytically, by randomly generating samples. In this IPython Notebook, I'm going to use Monte Carlo Estimation to estimate:1. The area under a curve2. The value of $\pi$ Monte Carlo Estimation of AreaLet's use [Monte Carlo Estimation](http://en.wikipedia.org/wiki/Monte_Carlo_method) to estimate the area under this curve (from 0 to 10): $$y = 5 \sin(6~x) + 3 \sin(2~x) + 7$$ Here are the basic steps:1. Define a rectangle which encloses the part of the curve for which we want to find the area.2. Randomly generate points within that region.3. Find which of the randomly generated points are under the curve by checking them against the equation of the function; a point (x,y) is under the curve if y <= f(x).4. Find the ratio of the points under the curve to all the randomly generated points, and multiply that number by the area of the enclosing rectangle (see below for why). Why do we need to multiply by the area of the rectangle?1. $$\frac{area\ under\ curve}{area\ of\ rectangle} = \frac{points\ under\ curve}{points\ in\ rectangle}$$2. $$area\ under\ curve\ =\ {area\ of\ rectangle}~\left(\frac{points\ under\ curve}{points\ in\ rectangle}\right)$$ Let's do it:
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import random
import math
f = lambda x: 5 * np.sin(6 * x) + 3 * np.sin(2 * x) + 7
x = np.linspace(0, 10, 1000)
y = f(x)
_ = plt.plot(x,y)
NUM_POINTS = 10000
rect_width = 10
rect_height = 14
rand_x = lambda: random.uniform(0, rect_width)
rand_y = lambda: random.uniform(0, rect_height)
points = [(rand_x(), rand_y()) for i in range(NUM_POINTS)]
points_under = [point for point in points if point[1] <= f(point[0])]
points_above = list(set(points) - set(points_under))
# Separate x's and y's to pass to scatter function.
(under_x, under_y) = zip(*list(points_under))
(over_x, over_y) = zip(*list(points_above))
fig = plt.figure()
fig.set_size_inches(12, 8)
_ = plt.scatter(under_x, under_y, s=1, color='red')
_ = plt.scatter(over_x, over_y, s=1, color='green')
# Area = area of domain rectangle * num_points_under/num_points_total
area = rect_width * rect_height * len(points_under)*1.0/len(points)
print("Estimate of area under the curve:", area)
# Sanity check: it looks like the area under is about half of the rectangle, and the rectangle
# area is 10*14 = 140, so it should be around 70.
###Output
Estimate of area under the curve: 71.89
###Markdown
--- Monte Carlo Estimation of $\pi$Let's draw a circle by randomly generating points in the space ($x$,$y$) such that $x^{2}+y^{2}\leq1$
###Code
import random
NUM_POINTS = 10000
# Randomly generate points (x[i], y[i]) such that -1 <= x[i] <= 1 and -1 <= y[i] <= 1.
x = [random.uniform(-1,1) for i in range(NUM_POINTS)]
y = [random.uniform(-1,1) for i in range(NUM_POINTS)]
circle_x = []
circle_y = []
outsiders_x = []
outsiders_y = []
# Determine which points are inside the circle (and for visualization purposes, also
# determine which are outside the circle).
for i in range(NUM_POINTS):
if x[i]**2 + y[i]**2 <= 1:
circle_x.append(x[i])
circle_y.append(y[i])
else:
outsiders_x.append(x[i])
outsiders_y.append(y[i])
# Plot it.
fig = plt.figure()
fig.set_size_inches(10, 10)
_ = plt.scatter(outsiders_x, outsiders_y, s=1, color='green')
_ = plt.scatter(circle_x, circle_y, s=1, color='red')
###Output
_____no_output_____
###Markdown
Now let's estimate $\pi$Here are a few facts:* The area of a circle is $\pi r^2$.* Since our circle has a radius of 1, its area is $\pi$.* The area of the square is $2*2 = 4$.* The ratio of the **area of the circle** to the **area of the square** should be the same as the ratio of the **points inside the circle** to the **points inside the square**.* So $\frac{\pi}{4} = \frac{circle\ points}{total\ points}$, which gives $\pi = 4~\frac{circle\ points}{total\ points}$
###Code
print("Estimate of area of circle (pi):", 4 * (len(circle_x)*1.0 / len(x)))
###Output
Estimate of area of circle (pi): 3.1356
|
FeatureExtractionModule/src/autoencoder_approach/Per task approach/SX/autoencoder_classifiers-SX-low-vs-mid-vs-high-no-TFv1.ipynb | ###Markdown
Classifiers - SX - low vs mid vs high complexity - no TFv1Exploring different classifiers with different autoencoders for the SX task. No contractive autoencoder because it needs TFv1 compatibility. Table of contents: autoencoders: [Undercomplete Autoencoder](Undercomplete-Autoencoder) [Sparse Autoencoder](Sparse-Autoencoder) [Deep Autoencoder](Deep-Autoencoder) classifiers: [Simple dense classifier](Simple-dense-classifier) [LSTM-based classifier](LSTM-based-classifier) [kNN](kNN) [SVC](SVC) [Random Forest](Random-Forest) [XGBoost](XGBoost)
###Code
import datareader # made by the previous author for reading the collected data
import dataextractor # same as above
import pandas
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, regularizers
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Dropout, Activation, Input
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Conv1D, MaxPooling1D
from tensorflow.keras.optimizers import Adam, Nadam
from tensorflow.keras.utils import to_categorical
import tensorflow.keras.backend as K
tf.keras.backend.set_floatx('float32') # call this, to set keras to use float32 to avoid a warning message
metrics = ['accuracy']
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.multiclass import OneVsRestClassifier
import json
from datetime import datetime
import warnings
import matplotlib.pyplot as plt
import random
random.seed(1)
np.random.seed(4)
tf.random.set_seed(2)
# Start the notebook in the terminal with "PYTHONHASHSEED=0 jupyter notebook"
# or in anaconda "set PYTHONHASHSEED=0" then start jupyter notebook
import os
if os.environ.get("PYTHONHASHSEED") != "0":
raise Exception("You must set PYTHONHASHSEED=0 before starting the Jupyter server to get reproducible results.")
###Output
_____no_output_____
###Markdown
This is the modified original author's code for reading data:
###Code
def model_train(model, x_train, y_train, batch_size, epochs, x_valid, y_valid, x_test, y_test):
"""Train model with the given training, validation, and test set, with appropriate batch size and # epochs."""
epoch_data = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_valid, y_valid), verbose=0)
score = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=0)
acc = score[1]
score = score[0]
return score, acc, epoch_data
def get_task_complexities_timeframes_br_hb(path, ident, seconds, checkIfValid=True):
"""Returns raw data along with task complexity class.
TODO: join functions. Add parameter to choose different task types and complexities"""
dataread = datareader.DataReader(path, ident) # initialize path to data
data = dataread.read_grc_data() # read from files
samp_rate = int(round(len(data[1]) / max(data[0])))
cog_res = dataread.read_cognitive_load_study(str(ident) + '-primary-extract.txt')
tasks_data = np.empty((0, seconds*samp_rate))
tasks_y = np.empty((0, 1))
breathing = np.empty((0,12))
heartbeat = np.empty((0,10))
busy_n = dataread.get_data_task_timestamps(return_indexes=True)
for i in cog_res['task_number']:
task_num_table = i - 225 # 0 - 17
tmp_tasks_data = np.empty((0, seconds*samp_rate))
tmp_tasks_y = np.empty((0, 1))
tmp_breathing = np.empty((0,12))
tmp_heartbeat = np.empty((0,10))
### task complexity classification
# if cog_res['task_complexity'][task_num_table] == 'medium':
# continue
if cog_res['task_label'][task_num_table] != 'SX':
continue
map_compl = {
'low': 0,
'medium': 2,
'high': 1
}
for j in range(10):
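# take ten 30-second windows per task, each ending one second (samp_rate samples) earlier than the previous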
new_end = int(busy_n[task_num_table][1] - j * samp_rate)
new_start = int(new_end - samp_rate*30)
dataextract = dataextractor.DataExtractor(data[0][new_start:new_end],
data[1][new_start:new_end], samp_rate)
# get extracted features for breathing
tmpBR = dataextract.extract_from_breathing_time(data[0][new_start:new_end],
data[1][new_start:new_end])
#get extracted features for heartbeat
tmpHB = dataextract.extract_from_heartbeat_time(data[0][new_start:new_end],
data[1][new_start:new_end])
if checkIfValid and not(tmpBR['br_ok'][0]):
continue
try:
tmp_tasks_data = np.vstack((tmp_tasks_data, dataextract.y[-samp_rate * seconds:]))
tmp_tasks_y = np.vstack((tmp_tasks_y, map_compl.get(cog_res['task_complexity'][task_num_table])))
tmp_breathing = np.vstack((tmp_breathing, tmpBR.to_numpy(dtype='float64', na_value=0)[0][:-1]))
tmp_heartbeat = np.vstack((tmp_heartbeat, tmpHB.to_numpy(dtype='float64', na_value=0)[0][:-1]))
except ValueError:
# print(ident)
continue
tasks_data = np.vstack((tasks_data, dataextract.y))
tasks_y = np.vstack((tasks_y, map_compl.get(cog_res['task_complexity'][task_num_table])))
breathing = np.vstack((breathing, tmpBR.to_numpy(dtype='float64', na_value=0)[0][:-1]))
heartbeat = np.vstack((heartbeat, tmpHB.to_numpy(dtype='float64', na_value=0)[0][:-1]))
return tasks_data, tasks_y, breathing, heartbeat
def get_data_from_idents_br_hb(path, idents, seconds):
"""Go through all user data and take out windows of only <seconds> long time frames,
along with the given class (from 'divide_each_task' function).
"""
samp_rate = 43 # hard-coded sample rate
data, ys = np.empty((0, samp_rate*seconds)), np.empty((0, 1))
brs = np.empty((0,12))
hbs = np.empty((0,10))
combined = np.empty((0,22))
# was getting some weird warnings; Stack Overflow said to ignore them
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
for i in idents:
#x, y, br, hb = get_busy_vs_relax_timeframes_br_hb(path, i, seconds) # either 'get_busy_vs_relax_timeframes',
# get_engagement_increase_vs_decrease_timeframes, get_task_complexities_timeframes or get_TLX_timeframes
x, y, br, hb = get_task_complexities_timeframes_br_hb(path, i, seconds)
data = np.vstack((data, x))
ys = np.vstack((ys, y))
brs = np.vstack((brs, br))
hbs = np.vstack((hbs, hb))
combined = np.hstack((brs,hbs))
return data, ys, brs, hbs, combined
# Accs is a dictionary which holds 1d arrays of accuracies in each key
# except the key 'test id', which holds strings of the id that yielded the corresponding accuracies
def print_accs_stats(accs):
printDict = {}
# loop over each key
for key in accs:
if (key == 'test id'):
# skip calculating ids
continue
printDict[key] = {}
tmpDict = printDict[key]
# calculate and print some statistics
tmpDict['min'] = np.min(accs[key])
tmpDict['max'] = np.max(accs[key])
tmpDict['mean'] = np.mean(accs[key])
tmpDict['median'] = np.median(accs[key])
print(pandas.DataFrame.from_dict(printDict).to_string())
def clear_session_and_set_seeds():
# clear session and set seeds again
K.clear_session()
random.seed(1)
np.random.seed(4)
tf.random.set_seed(2)
###Output
_____no_output_____
###Markdown
Prepare data Initialize variables:
###Code
# initialize a dictionary to store accuracies for comparison
accuracies = {}
# used for reading the data into an array
seconds = 30 # time window length
samp_rate = 43 # hard-coded sample rate
phase_shape = np.empty((0, samp_rate*seconds))
y_shape = np.empty((0, 1))
breathing_shape = np.empty((0,12))
heartbeat_shape = np.empty((0,10))
combined_shape = np.empty((0,22))
idents = ['2gu87', 'iz2ps', '1mpau', '7dwjy', '7swyk', '94mnx', 'bd47a', 'c24ur', 'ctsax', 'dkhty', 'e4gay',
'ef5rq', 'f1gjp', 'hpbxa', 'pmyfl', 'r89k1', 'tn4vl', 'td5pr', 'gyqu9', 'fzchw', 'l53hg', '3n2f9',
'62i9y']
path = '../../../../../StudyData/'
# change to len(idents) at the end to use all the data
n = len(idents)
# Holds all the data so it doesnt have to be read from file each time
data_dict = {}
###Output
_____no_output_____
###Markdown
Fill the data dictionary:
###Code
for ident in idents.copy():
# read data
phase, y, breathing, heartbeat, combined = get_data_from_idents_br_hb(path, [ident], seconds)
if (y.shape[0] <= 0 or y.shape[0] == None):
idents.remove(ident)
print(ident)
continue
# initialize ident in
data_dict[ident] = {}
tmpDataDict = data_dict[ident]
# load data into dictionary
tmpDataDict['phase'] = phase
tmpDataDict['y'] = y
tmpDataDict['breathing'] = breathing
tmpDataDict['heartbeat'] = heartbeat
tmpDataDict['combined'] = combined
print(n)
n = len(idents)
print(n)
# load all phase data to use for training autoencoders
phase_all_train = get_data_from_idents_br_hb(path, idents[:-2], seconds)[0]
# Scale each row with MinMax to range [0,1]
phase_all_train = MinMaxScaler().fit_transform(phase_all_train.T).T
# load all validation phase data to use for training autoencoders
phase_all_valid = get_data_from_idents_br_hb(path, idents[-2:], seconds)[0]
# Scale each row with MinMax to range [0,1]
phase_all_valid = MinMaxScaler().fit_transform(phase_all_valid.T).T
###Output
_____no_output_____
###Markdown
Autoencoders Train autoencoders to save their encoded representations in the data dictionary:
###Code
# AE Training params
batch_size = 128
epochs = 1000
encoding_dim = 30
ae_encoded_shape = np.empty((0,encoding_dim))
def compare_plot_n(data1, data2, data3, plot_n=3):
#plot data1 values
plt.figure()
plt.figure(figsize=(20, 4))
for i in range(plot_n):
plt.subplot(1, 5, i+1)
plt.plot(data1[i])
#plot data2 values
plt.figure()
plt.figure(figsize=(20, 4))
for i in range(plot_n):
plt.subplot(1, 5, i+1)
plt.plot(data2[i])
#plot data3 values
plt.figure()
plt.figure(figsize=(20, 4))
for i in range(plot_n):
plt.subplot(1, 5, i+1)
plt.plot(data3[i])
###Output
_____no_output_____
###Markdown
Undercomplete Autoencoder from https://blog.keras.io/building-autoencoders-in-keras.html
###Code
def undercomplete_ae(x, encoding_dim=64, encoded_as_model=False):
# Simplest possible autoencoder from https://blog.keras.io/building-autoencoders-in-keras.html
# this is our input placeholder
input_data = Input(shape=x[0].shape, name="input")
dropout = Dropout(0.125, name="dropout", seed=42)(input_data)
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='relu', name="encoded")(dropout)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(x[0].shape[0], activation='sigmoid', name="decoded")(encoded)
autoencoder = Model(input_data, decoded)
# compile the model
autoencoder.compile(optimizer='adam', loss='binary_crossentropy', metrics=metrics)
# if return encoder in the encoded variable
if encoded_as_model:
encoded = Model(input_data, encoded)
return autoencoder, encoded
###Output
_____no_output_____
###Markdown
Train autoencoder on data:
###Code
clear_session_and_set_seeds()
uc_ae, uc_enc = undercomplete_ae(phase_all_train, encoding_dim=encoding_dim, encoded_as_model=True)
uc_ae.fit(phase_all_train, phase_all_train,
validation_data=(phase_all_valid, phase_all_valid),
batch_size=batch_size,
shuffle=True,
epochs=epochs,
verbose=0)
###Output
_____no_output_____
###Markdown
Plot signal, reconstruction and encoded representation:
###Code
data2 = uc_ae.predict(phase_all_valid)
data3 = uc_enc.predict(phase_all_valid)
compare_plot_n(phase_all_valid, data2, data3)
###Output
_____no_output_____
###Markdown
Store the encoded representations in the data dictionary:
###Code
for ident in data_dict:
tmpDataDict = data_dict[ident]
# read data
phase = tmpDataDict['phase']
uc_data = uc_enc.predict(phase)
# load data into dictionary
tmpDataDict['undercomplete_encoded'] = uc_data
###Output
_____no_output_____
###Markdown
Sparse Autoencoder from https://blog.keras.io/building-autoencoders-in-keras.html
###Code
def sparse_ae(x, encoding_dim=64, encoded_as_model=False):
# Simplest possible autoencoder from https://blog.keras.io/building-autoencoders-in-keras.html
# this is our input placeholder
input_data = Input(shape=x[0].shape, name="input")
dropout = Dropout(0.125, name="dropout", seed=42) (input_data)
# "encoded" is the encoded representation of the input
# add a sparsity constraint
encoded = Dense(encoding_dim, activation='relu', name="encoded",
activity_regularizer=regularizers.l1(10e-5))(dropout)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(x[0].shape[0], activation='sigmoid', name="decoded")(encoded)
# this model maps an input to its reconstruction
autoencoder = Model(input_data, decoded, name="sparse_ae")
# compile the model
autoencoder.compile(optimizer='adam', loss='binary_crossentropy', metrics=metrics)
# if return encoder in the encoded variable
if encoded_as_model:
encoded = Model(input_data, encoded)
return autoencoder, encoded
###Output
_____no_output_____
###Markdown
Train autoencoder on data:
###Code
clear_session_and_set_seeds()
sp_ae, sp_enc = sparse_ae(phase_all_train, encoding_dim=encoding_dim, encoded_as_model=True)
sp_ae.fit(phase_all_train, phase_all_train,
validation_data=(phase_all_valid, phase_all_valid),
batch_size=batch_size,
shuffle=True,
epochs=epochs,
verbose=0)
###Output
_____no_output_____
###Markdown
Plot signal, reconstruction and encoded representation:
###Code
data2 = sp_ae.predict(phase_all_valid)
data3 = sp_enc.predict(phase_all_valid)
compare_plot_n(phase_all_valid, data2, data3)
###Output
_____no_output_____
###Markdown
Store the encoded representations in the data dictionary:
###Code
for ident in data_dict:
tmpDataDict = data_dict[ident]
# read data
phase = tmpDataDict['phase']
sp_data = sp_enc.predict(phase)
# load data into dictionary
tmpDataDict['sparse_encoded'] = sp_data
###Output
_____no_output_____
###Markdown
Deep Autoencoder from https://blog.keras.io/building-autoencoders-in-keras.html
###Code
def deep_ae(x, enc_layers=[512,128], encoding_dim=64, dec_layers=[128,512], encoded_as_model=False):
# From https://www.tensorflow.org/guide/keras/functional#use_the_same_graph_of_layers_to_define_multiple_models
input_data = keras.Input(shape=x[0].shape, name="normalized_signal")
model = Dropout(0.125, name="dropout", autocast=False, seed=42)(input_data)
for i in enumerate(enc_layers):
model = Dense(i[1], activation="relu", name="dense_enc_" + str(i[0]+1))(model)
encoded_output = Dense(encoding_dim, activation="relu", name="encoded_signal")(model)
encoded = encoded_output
model = layers.Dense(dec_layers[0], activation="sigmoid", name="dense_dec_1")(encoded_output)
for i in enumerate(dec_layers[1:]):
model = Dense(i[1], activation="sigmoid", name="dense_dec_" + str(i[0]+2))(model)
decoded_output = Dense(x[0].shape[0], activation="sigmoid", name="reconstructed_signal")(model)
autoencoder = Model(input_data, decoded_output, name="autoencoder")
# compile the model
autoencoder.compile(optimizer='adam', loss='binary_crossentropy', metrics=metrics)
# if return encoder in the encoded variable
if encoded_as_model:
encoded = Model(input_data, encoded)
return autoencoder, encoded
###Output
_____no_output_____
###Markdown
Train autoencoder on data:
###Code
clear_session_and_set_seeds()
de_ae, de_enc = deep_ae(phase_all_train, encoding_dim=encoding_dim, encoded_as_model=True)
de_ae.fit(phase_all_train, phase_all_train,
validation_data=(phase_all_valid, phase_all_valid),
batch_size=batch_size,
shuffle=True,
epochs=epochs,
verbose=0)
###Output
_____no_output_____
###Markdown
Plot signal, reconstruction and encoded representation:
###Code
data2 = de_ae.predict(phase_all_valid)
data3 = de_enc.predict(phase_all_valid)
compare_plot_n(phase_all_valid, data2, data3)
###Output
_____no_output_____
###Markdown
Store the encoded representations in the data dictionary:
###Code
for ident in data_dict:
tmpDataDict = data_dict[ident]
# read data
phase = tmpDataDict['phase']
de_data = de_enc.predict(phase)
# load data into dictionary
tmpDataDict['deep_encoded'] = de_data
###Output
_____no_output_____
###Markdown
Helper function to get data from the dictionary:
###Code
def get_ident_data_from_dict(idents, data_dict):
# Initialize data variables
y = y_shape.copy()
phase = phase_shape.copy()
breathing = breathing_shape.copy()
heartbeat = heartbeat_shape.copy()
combined = combined_shape.copy()
undercomplete_encoded = ae_encoded_shape.copy()
sparse_encoded = ae_encoded_shape.copy()
deep_encoded = ae_encoded_shape.copy()
# Stack data from each ident into the variables
for tmp_id in idents:
y = np.vstack((y, data_dict[tmp_id]['y']))
phase = np.vstack((phase, data_dict[tmp_id]['phase']))
breathing = np.vstack((breathing, data_dict[tmp_id]['breathing']))
heartbeat = np.vstack((heartbeat, data_dict[tmp_id]['heartbeat']))
combined = np.vstack((combined, data_dict[tmp_id]['combined']))
undercomplete_encoded = np.vstack((undercomplete_encoded, data_dict[tmp_id]['undercomplete_encoded']))
sparse_encoded = np.vstack((sparse_encoded, data_dict[tmp_id]['sparse_encoded']))
deep_encoded = np.vstack((deep_encoded, data_dict[tmp_id]['deep_encoded']))
return y, phase, breathing, heartbeat, combined, undercomplete_encoded, sparse_encoded, deep_encoded
###Output
_____no_output_____
###Markdown
Classifiers Helper loop function definition A function that loops over all the data, calls the classifiers with it, and then stores the returned accuracies.
###Code
def helper_loop(classifier_function_train, idents, n=5, num_loops_to_average_over=1, should_scale_data=True):
#returns a dictionary with accuracies
# set the variables in the dictionary
accs = {}
accs['phase'] = []
accs['breathing'] = []
accs['heartbeat'] = []
accs['combined br hb'] = []
accs['undercomplete'] = []
accs['sparse'] = []
accs['deep'] = []
accs['test id'] = []
start_time = datetime.now()
# leave-one-person-out validation (one ident held out for validation, one for test)
for i in range(n):
# print current iteration and time elapsed from start
print("iteration:", i+1, "of", n, "; time elapsed:", datetime.now()-start_time)
## ----- Data preparation:
validation_idents = [idents[i]]
test_idents = [idents[i-1]]
train_idents = []
for ident in idents:
if (ident not in test_idents) and (ident not in validation_idents):
train_idents.append(ident)
# save test id to see which id yielded which accuracies
accs['test id'].append(test_idents[0])
# Load train data
train_data = get_ident_data_from_dict(train_idents, data_dict)
y_train = train_data[0]
# Load validation data
valid_data = get_ident_data_from_dict(validation_idents, data_dict)
y_valid = valid_data[0]
# Load test data
test_data = get_ident_data_from_dict(test_idents, data_dict)
y_test = test_data[0]
data_names_by_index = ['y', 'phase', 'breathing', 'heartbeat',
'combined br hb', 'undercomplete', 'sparse', 'deep']
# Loop over all data that will be used for classification and send it to the classifier
# index 0 is y so we skip it
for index in range(1, len(test_data)):
clear_session_and_set_seeds()
train_x = train_data[index]
valid_x = valid_data[index]
test_x = test_data[index]
# Scale data
if should_scale_data:
# Scale with standard scaler
sscaler = StandardScaler()
sscaler.fit(train_x)
train_x = sscaler.transform(train_x)
# Scale valid and test with train's scaler
valid_x = sscaler.transform(valid_x)
test_x = sscaler.transform(test_x)
# Initialize variables
tmp_acc = []
data_name = data_names_by_index[index]
for tmp_index in range(num_loops_to_average_over):
curr_acc = classifier_function_train(train_x, y_train, valid_x, y_valid, test_x, y_test, data_name)
tmp_acc.append(curr_acc)
# Store accuracy
curr_acc = np.mean(tmp_acc)
accs[data_name].append(curr_acc)
# Print total time required to run this
end_time = datetime.now()
elapsed_time = end_time - start_time
print("Completed!", "Time elapsed:", elapsed_time)
return accs
###Output
_____no_output_____
###Markdown
Simple dense classifier Define the classifier:
###Code
params_dense_phase = {
'dropout': 0.3,
'hidden_size': 28,
'activation': 'sigmoid',
'loss': 'categorical_crossentropy',
'optimizer': Adam,
'batch_size': 128,
'learning_rate': 0.001,
'epochs': 300
}
params_dense_br_hb = {
'dropout': 0.05,
'hidden_size': 24,
'activation': 'sigmoid',
'loss': 'categorical_crossentropy',
'optimizer': Nadam,
'learning_rate': 0.05,
'batch_size': 128,
'epochs': 200
}
params_dense_ae_enc = {
'dropout': 0.1,
'hidden_size': 30,
'activation': 'relu',
'loss': 'categorical_crossentropy',
'optimizer': Adam,
'learning_rate': 0.01,
'batch_size': 106,
'epochs': 300
}
def dense_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name):
# change Ys to categorical (one hot encoding)
y_train = to_categorical(y_train, num_classes=3)
y_valid = to_categorical(y_valid, num_classes=3)
y_test = to_categorical(y_test, num_classes=3)
params = params_dense_br_hb
if (data_name == 'phase'):
params = params_dense_phase
if (data_name == 'undercomplete' or data_name == 'sparse' or data_name == 'deep'):
params = params_dense_ae_enc
# Define the model
model = Sequential()
model.add(Dropout(params['dropout']))
model.add(Dense(params['hidden_size']))
model.add(Activation(params['activation']))
model.add(Dense(3))
model.add(Activation('sigmoid'))
# Compile the model
model.compile(loss=params['loss'],
optimizer=params['optimizer'](learning_rate=params['learning_rate']),
metrics=metrics)
# Train the model and return the accuracy
sc, curr_acc, epoch_data = model_train(model, x_train, y_train, params['batch_size'], params['epochs'],
x_valid, y_valid, x_test, y_test)
return curr_acc
###Output
_____no_output_____
###Markdown
Combine the autoencoders with the classifier:
###Code
accs = helper_loop(dense_train, idents, n)
accuracies['simple_dense'] = accs
# print accuracies of each method and corresponding id which yielded that accuracy (same row)
pandas.DataFrame.from_dict(accs)
# print some statistics for each method
print_accs_stats(accs)
###Output
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.083333 0.166667 0.000000 0.000000 0.000000
max 0.818182 0.923077 0.750000 1.000000 0.620690 0.846154 0.916667
mean 0.347794 0.336509 0.339192 0.462451 0.321442 0.321672 0.335285
median 0.347826 0.333333 0.266667 0.437500 0.360000 0.375000 0.333333
###Markdown
LSTM-based classifier based on the original author's code
###Code
params_lstm_phase = {
'kernel_size': 4,
'filters': 32,
'strides': 2,
'pool_size': 4,
'dropout': 0.01,
'lstm_output_size': 22,
'activation': 'relu',
'last_activation': 'sigmoid',
'loss': 'categorical_crossentropy',
'optimizer': Nadam,
'learning_rate': 0.005,
'batch_size': 186,
'epochs': 200
}
params_lstm_br_hb = {
'kernel_size': 2,
'filters': 12,
'strides': 2,
'pool_size': 1,
'dropout': 0.01,
'lstm_output_size': 64,
'activation': 'relu',
'last_activation': 'sigmoid',
'loss': 'categorical_crossentropy',
'optimizer': Nadam,
'learning_rate': 0.001,
'batch_size': 256,
'epochs': 100
}
params_lstm_ae_enc = {
'kernel_size': 2,
'filters': 6,
'strides': 2,
'pool_size': 2,
'dropout': 0.01,
'lstm_output_size': 32,
'activation': 'relu',
'last_activation': 'sigmoid',
'loss': 'categorical_crossentropy',
'optimizer': Nadam,
'learning_rate': 0.001,
'batch_size': 64,
'epochs': 100
}
def LSTM_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name):
# change Ys to categorical (one hot encoding)
y_train = to_categorical(y_train, num_classes=3)
y_valid = to_categorical(y_valid, num_classes=3)
y_test = to_categorical(y_test, num_classes=3)
params = params_lstm_br_hb
if (data_name == 'phase'):
params = params_lstm_phase
if (data_name == 'undercomplete' or data_name == 'sparse' or data_name == 'deep'):
params = params_lstm_ae_enc
# Reshape data to fit some layers
xt_train = x_train.reshape(-1, x_train[0].shape[0], 1)
xt_valid = x_valid.reshape(-1, x_valid[0].shape[0], 1)
xt_test = x_test.reshape(-1, x_test[0].shape[0], 1)
# Define the model
model = Sequential()
model.add(Dropout(params['dropout']))
model.add(Conv1D(params['filters'],
params['kernel_size'],
padding='valid',
activation=params['activation'],
strides=params['strides']))
model.add(MaxPooling1D(pool_size=params['pool_size']))
if (data_name == 'phase'):
model.add(Conv1D(params['filters'],
params['kernel_size'],
padding='valid',
activation=params['activation'],
strides=params['strides']))
model.add(MaxPooling1D(pool_size=params['pool_size']))
model.add(Dropout(params['dropout']))
model.add(LSTM(params['lstm_output_size']))
model.add(Dense(3))
model.add(Activation(params['last_activation']))
# Compile the model
model.compile(loss=params['loss'],
optimizer=params['optimizer'](learning_rate=params['learning_rate']),
metrics=['acc'])
# Train the model and return the accuracy
sc, curr_acc, epoch_data = model_train(model, xt_train, y_train, params['batch_size'], params['epochs'],
xt_valid, y_valid, xt_test, y_test)
return curr_acc
###Output
_____no_output_____
###Markdown
Combine the autoencoders with the classifier:
###Code
accs = helper_loop(LSTM_train, idents, n=n)
accuracies['LSTM'] = accs
# print accuracies of each method and corresponding id which yielded that accuracy (same row)
pandas.DataFrame.from_dict(accs)
# print some statistics for each method
print_accs_stats(accs)
###Output
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.00000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
max 1.00000 0.700000 0.620690 0.800000 0.916667 0.916667 0.916667
mean 0.35245 0.302516 0.371224 0.393421 0.345948 0.333392 0.410779
median 0.36000 0.304348 0.409091 0.416667 0.360000 0.333333 0.400000
###Markdown
kNN
###Code
params_knn_phase = {
'n_neighbors': 3,
'metric': 'l2'
}
params_knn_br_hb = {
'n_neighbors': 15,
'metric': 'cosine'
}
params_knn_ae_enc = {
'n_neighbors': 5,
'metric': 'manhattan'
}
from sklearn.neighbors import KNeighborsClassifier
def KNN_classifier(params):
model = KNeighborsClassifier(n_neighbors=params['n_neighbors'], metric=params['metric'])
return model
def KNN_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name):
params = params_knn_br_hb
if (data_name == 'phase'):
params = params_knn_phase
if (data_name == 'undercomplete' or data_name == 'sparse' or data_name == 'deep'):
params = params_knn_ae_enc
model = OneVsRestClassifier(KNN_classifier(params))
model.fit(x_train, y_train.ravel())
curr_acc = np.sum(model.predict(x_test) == y_test.ravel()) / len(y_test.ravel())
return curr_acc
###Output
_____no_output_____
###Markdown
Combine the autoencoders with the classifier:
###Code
accs = helper_loop(KNN_train, idents, n)
accuracies['kNN'] = accs
# print accuracies of each method and corresponding id which yielded that accuracy (same row)
pandas.DataFrame.from_dict(accs)
# print some statistics for each method
print_accs_stats(accs)
###Output
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.000000 0.083333 0.000000 0.000000 0.000000
max 1.000000 0.888889 0.875000 0.772727 0.923077 0.923077 0.724138
mean 0.420725 0.325970 0.399417 0.417890 0.376446 0.404746 0.355345
median 0.375000 0.333333 0.370370 0.384615 0.333333 0.375000 0.333333
###Markdown
SVC
###Code
params_svc_phase = {
'C': 3,
'kernel': 'rbf',
'gamma': 'scale'
}
params_svc_br_hb = {
'C': 5,
'kernel': 'poly',
'gamma': 'scale'
}
params_svc_ae_enc = {
'C': 5,
'kernel': 'rbf',
'gamma': 'scale'
}
from sklearn.svm import SVC
def SVC_classifier(params):
model = SVC(random_state=42, C=params['C'], kernel=params['kernel'], gamma=params['gamma'])
return model
def SVC_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name):
params = params_svc_br_hb
if (data_name == 'phase'):
params = params_svc_phase
if (data_name == 'undercomplete' or data_name == 'sparse' or data_name == 'deep'):
params = params_svc_ae_enc
model = OneVsRestClassifier(SVC_classifier(params))
model.fit(x_train, y_train.ravel())
curr_acc = np.sum(model.predict(x_test) == y_test.ravel()) / len(y_test.ravel())
return curr_acc
###Output
_____no_output_____
###Markdown
Combine the autoencoders with the classifier:
###Code
accs = helper_loop(SVC_train, idents, n)
accuracies['SVC'] = accs
# print accuracies of each method and corresponding id which yielded that accuracy (same row)
pandas.DataFrame.from_dict(accs)
# print some statistics for each method
print_accs_stats(accs)
###Output
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
max 0.700000 0.850000 0.633333 0.800000 0.869565 1.000000 0.833333
mean 0.300876 0.327056 0.365761 0.442734 0.325653 0.361799 0.327916
median 0.300000 0.333333 0.366667 0.448276 0.333333 0.333333 0.300000
###Markdown
Random Forest
###Code
params_rf_phase = {
'n_estimators': 190,
'max_depth': 50,
'min_samples_split': 4,
'min_samples_leaf': 2,
'oob_score': False,
'ccp_alpha': 0.005
}
params_rf_br_hb = {
'n_estimators': 190,
'max_depth': 20,
'min_samples_split': 3,
'min_samples_leaf': 3,
'oob_score': True,
'ccp_alpha': 0.015
}
params_rf_ae_enc = {
'n_estimators': 130,
'max_depth': 100,
'min_samples_split': 5,
'min_samples_leaf': 5,
'oob_score': True,
'ccp_alpha': 0.005
}
from sklearn.ensemble import RandomForestClassifier
def random_forest_classifier(params):
model = RandomForestClassifier(random_state=42,
n_estimators = params['n_estimators'],
criterion = 'entropy',
max_depth = params['max_depth'],
min_samples_split = params['min_samples_split'],
min_samples_leaf = params['min_samples_leaf'],
oob_score = params['oob_score'],
ccp_alpha = params['ccp_alpha'],
max_features = 'log2',
bootstrap = True)
return model
def random_forest_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name):
params = params_rf_br_hb
if (data_name == 'phase'):
params = params_rf_phase
if (data_name == 'undercomplete' or data_name == 'sparse' or data_name == 'deep'):
params = params_rf_ae_enc
model = OneVsRestClassifier(random_forest_classifier(params))
model.fit(x_train, y_train.ravel())
curr_acc = np.sum(model.predict(x_test) == y_test.ravel()) / len(y_test.ravel())
return curr_acc
###Output
_____no_output_____
###Markdown
Combine the autoencoders with the classifier:
###Code
accs = helper_loop(random_forest_train, idents, n, should_scale_data=False)
accuracies['random_forest'] = accs
# print accuracies of each method and corresponding id which yielded that accuracy (same row)
pandas.DataFrame.from_dict(accs)
# print some statistics for each method
print_accs_stats(accs)
###Output
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.083333 0.000000 0.100000 0.000000 0.000000 0.000000
max 1.000000 0.923077 0.923077 0.923077 0.923077 0.923077 0.923077
mean 0.304441 0.382942 0.418474 0.447088 0.355563 0.359701 0.394711
median 0.333333 0.388889 0.400000 0.370370 0.333333 0.360000 0.416667
###Markdown
Naive Bayesian
###Code
from sklearn.naive_bayes import GaussianNB
def naive_bayesian_classifier():
model = GaussianNB()
return model
def naive_bayesian_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name):
model = OneVsRestClassifier(naive_bayesian_classifier())
model.fit(x_train, y_train.ravel())
curr_acc = np.sum(model.predict(x_test) == y_test.ravel()) / len(y_test.ravel())
return curr_acc
###Output
_____no_output_____
###Markdown
Combine the autoencoders with the classifier:
###Code
accs = helper_loop(naive_bayesian_train, idents, n)
accuracies['naive_bayesian'] = accs
# print accuracies of each method and corresponding id which yielded that accuracy (same row)
pandas.DataFrame.from_dict(accs)
# print some statistics for each method
print_accs_stats(accs)
###Output
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
max 0.555556 1.000000 1.000000 1.000000 0.666667 0.666667 0.555556
mean 0.276881 0.356399 0.433414 0.440669 0.352478 0.314947 0.257973
median 0.333333 0.333333 0.344828 0.363636 0.375000 0.333333 0.296296
###Markdown
XGBoost
###Code
params_xgb_phase = {
'n_estimators': 50,
'max_depth': 50,
'booster': 'gbtree'
}
params_xgb_br_hb = {
'n_estimators': 50,
'max_depth': 4,
'booster': 'gbtree'
}
params_xgb_ae_enc = {
'n_estimators': 130,
'max_depth': 4,
'booster': 'gbtree'
}
from xgboost import XGBClassifier
def XGBoost_classifier(params):
model = XGBClassifier(random_state=42,
n_estimators=params['n_estimators'],
max_depth=params['max_depth'])
return model
def XGBoost_train(x_train, y_train, x_valid, y_valid, x_test, y_test, data_name):
params = params_xgb_br_hb
if (data_name == 'phase'):
params = params_xgb_phase
if (data_name == 'undercomplete' or data_name == 'sparse' or data_name == 'deep'):
params = params_xgb_ae_enc
model = OneVsRestClassifier(XGBoost_classifier(params))
model.fit(x_train, y_train.ravel())
curr_acc = np.sum(model.predict(x_test) == y_test.ravel()) / len(y_test.ravel())
return curr_acc
###Output
_____no_output_____
###Markdown
Combine the autoencoders with the classifier:
###Code
accs = helper_loop(XGBoost_train, idents, n, should_scale_data=False)
accuracies['XGBoost'] = accs
# print accuracies of each method and corresponding id which yielded that accuracy (same row)
pandas.DataFrame.from_dict(accs)
# print some statistics for each method
print_accs_stats(accs)
###Output
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.033333 0.066667 0.000000 0.000000 0.000000
max 1.000000 0.846154 0.826087 0.923077 0.923077 0.846154 0.750000
mean 0.344572 0.392224 0.404627 0.392427 0.426957 0.422854 0.378539
median 0.370370 0.388889 0.379310 0.360000 0.434783 0.440000 0.400000
###Markdown
Compare Accuracies Save all accuracies to results csv file:
###Code
results_path = "../../results/LvMvH/LvMvH-SX.csv"
# Make a dataframe from the accuracies
accs_dataframe = pandas.DataFrame(accuracies).T
# Save dataframe to file
accs_dataframe.to_csv(results_path, mode='w')
###Output
_____no_output_____
###Markdown
Print min, max, mean, median for each classifier/autoencoder combination:
###Code
for classifier in accuracies:
print("-----------", classifier + ":", "-----------")
accs = accuracies[classifier]
print_accs_stats(accs)
print("\n")
###Output
----------- simple_dense: -----------
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.083333 0.166667 0.000000 0.000000 0.000000
max 0.818182 0.923077 0.750000 1.000000 0.620690 0.846154 0.916667
mean 0.347794 0.336509 0.339192 0.462451 0.321442 0.321672 0.335285
median 0.347826 0.333333 0.266667 0.437500 0.360000 0.375000 0.333333
----------- LSTM: -----------
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.00000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
max 1.00000 0.700000 0.620690 0.800000 0.916667 0.916667 0.916667
mean 0.35245 0.302516 0.371224 0.393421 0.345948 0.333392 0.410779
median 0.36000 0.304348 0.409091 0.416667 0.360000 0.333333 0.400000
----------- kNN: -----------
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.000000 0.083333 0.000000 0.000000 0.000000
max 1.000000 0.888889 0.875000 0.772727 0.923077 0.923077 0.724138
mean 0.420725 0.325970 0.399417 0.417890 0.376446 0.404746 0.355345
median 0.375000 0.333333 0.370370 0.384615 0.333333 0.375000 0.333333
----------- SVC: -----------
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
max 0.700000 0.850000 0.633333 0.800000 0.869565 1.000000 0.833333
mean 0.300876 0.327056 0.365761 0.442734 0.325653 0.361799 0.327916
median 0.300000 0.333333 0.366667 0.448276 0.333333 0.333333 0.300000
----------- random_forest: -----------
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.083333 0.000000 0.100000 0.000000 0.000000 0.000000
max 1.000000 0.923077 0.923077 0.923077 0.923077 0.923077 0.923077
mean 0.304441 0.382942 0.418474 0.447088 0.355563 0.359701 0.394711
median 0.333333 0.388889 0.400000 0.370370 0.333333 0.360000 0.416667
----------- naive_bayesian: -----------
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000
max 0.555556 1.000000 1.000000 1.000000 0.666667 0.666667 0.555556
mean 0.276881 0.356399 0.433414 0.440669 0.352478 0.314947 0.257973
median 0.333333 0.333333 0.344828 0.363636 0.375000 0.333333 0.296296
----------- XGBoost: -----------
phase breathing heartbeat combined br hb undercomplete sparse deep
min 0.000000 0.000000 0.033333 0.066667 0.000000 0.000000 0.000000
max 1.000000 0.846154 0.826087 0.923077 0.923077 0.846154 0.750000
mean 0.344572 0.392224 0.404627 0.392427 0.426957 0.422854 0.378539
median 0.370370 0.388889 0.379310 0.360000 0.434783 0.440000 0.400000
###Markdown
Print all accuracies in table form:
###Code
for classifier in accuracies:
print(classifier + ":")
# print(pandas.DataFrame.from_dict(accuracies[classifier]))
# Using .to_string() gives nicer loooking results (doesn't split into new line)
print(pandas.DataFrame.from_dict(accuracies[classifier]).to_string())
print("\n")
###Output
simple_dense:
phase breathing heartbeat combined br hb undercomplete sparse deep test id
0 0.000000 0.083333 0.083333 0.500000 0.250000 0.250000 0.500000 62i9y
1 0.086957 0.434783 0.304348 0.652174 0.478261 0.521739 0.434783 2gu87
2 0.720000 0.000000 0.200000 0.280000 0.400000 0.400000 0.520000 iz2ps
3 0.766667 0.533333 0.166667 0.233333 0.433333 0.666667 0.633333 1mpau
4 0.433333 0.033333 0.266667 0.566667 0.333333 0.500000 0.333333 7dwjy
5 0.033333 0.200000 0.500000 0.533333 0.366667 0.400000 0.000000 7swyk
6 0.272727 0.045455 0.500000 0.272727 0.136364 0.227273 0.272727 94mnx
7 0.444444 0.000000 0.555556 1.000000 0.000000 0.000000 0.000000 bd47a
8 0.450000 0.750000 0.750000 0.800000 0.300000 0.700000 0.200000 c24ur
9 0.413793 0.620690 0.137931 0.344828 0.620690 0.517241 0.620690 ctsax
10 0.347826 0.043478 0.173913 0.304348 0.086957 0.173913 0.173913 dkhty
11 0.375000 0.375000 0.500000 0.437500 0.375000 0.375000 0.000000 e4gay
12 0.520000 0.480000 0.480000 0.240000 0.360000 0.440000 0.360000 ef5rq
13 0.818182 0.318182 0.318182 0.409091 0.500000 0.454545 0.500000 f1gjp
14 0.172414 0.137931 0.551724 0.413793 0.517241 0.000000 0.379310 hpbxa
15 0.200000 0.433333 0.166667 0.533333 0.333333 0.000000 0.333333 pmyfl
16 0.166667 0.083333 0.250000 0.166667 0.083333 0.083333 0.916667 r89k1
17 0.550000 0.200000 0.150000 0.450000 0.500000 0.150000 0.150000 tn4vl
18 0.222222 0.555556 0.500000 0.777778 0.000000 0.000000 0.000000 td5pr
19 0.466667 0.333333 0.633333 0.400000 0.533333 0.433333 0.733333 gyqu9
20 0.185185 0.555556 0.259259 0.259259 0.481481 0.259259 0.296296 fzchw
21 0.153846 0.923077 0.153846 0.461538 0.153846 0.846154 0.153846 l53hg
22 0.200000 0.600000 0.200000 0.600000 0.150000 0.000000 0.200000 3n2f9
LSTM:
phase breathing heartbeat combined br hb undercomplete sparse deep test id
0 0.166667 0.333333 0.583333 0.416667 0.166667 0.250000 0.500000 62i9y
1 0.434783 0.304348 0.173913 0.521739 0.000000 0.434783 0.478261 2gu87
2 0.400000 0.080000 0.320000 0.360000 0.200000 0.280000 0.400000 iz2ps
3 0.400000 0.166667 0.366667 0.266667 0.500000 0.333333 0.266667 1mpau
4 0.333333 0.266667 0.000000 0.333333 0.333333 0.333333 0.333333 7dwjy
5 0.000000 0.166667 0.533333 0.566667 0.333333 0.333333 0.333333 7swyk
6 0.363636 0.000000 0.500000 0.363636 0.363636 0.090909 0.454545 94mnx
7 1.000000 0.333333 0.000000 0.000000 0.222222 0.555556 0.222222 bd47a
8 0.400000 0.500000 0.500000 0.800000 0.200000 0.050000 0.450000 c24ur
9 0.344828 0.275862 0.413793 0.310345 0.586207 0.655172 0.793103 ctsax
10 0.000000 0.173913 0.478261 0.434783 0.478261 0.434783 0.434783 dkhty
11 0.375000 0.625000 0.250000 0.250000 0.375000 0.187500 0.375000 e4gay
12 0.360000 0.480000 0.560000 0.560000 0.360000 0.120000 0.200000 ef5rq
13 0.318182 0.500000 0.409091 0.590909 0.454545 0.454545 0.454545 f1gjp
14 0.344828 0.310345 0.620690 0.482759 0.448276 0.275862 0.448276 hpbxa
15 0.500000 0.333333 0.333333 0.033333 0.666667 0.666667 0.333333 pmyfl
16 0.916667 0.083333 0.583333 0.000000 0.916667 0.916667 0.916667 r89k1
17 0.000000 0.500000 0.300000 0.650000 0.000000 0.000000 0.400000 tn4vl
18 0.055556 0.055556 0.500000 0.333333 0.000000 0.000000 0.000000 td5pr
19 0.466667 0.433333 0.466667 0.466667 0.666667 0.533333 0.666667 gyqu9
20 0.518519 0.259259 0.111111 0.296296 0.481481 0.481481 0.333333 fzchw
21 0.307692 0.076923 0.384615 0.461538 0.153846 0.230769 0.153846 l53hg
22 0.100000 0.700000 0.150000 0.550000 0.050000 0.050000 0.500000 3n2f9
kNN:
phase breathing heartbeat combined br hb undercomplete sparse deep test id
0 0.333333 0.166667 0.666667 0.500000 0.333333 0.333333 0.250000 62i9y
1 0.130435 0.434783 0.478261 0.739130 0.043478 0.043478 0.000000 2gu87
2 0.560000 0.160000 0.480000 0.520000 0.440000 0.440000 0.320000 iz2ps
3 0.833333 0.133333 0.300000 0.300000 0.566667 0.633333 0.466667 1mpau
4 0.333333 0.233333 0.100000 0.233333 0.333333 0.333333 0.333333 7dwjy
5 0.333333 0.433333 0.366667 0.666667 0.633333 0.633333 0.100000 7swyk
6 0.363636 0.090909 0.227273 0.772727 0.272727 0.227273 0.318182 94mnx
7 0.444444 0.888889 0.333333 0.222222 0.333333 0.666667 0.666667 bd47a
8 0.400000 0.550000 0.650000 0.700000 0.050000 0.200000 0.200000 c24ur
9 0.655172 0.344828 0.379310 0.689655 0.620690 0.620690 0.655172 ctsax
10 0.478261 0.043478 0.173913 0.173913 0.086957 0.086957 0.173913 dkhty
11 0.375000 0.312500 0.875000 0.375000 0.375000 0.375000 0.375000 e4gay
12 0.320000 0.080000 0.760000 0.440000 0.240000 0.360000 0.400000 ef5rq
13 0.045455 0.363636 0.090909 0.409091 0.500000 0.500000 0.454545 f1gjp
14 0.517241 0.275862 0.482759 0.275862 0.689655 0.758621 0.724138 hpbxa
15 0.666667 0.466667 0.500000 0.566667 0.600000 0.600000 0.600000 pmyfl
16 0.750000 0.000000 0.583333 0.083333 0.416667 0.166667 0.500000 r89k1
17 0.000000 0.350000 0.150000 0.300000 0.200000 0.250000 0.300000 tn4vl
18 0.000000 0.000000 0.111111 0.333333 0.000000 0.000000 0.000000 td5pr
19 0.566667 0.333333 0.800000 0.466667 0.566667 0.600000 0.600000 gyqu9
20 0.370370 0.370370 0.370370 0.259259 0.333333 0.407407 0.481481 fzchw
21 1.000000 0.615385 0.307692 0.384615 0.923077 0.923077 0.153846 l53hg
22 0.200000 0.850000 0.000000 0.200000 0.100000 0.150000 0.100000 3n2f9
SVC:
phase breathing heartbeat combined br hb undercomplete sparse deep test id
0 0.166667 0.083333 0.416667 0.500000 0.166667 0.166667 0.333333 62i9y
1 0.130435 0.304348 0.565217 0.608696 0.000000 0.130435 0.478261 2gu87
2 0.560000 0.320000 0.360000 0.440000 0.520000 0.600000 0.680000 iz2ps
3 0.666667 0.333333 0.300000 0.433333 0.366667 0.066667 0.300000 1mpau
4 0.333333 0.200000 0.366667 0.566667 0.333333 0.333333 0.333333 7dwjy
5 0.000000 0.066667 0.366667 0.633333 0.033333 0.200000 0.300000 7swyk
6 0.454545 0.090909 0.227273 0.318182 0.318182 0.227273 0.272727 94mnx
7 0.444444 0.555556 0.000000 0.000000 0.333333 0.555556 0.000000 bd47a
8 0.700000 0.850000 0.300000 0.800000 0.500000 0.800000 0.350000 c24ur
9 0.241379 0.517241 0.517241 0.448276 0.413793 0.379310 0.137931 ctsax
10 0.260870 0.086957 0.304348 0.521739 0.869565 0.695652 0.652174 dkhty
11 0.375000 0.375000 0.625000 0.375000 0.375000 0.375000 0.375000 e4gay
12 0.040000 0.320000 0.520000 0.040000 0.480000 0.280000 0.200000 ef5rq
13 0.454545 0.500000 0.363636 0.318182 0.454545 0.500000 0.500000 f1gjp
14 0.344828 0.482759 0.551724 0.448276 0.344828 0.344828 0.344828 hpbxa
15 0.300000 0.333333 0.433333 0.366667 0.433333 0.333333 0.266667 pmyfl
16 0.333333 0.000000 0.416667 0.333333 0.833333 0.833333 0.833333 r89k1
17 0.300000 0.500000 0.100000 0.200000 0.150000 0.100000 0.250000 tn4vl
18 0.000000 0.000000 0.166667 0.555556 0.000000 0.000000 0.000000 td5pr
19 0.333333 0.333333 0.633333 0.500000 0.333333 0.400000 0.666667 gyqu9
20 0.000000 0.592593 0.370370 0.333333 0.000000 0.000000 0.037037 fzchw
21 0.230769 0.076923 0.307692 0.692308 0.230769 1.000000 0.230769 l53hg
22 0.250000 0.600000 0.200000 0.750000 0.000000 0.000000 0.000000 3n2f9
random_forest:
phase breathing heartbeat combined br hb undercomplete sparse deep test id
0 0.333333 0.083333 0.833333 0.666667 0.250000 0.166667 0.500000 62i9y
1 0.000000 0.608696 0.043478 0.434783 0.000000 0.217391 0.000000 2gu87
2 0.400000 0.280000 0.040000 0.320000 0.320000 0.360000 0.320000 iz2ps
3 0.666667 0.233333 0.266667 0.333333 0.533333 0.533333 0.600000 1mpau
4 0.333333 0.133333 0.066667 0.333333 0.333333 0.333333 0.333333 7dwjy
5 0.000000 0.333333 0.333333 0.366667 0.666667 0.600000 0.400000 7swyk
6 0.045455 0.090909 0.500000 0.454545 0.318182 0.227273 0.318182 94mnx
7 0.111111 0.444444 0.000000 0.444444 0.444444 0.444444 0.666667 bd47a
8 0.500000 0.500000 0.550000 0.550000 0.150000 0.150000 0.300000 c24ur
9 0.344828 0.344828 0.586207 0.655172 0.413793 0.551724 0.551724 ctsax
10 1.000000 0.173913 0.652174 0.173913 0.086957 0.086957 0.130435 dkhty
11 0.375000 0.500000 0.375000 0.312500 0.375000 0.375000 0.375000 e4gay
12 0.320000 0.600000 0.760000 0.560000 0.080000 0.040000 0.040000 ef5rq
13 0.318182 0.590909 0.454545 0.772727 0.454545 0.454545 0.454545 f1gjp
14 0.344828 0.137931 0.379310 0.344828 0.413793 0.448276 0.517241 hpbxa
15 0.533333 0.500000 0.433333 0.333333 0.666667 0.666667 0.666667 pmyfl
16 0.666667 0.083333 0.166667 0.166667 0.250000 0.166667 0.416667 r89k1
17 0.000000 0.550000 0.700000 0.500000 0.250000 0.350000 0.500000 tn4vl
18 0.000000 0.388889 0.777778 0.833333 0.000000 0.000000 0.000000 td5pr
19 0.333333 0.300000 0.400000 0.333333 0.666667 0.633333 0.533333 gyqu9
20 0.222222 0.407407 0.333333 0.370370 0.481481 0.444444 0.481481 fzchw
21 0.153846 0.923077 0.923077 0.923077 0.923077 0.923077 0.923077 l53hg
22 0.000000 0.600000 0.050000 0.100000 0.100000 0.100000 0.050000 3n2f9
naive_bayesian:
phase breathing heartbeat combined br hb undercomplete sparse deep test id
0 0.500000 0.166667 0.500000 0.666667 0.500000 0.333333 0.250000 62i9y
1 0.434783 0.826087 0.086957 0.521739 0.434783 0.434783 0.478261 2gu87
2 0.200000 0.400000 0.320000 0.400000 0.160000 0.000000 0.400000 iz2ps
3 0.333333 0.466667 0.333333 0.400000 0.333333 0.333333 0.133333 1mpau
4 0.333333 0.333333 0.300000 0.300000 0.333333 0.333333 0.333333 7dwjy
5 0.333333 0.333333 0.333333 0.566667 0.333333 0.333333 0.000000 7swyk
6 0.454545 0.136364 0.909091 0.363636 0.454545 0.454545 0.454545 94mnx
7 0.000000 0.000000 0.000000 0.000000 0.000000 0.666667 0.000000 bd47a
8 0.000000 1.000000 0.250000 1.000000 0.500000 0.500000 0.150000 c24ur
9 0.517241 0.034483 0.448276 0.344828 0.655172 0.482759 0.448276 ctsax
10 0.391304 0.521739 0.043478 0.086957 0.391304 0.391304 0.000000 dkhty
11 0.375000 0.625000 0.562500 0.625000 0.375000 0.375000 0.375000 e4gay
12 0.200000 0.200000 1.000000 0.200000 0.360000 0.280000 0.240000 ef5rq
13 0.000000 0.272727 0.909091 0.954545 0.454545 0.590909 0.500000 f1gjp
14 0.275862 0.000000 0.344828 0.275862 0.448276 0.000000 0.448276 hpbxa
15 0.333333 0.366667 0.333333 0.333333 0.000000 0.000000 0.333333 pmyfl
16 0.000000 0.083333 0.666667 0.333333 0.083333 0.000000 0.083333 r89k1
17 0.050000 0.500000 0.300000 0.300000 0.500000 0.500000 0.000000 tn4vl
18 0.555556 0.555556 0.444444 0.611111 0.555556 0.000000 0.555556 td5pr
19 0.333333 0.333333 0.666667 0.633333 0.666667 0.333333 0.000000 gyqu9
20 0.370370 0.111111 0.370370 0.222222 0.037037 0.370370 0.296296 fzchw
21 0.076923 0.230769 0.846154 0.846154 0.230769 0.230769 0.153846 l53hg
22 0.300000 0.700000 0.000000 0.150000 0.300000 0.300000 0.300000 3n2f9
XGBoost:
phase breathing heartbeat combined br hb undercomplete sparse deep test id
0 0.416667 0.166667 0.750000 0.666667 0.250000 0.250000 0.500000 62i9y
1 0.000000 0.521739 0.043478 0.173913 0.043478 0.086957 0.000000 2gu87
2 0.440000 0.360000 0.280000 0.360000 0.280000 0.440000 0.440000 iz2ps
3 0.666667 0.166667 0.266667 0.333333 0.633333 0.600000 0.600000 1mpau
4 0.333333 0.066667 0.033333 0.133333 0.333333 0.333333 0.333333 7dwjy
5 0.000000 0.333333 0.400000 0.066667 0.666667 0.333333 0.400000 7swyk
6 0.000000 0.000000 0.454545 0.136364 0.227273 0.181818 0.227273 94mnx
7 0.333333 0.777778 0.222222 0.555556 0.444444 0.666667 0.444444 bd47a
8 0.600000 0.700000 0.500000 0.550000 0.400000 0.600000 0.300000 c24ur
9 0.551724 0.551724 0.551724 0.655172 0.482759 0.517241 0.517241 ctsax
10 1.000000 0.130435 0.826087 0.173913 0.434783 0.347826 0.173913 dkhty
11 0.375000 0.625000 0.375000 0.437500 0.375000 0.375000 0.375000 e4gay
12 0.400000 0.520000 0.560000 0.680000 0.320000 0.200000 0.360000 ef5rq
13 0.272727 0.454545 0.363636 0.545455 0.500000 0.500000 0.454545 f1gjp
14 0.344828 0.068966 0.379310 0.379310 0.620690 0.586207 0.551724 hpbxa
15 0.566667 0.366667 0.366667 0.266667 0.700000 0.633333 0.633333 pmyfl
16 0.500000 0.083333 0.083333 0.166667 0.750000 0.416667 0.750000 r89k1
17 0.100000 0.500000 0.750000 0.600000 0.250000 0.600000 0.350000 tn4vl
18 0.000000 0.388889 0.722222 0.388889 0.000000 0.000000 0.000000 td5pr
19 0.500000 0.200000 0.533333 0.300000 0.566667 0.666667 0.433333 gyqu9
20 0.370370 0.592593 0.333333 0.333333 0.518519 0.444444 0.481481 fzchw
21 0.153846 0.846154 0.461538 0.923077 0.923077 0.846154 0.230769 l53hg
22 0.000000 0.600000 0.050000 0.200000 0.100000 0.100000 0.150000 3n2f9
|
doc/Class7-updated.ipynb | ###Markdown
--- Review of Data Collection --- Natural Language Processing
We will demonstrate how to go through these five steps for English and Chinese texts respectively.
1. Data Cleaning
- Main task: convert the case, remove punctuations and special characters like hashtags, hyperlinks
- Use Regular Expression for Pattern Matching
- Convert the case: `.lower()`
Regular Expression Cheat Sheet
- `.` matches any single character
- `[...]` group matching, matches any one of the characters inside the square brackets
- `[^x]` matches one character that is not x
- `|` an "or" operator, matches patterns on either side of the |
- `*` matches at least 0 times
- `+` matches at least 1 times
- `?` matches at most 1 times
- `{n}` matches n times
- `(...)` grouping in regular expressions
- `\\N` backreference to group N
- `^` matches the start of the string
- `$` matches the end of the string
1a. English
###Code
#install regular expression package for pattern matching
! pip3 install regex
import regex as re
#Use sub() function to match pattern and substitute the matched words with new pattern
a='I only have 100 dollars in my pocket. What I can buy?'
re.sub('I','You',a) #substitute a word with another word
#Substitute a word with nothing, meaning removing the word
re.sub('I','',a)
#Match a set of characters by using []. "A-Z" means all capital letters.
re.sub('[A-Z]','*',a)
#Remove all letters in lower case
re.sub('[a-z]','',a)
#Hide the numbers
re.sub('[0-9]','*',a)
#Remove all alphanumeric characters
re.sub('[0-9a-zA-Z]','',a)
#Remove all characters that are not alphanumeric
re.sub('[^0-9a-zA-Z]',' ',a)
#Shortcut to remove all punctuation
re.sub('\p{P}+',' ',a) #\p{...} matches a Unicode character property; {P} is the punctuation category.
#Remove hashtags
a='''@JerryNadler admits on #CNN they have no proof of Obstruction by @realDonaldTrump it's just his "personal opinion" Meet the new #WitchHunt Same as the old #WitchHunt cc @DonaldJTrumpJr'''
re.sub('#[^ ]+','',a)
#Extract hashtags by findall() function
re.findall('#[^ ]+',a)
#Extract all mentions
re.findall('@[^ ]+',a)
#Remove hyperlinks
a='Tesla’s abrupt shift to online-only car sales, after racing to open stores, battered its share price and raised questions about its future. https://goo.gl/rwGHTP'
re.sub('https://[^ ]+|http://[^ ]+','',a)
#transform all letters to lower case
a.lower()
###Output
_____no_output_____
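###Markdown
Some cheat-sheet patterns above (the quantifiers, `{n}`, grouping with a backreference, alternation, and the `^`/`$` anchors) are not exercised by the cells above; a small illustrative sketch on a made-up string:
###Code
#Demo string (made up purely for illustration)
b='cat cats caats dog 2019-2009 hello hello'
print(re.findall('ca+t',b)) # + : one or more 'a' -> ['cat', 'cat', 'caat']
print(re.findall('cats?',b)) # ? : optional 's' -> ['cat', 'cats']
print(re.findall('[0-9]{4}',b)) # {n} : exactly four digits -> ['2019', '2009']
print(re.findall('cat|dog',b)) # | : either pattern -> ['cat', 'cat', 'dog']
print(re.findall('^cat',b)) # ^ : only at the start of the string -> ['cat']
print(re.findall('hello$',b)) # $ : only at the end of the string -> ['hello']
print(re.sub(r'(hello) \1',r'\1',b)) # (...) and \1 : collapse the repeated word
###Output
_____no_output_____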
###Markdown
Practice Create a data_cleaning() function to convert letter case, remove punctuations, numbers, mentions, hashtags and hyperlinks
###Code
def data_cleaning(text):
text=text.lower()
text=re.sub('[0-9]+','',text)
text=re.sub('@[^ ]+','',text)
text=re.sub('#[^ ]+','',text)
text=re.sub('http://[^ ]+|https://[^ ]+','',text)
text=re.sub('\p{P}+',' ',text)
return(text)
#test your function with a post from @realDonaldTrump
a='@seanhannity “We the people will now be subjected to the biggest display of modern day McCarthyism....which is the widest fishing net expedition....every aspect of the presidents life....all in order to get power back so they can institute Socialism.” https://t.co/izb2tTrINB'
data_cleaning(a)
re.sub('\p{P}+',' ',re.sub('http://[^ ]+|https://[^ ]+|@[^ ]+|#[^ ]+','',a))
###Output
_____no_output_____
###Markdown
--- Break ---
Tokenization
- Definition: tokenization is a process of splitting sentences/paragraphs/documents into a set of words.
- Differences in Languages:
  - English: **words** are naturally separated with spaces
  - Korean: **phrases** are naturally separated with spaces
    - konlpy (http://konlpy.org/)
  - Chinese/Japanese: **no spaces** in text
    - Chinese: jieba (https://github.com/fxsjy/jieba)
    - Japanese: jNlp (https://github.com/kevincobain2000/jProcessing)
Tokenize English Text: Hunt for Spaces
###Code
#Split the following sentence into words
sentence='Mr. Zuckerberg, who runs Facebook, Instagram, WhatsApp and Messenger, on Wednesday expressed his intentions to change the essential nature of social media. Instead of encouraging public posts, he said he would focus on private and encrypted communications, in which users message mostly smaller groups of people they know. Unlike publicly shared posts that are kept as users’ permanent records, the communications could also be deleted after a certain period of time.'
sentence=data_cleaning(sentence)
words=sentence.split(' ')
import pandas as pd
###Output
_____no_output_____
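###Markdown
Before switching to Chinese, note that the space-splitting approach above cannot tokenize Chinese text, since Chinese has no spaces between words; a quick sketch:
###Code
#Chinese text has no spaces, so splitting on spaces returns the whole sentence as a single "word"
print('你好,这是一个简单的句子。'.split(' '))
#This is why a dedicated tokenizer such as jieba (introduced below) is needed
###Output
_____no_output_____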
###Markdown
**Extra Knowledge** We can use the function `gensim.parsing.preprocessing.stem_text(text)` to stem words in the sentence.
Tokenize Chinese Text
We will use the package "jieba" to tokenize Chinese text.
**Why jieba?**
- It adopts a hybrid method combining both statistical/probabilistic inference and pattern matching based on a dictionary.
  - capable of recognizing words existing in the pre-defined dictionary
  - capable of finding new words
- Two dictionaries:
  - System dictionary
    - Simplified Chinese
    - Simplified+Traditional Chinese
  - User dictionary
###Code
! pip3 install jieba
import jieba
list(jieba.cut('你好,这是一个简单的句子。'))
#it can segment traditional Chinese text by using the statistical inference method.
list(jieba.cut('你好,這是一個簡單的句子。'))
#however, statistical inference is not perfect.
list(jieba.cut('談判擱置,工會號召靜坐。'))
list(jieba.cut('谈判搁置,工会号召静坐。'))
###Output
_____no_output_____
###Markdown
Configure Dictionaries To better segment traditional Chinese text, we need to upgrade the system dictionary to include traditional Chinese words. Download the system dictionary from this link: https://github.com/fxsjy/jieba/raw/master/extra_dict/dict.txt.big
###Code
#load another dictionary to support traditional Chinese
jieba.set_dictionary('C:\\Users\\yuner\\AppData\\Local\\Programs\\Python\\Python36\\Lib\\site-packages\\jieba\\dict.txt.big')
#try tokenizing this sentence again
list(jieba.cut('談判擱置,工會號召靜坐。'))
#Some names and special terminologies cannot be properly identified.
print(list(jieba.cut('中央上周二向特首林鄭月娥發公函'))) #very long name
print(list(jieba.cut('台灣蔡英文總統日前表示希望與日本舉行安保對話'))) #names including frequently used words
print(list(jieba.cut('高雄市長韓國瑜本月稍後訪問港澳深圳廈門四市'))) #names including frequently used words
print(list(jieba.cut('汶萊的全稱為汶萊達魯薩蘭國。'))) #special terminologies
#Build your user dictionary (time-consuming)
file=open('user_dict.txt','w',encoding='utf-8')
file.write('林鄭月娥\n')
file.write('蔡英文\n')
file.write('韓國瑜\n')
file.write('汶萊達魯薩蘭國\n')
file.close()
#Use your user dictionary
jieba.load_userdict('user_dict.txt')
#After loading user dictionary:
print(list(jieba.cut('中央上周二向特首林鄭月娥發公函'))) #very long name
print(list(jieba.cut('台灣蔡英文總統日前表示希望與日本舉行安保對話'))) #names including frequently used words
print(list(jieba.cut('高雄市長韓國瑜本月稍後訪問港澳深圳廈門四市'))) #names including frequently used words
print(list(jieba.cut('汶萊的全稱為汶萊達魯薩蘭國。'))) #terminologies
###Output
['中央', '上周二', '向', '特首', '林鄭月娥', '發', '公函']
['台灣', '蔡英文', '總統', '日前', '表示', '希望', '與', '日本', '舉行', '安保', '對話']
['高雄', '市長', '韓國瑜', '本月', '稍後', '訪問', '港澳', '深圳', '廈門', '四市']
['汶萊', '的', '全稱', '為', '汶萊達魯薩蘭國', '。']
###Markdown
Remove stop words
Stop words are useless for understanding text.
In English: at, in, on, for, of, a, an, the...
In Chinese: 的,地,得,了. However, the combination 不得了 (holy great) is not a stop word; it is used to convey an extreme compliment about something.
√ Absolute Match. × Pattern Matching: stop words should be removed by exact (absolute) match rather than by pattern matching, so that words like 不得了 are preserved.
###Code
'a' in ['a','b','c']
'a' in ['aa','b','c']
'a' not in ['aa','b','c']
###Output
_____no_output_____
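###Markdown
A small sketch of why absolute match is preferred here, using the 不得了 example above: exact matching against the stop word keeps 不得了 intact, while pattern-based substitution of 了 would damage it.
###Code
#Absolute match: only the exact stop word 了 is dropped, 不得了 survives
demo_words=['不得了','了','好']
print([w for w in demo_words if w != '了'])
#Pattern match: substituting the character 了 everywhere corrupts 不得了 into 不得
print([re.sub('了','',w) for w in demo_words])
###Output
_____no_output_____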
###Markdown
Chinese stop words file: https://juniorworld.github.io/python-workshop-2018/doc/stop_words_chi.txt
English stop words file: https://juniorworld.github.io/python-workshop-2018/doc/stop_words_eng.txt
###Code
file_chi=open('C:\\Users\\yuner\\AppData\\Local\\Programs\\Python\\Python36\\Lib\\site-packages\\jieba\\stop_words_chi.txt','r',encoding='utf-8')
stop_words_chi=[]
for line in file_chi.readlines():
line=line.strip() #remove line break
stop_words_chi.append(line) #update the list of stop words line by line
file_chi.close()
len(stop_words_chi)
file_eng=open('C:\\Users\\yuner\\AppData\\Local\\Programs\\Python\\Python36\\Lib\\site-packages\\jieba\\stop_words_eng.txt','r')
stop_words_eng=[]
for line in file_eng.readlines():
line=line.strip() #remove line break
stop_words_eng.append(line) #update the list of stop words line by line
file_eng.close()
len(stop_words_eng)
###Output
_____no_output_____
###Markdown
Absolute Match of Stop words
###Code
sentence='Facebook将向加密通信转型,打造以隐私为中心的平台。'
words=list(jieba.cut(sentence))
words_new=[]
for word in words:
if word not in stop_words_chi:
words_new.append(word)
words_new
#for loop in the list
a=[1,2,3,4,5]
b=[i+1 for i in a] #increase element by one
#for loop and if statement in the list
a=[1,2,3,4,5]
b=[i for i in a if i<4]
b
words_new=[word for word in list(jieba.cut(sentence)) if word not in stop_words_chi]
words_new
#Clean and tokenize this sentence and remove the stop words
sentence='Mr. Zuckerberg, who runs Facebook, Instagram, WhatsApp and Messenger, on Wednesday expressed his intentions to change the essential nature of social media. Instead of encouraging public posts, he said he would focus on private and encrypted communications, in which users message mostly smaller groups of people they know. Unlike publicly shared posts that are kept as users’ permanent records, the communications could also be deleted after a certain period of time.'
words_new=[word for word in data_cleaning(sentence).split(' ') if word not in stop_words_eng]
words_new
###Output
_____no_output_____
###Markdown
Practice
Find the 10 fade-in and fade-out words in speeches.
The magnitude of difference is measured by the change in their relative frequencies:
Relative Freq (RF) = word frequency / max word frequency
Difference = RF2019 - RF2009
Options:
- Chinese: Annual government work reports, 2019 vs 2009
- English: State of the Union address, 2019 vs 2009
*Hint:*
*1. You can use `pd.concat([df1,df2],axis=1)` to combine two data frames by columns*
*2. You can use `df.fillna(0)` to replace NAN value with 0.*
*3. You can use `df.sort_values(column_name)` to sort a certain column.*
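A tiny toy demonstration of the three hinted functions (made-up numbers, unrelated to the speech data):
###Code
#Toy demo of the hint functions: combine by columns, fill missing values, sort
toy_2019=pd.Series({'a':1.0,'b':0.8,'c':0.2},name='2019')
toy_2009=pd.Series({'a':0.9,'d':0.5},name='2009')
toy=pd.concat([toy_2019,toy_2009],axis=1) #words missing from one year become NaN
toy=toy.fillna(0) #replace NaN with 0
toy['diff']=toy['2019']-toy['2009']
print(toy.sort_values('diff')) #sort by the difference column
###Output
_____no_output_____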
###Code
freq_words=pd.Series(words).value_counts()
freq_words/max(freq_words)
file_2019=open('doc/2019_Government_Work_Report.txt','r',encoding='utf-8')
file_2009=open('doc/2009_Government_Work_Report.txt','r',encoding='utf-8')
#CHI
words_new=[]
for line in file_2019.readlines():
line=line.strip()
line=data_cleaning(line)
words=list(jieba.cut(line))
for word in words:
if word not in stop_words_chi:
words_new.append(word)
#ENG (for the English option, open and read the State of the Union text files here instead; note that a file handle can only be read through once)
words_new=[]
for line in file_2019.readlines():
line=line.strip()
line=data_cleaning(line)
words=line.split(' ')
for word in words:
if word not in stop_words_eng:
words_new.append(word)
words_new_2009=[]
for line in file_2009.readlines():
line=line.strip()
line=data_cleaning(line)
words=list(jieba.cut(line))
for word in words:
if word not in stop_words_chi:
words_new_2009.append(word)
words_new_2009
freq_2019=pd.Series(words_new).value_counts()
freq_2009=pd.Series(words_new_2009).value_counts()
relative_freq_2019=freq_2019/max(freq_2019)
relative_freq_2009=freq_2009/max(freq_2009)
relative_freq=pd.concat([relative_freq_2019,relative_freq_2009],axis=1)
relative_freq=relative_freq.fillna(0)
relative_freq['diff']=relative_freq[0]-relative_freq[1]
relative_freq.columns=['2019','2009','diff']
relative_freq.sort_values('diff').tail(10)
###Output
_____no_output_____ |
Introduction to TF 4 AI, ML, DL/Course_1_Part_8_Lesson_4_Notebook.ipynb | ###Markdown
###Code
!wget --no-check-certificate \
https://storage.googleapis.com/laurencemoroney-blog.appspot.com/horse-or-human.zip \
-O /tmp/horse-or-human.zip
!wget --no-check-certificate \
https://storage.googleapis.com/laurencemoroney-blog.appspot.com/validation-horse-or-human.zip \
-O /tmp/validation-horse-or-human.zip
###Output
_____no_output_____
###Markdown
The following Python code uses the os library to give you access to the file system, and the zipfile library to let you unzip the data.
###Code
import os
import zipfile
local_zip = '/tmp/horse-or-human.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp/horse-or-human')
local_zip = '/tmp/validation-horse-or-human.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp/validation-horse-or-human')
zip_ref.close()
###Output
_____no_output_____
###Markdown
The contents of the .zip files are extracted to the base directories `/tmp/horse-or-human` and `/tmp/validation-horse-or-human`, each of which contains `horses` and `humans` subdirectories. In short: the training set is the data used to tell the neural network model that 'this is what a horse looks like', 'this is what a human looks like', etc. One thing to pay attention to in this sample: we do not explicitly label the images as horses or humans. If you remember the handwriting example earlier, we labelled 'this is a 1', 'this is a 7', etc. Later you'll see something called an ImageGenerator being used -- it is coded to read images from subdirectories and to label them automatically from the name of that subdirectory. So, for example, you will have a 'training' directory containing a 'horses' directory and a 'humans' one. ImageGenerator will label the images appropriately for you, reducing a coding step. Let's define each of these directories:
###Code
# Directory with our training horse pictures
train_horse_dir = os.path.join('/tmp/horse-or-human/horses')
# Directory with our training human pictures
train_human_dir = os.path.join('/tmp/horse-or-human/humans')
# Directory with our validation horse pictures
validation_horse_dir = os.path.join('/tmp/validation-horse-or-human/validation-horses')
# Directory with our validation human pictures
validation_human_dir = os.path.join('/tmp/validation-horse-or-human/validation-humans')
###Output
_____no_output_____
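###Markdown
The visualization code at the end of this notebook uses `train_horse_names` and `train_human_names`; a minimal sketch that builds these lists from the training directories defined above:
###Code
# List the image filenames in each training directory (used later when sampling images to visualize)
train_horse_names = os.listdir(train_horse_dir)
train_human_names = os.listdir(train_human_dir)
print(train_horse_names[:5])
print(train_human_names[:5])
print('total training horse images:', len(train_horse_names))
print('total training human images:', len(train_human_names))
###Output
_____no_output_____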
###Markdown
Building a Small Model from Scratch But before we continue, let's start defining the model. Step 1 will be to import tensorflow.
###Code
import tensorflow as tf
###Output
_____no_output_____
###Markdown
We then add convolutional layers as in the previous example, and flatten the final result to feed into the densely connected layers. Finally we add the densely connected layers. Note that because we are facing a two-class classification problem, i.e. a *binary classification problem*, we will end our network with a [*sigmoid* activation](https://wikipedia.org/wiki/Sigmoid_function), so that the output of our network will be a single scalar between 0 and 1, encoding the probability that the current image is class 1 (as opposed to class 0).
###Code
model = tf.keras.models.Sequential([
# Note the input shape is the desired size of the image 150x150 with 3 bytes color
# This is the first convolution
tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(150, 150, 3)),
tf.keras.layers.MaxPooling2D(2, 2),
# The second convolution
tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
# The third convolution
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
# The fourth convolution
#tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
#tf.keras.layers.MaxPooling2D(2,2),
# The fifth convolution
#tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
#tf.keras.layers.MaxPooling2D(2,2),
# Flatten the results to feed into a DNN
tf.keras.layers.Flatten(),
# 512 neuron hidden layer
tf.keras.layers.Dense(512, activation='relu'),
# Only 1 output neuron. It will contain a value from 0-1 where 0 for 1 class ('horses') and 1 for the other ('humans')
tf.keras.layers.Dense(1, activation='sigmoid')
])
###Output
_____no_output_____
###Markdown
The model.summary() method call prints a summary of the NN
###Code
model.summary()
###Output
_____no_output_____
###Markdown
The "output shape" column shows how the size of your feature map evolves in each successive layer. The convolution layers reduce the size of the feature maps by a bit due to padding, and each pooling layer halves the dimensions. Next, we'll configure the specifications for model training. We will train our model with the `binary_crossentropy` loss, because it's a binary classification problem and our final activation is a sigmoid. (For a refresher on loss metrics, see the [Machine Learning Crash Course](https://developers.google.com/machine-learning/crash-course/descending-into-ml/video-lecture).) We will use the `rmsprop` optimizer with a learning rate of `0.001`. During training, we will want to monitor classification accuracy.**NOTE**: In this case, using the [RMSprop optimization algorithm](https://wikipedia.org/wiki/Stochastic_gradient_descentRMSProp) is preferable to [stochastic gradient descent](https://developers.google.com/machine-learning/glossary/SGD) (SGD), because RMSprop automates learning-rate tuning for us. (Other optimizers, such as [Adam](https://wikipedia.org/wiki/Stochastic_gradient_descentAdam) and [Adagrad](https://developers.google.com/machine-learning/glossary/AdaGrad), also automatically adapt the learning rate during training, and would work equally well here.)
###Code
from tensorflow.keras.optimizers import RMSprop
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.001),
metrics=['acc'])
###Output
_____no_output_____
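###Markdown
As noted above, other adaptive optimizers should work comparably; a minimal sketch of the same compile step using Adam instead of RMSprop (left commented out so the notebook still trains with the RMSprop-compiled model above):
###Code
# Alternative compile step with the Adam optimizer (sketch; same loss, learning rate and metrics)
from tensorflow.keras.optimizers import Adam
# model.compile(loss='binary_crossentropy',
#               optimizer=Adam(lr=0.001),
#               metrics=['acc'])
###Output
_____no_output_____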
###Markdown
Data Preprocessing Let's set up data generators that will read pictures in our source folders, convert them to `float32` tensors, and feed them (with their labels) to our network. We'll have one generator for the training images and one for the validation images. Our generators will yield batches of images of size 150x150 and their labels (binary). As you may already know, data that goes into neural networks should usually be normalized in some way to make it more amenable to processing by the network. (It is uncommon to feed raw pixels into a convnet.) In our case, we will preprocess our images by normalizing the pixel values to be in the `[0, 1]` range (originally all values are in the `[0, 255]` range). In Keras this can be done via the `keras.preprocessing.image.ImageDataGenerator` class using the `rescale` parameter. This `ImageDataGenerator` class allows you to instantiate generators of augmented image batches (and their labels) via `.flow(data, labels)` or `.flow_from_directory(directory)`. These generators can then be used with the Keras model methods that accept data generators as inputs: `fit_generator`, `evaluate_generator`, and `predict_generator`.
###Code
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale=1/255)
validation_datagen = ImageDataGenerator(rescale=1/255)
# Flow training images in batches of 128 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
'/tmp/horse-or-human/', # This is the source directory for training images
target_size=(150, 150), # All images will be resized to 150x150
batch_size=128,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
# Flow validation images in batches of 32 using validation_datagen generator
validation_generator = validation_datagen.flow_from_directory(
        '/tmp/validation-horse-or-human/',  # This is the source directory for validation images
target_size=(150, 150), # All images will be resized to 150x150
batch_size=32,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
###Output
_____no_output_____
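###Markdown
The text above also mentions the in-memory `.flow(data, labels)` variant of `ImageDataGenerator`. Below is a minimal, hypothetical sketch of that API; the `dummy_images`/`dummy_labels` arrays are made up for illustration and are not used anywhere else in this exercise.
###Code
# Hypothetical in-memory example of ImageDataGenerator.flow (illustration only).
import numpy as np
dummy_images = np.random.rand(16, 150, 150, 3).astype('float32')  # already scaled to [0, 1]
dummy_labels = np.random.randint(0, 2, size=(16,))
array_gen = ImageDataGenerator().flow(dummy_images, dummy_labels, batch_size=8)
batch_x, batch_y = next(array_gen)
print(batch_x.shape, batch_y.shape)  # (8, 150, 150, 3) (8,)
###Output
_____no_output_____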
###Markdown
TrainingLet's train for 15 epochs -- this may take a few minutes to run. Do note the values per epoch. The Loss and Accuracy are a great indication of training progress: the model makes a guess as to the classification of the training data, then measures it against the known label and calculates the result. Accuracy is the proportion of correct guesses.
###Code
history = model.fit_generator(
train_generator,
steps_per_epoch=8,
epochs=15,
verbose=1,
validation_data = validation_generator,
validation_steps=8)
###Output
_____no_output_____
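###Markdown
A small, optional sketch (not in the original) to visualize the loss and accuracy recorded per epoch by `fit_generator`. The accuracy key is assumed to be `'acc'` (matching `metrics=['acc']` above), with `'accuracy'` as a fallback for newer Keras versions.
###Code
import matplotlib.pyplot as plt

# Plot the per-epoch loss and accuracy stored in the History object returned by fit_generator.
acc_key = 'acc' if 'acc' in history.history else 'accuracy'
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history[acc_key], label='accuracy')
plt.xlabel('epoch')
plt.legend()
plt.show()
###Output
_____no_output_____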
###Markdown
Running the ModelLet's now take a look at actually running a prediction using the model. This code will let you choose one or more files from your file system; it will then upload them and run them through the model, indicating whether the object is a horse or a human.
###Code
import numpy as np
from google.colab import files
from tensorflow.keras.preprocessing import image
uploaded = files.upload()
for fn in uploaded.keys():
# predicting images
path = '/content/' + fn
img = image.load_img(path, target_size=(150, 150))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
classes = model.predict(images, batch_size=10)
print(classes[0])
if classes[0]>0.5:
print(fn + " is a human")
else:
print(fn + " is a horse")
###Output
_____no_output_____
###Markdown
Visualizing Intermediate RepresentationsTo get a feel for what kind of features our convnet has learned, one fun thing to do is to visualize how an input gets transformed as it goes through the convnet.Let's pick a random image from the training set, and then generate a figure where each row is the output of a layer, and each image in the row is a specific filter in that output feature map. Rerun this cell to generate intermediate representations for a variety of training images.
###Code
import numpy as np
import random
from tensorflow.keras.preprocessing.image import img_to_array, load_img
# Let's define a new Model that will take an image as input, and will output
# intermediate representations for all layers in the previous model after
# the first.
successive_outputs = [layer.output for layer in model.layers[1:]]
#visualization_model = Model(img_input, successive_outputs)
visualization_model = tf.keras.models.Model(inputs = model.input, outputs = successive_outputs)
# Let's prepare a random input image from the training set.
horse_img_files = [os.path.join(train_horse_dir, f) for f in train_horse_names]
human_img_files = [os.path.join(train_human_dir, f) for f in train_human_names]
img_path = random.choice(horse_img_files + human_img_files)
img = load_img(img_path, target_size=(150, 150))  # this is a PIL image
x = img_to_array(img) # Numpy array with shape (150, 150, 3)
x = x.reshape((1,) + x.shape) # Numpy array with shape (1, 150, 150, 3)
# Rescale by 1/255
x /= 255
# Let's run our image through our network, thus obtaining all
# intermediate representations for this image.
successive_feature_maps = visualization_model.predict(x)
# These are the names of the layers, so we can have them as part of our plot
layer_names = [layer.name for layer in model.layers]
# Now let's display our representations
for layer_name, feature_map in zip(layer_names, successive_feature_maps):
if len(feature_map.shape) == 4:
# Just do this for the conv / maxpool layers, not the fully-connected layers
n_features = feature_map.shape[-1] # number of features in feature map
# The feature map has shape (1, size, size, n_features)
size = feature_map.shape[1]
# We will tile our images in this matrix
display_grid = np.zeros((size, size * n_features))
for i in range(n_features):
# Postprocess the feature to make it visually palatable
x = feature_map[0, :, :, i]
x -= x.mean()
x /= x.std()
x *= 64
x += 128
x = np.clip(x, 0, 255).astype('uint8')
# We'll tile each filter into this big horizontal grid
display_grid[:, i * size : (i + 1) * size] = x
# Display the grid
scale = 20. / n_features
plt.figure(figsize=(scale * n_features, scale))
plt.title(layer_name)
plt.grid(False)
plt.imshow(display_grid, aspect='auto', cmap='viridis')
###Output
_____no_output_____
###Markdown
As you can see we go from the raw pixels of the images to increasingly abstract and compact representations. The representations downstream start highlighting what the network pays attention to, and they show fewer and fewer features being "activated"; most are set to zero. This is called "sparsity." Representation sparsity is a key feature of deep learning.These representations carry increasingly less information about the original pixels of the image, but increasingly refined information about the class of the image. You can think of a convnet (or a deep network in general) as an information distillation pipeline. Clean UpBefore running the next exercise, run the following cell to terminate the kernel and free memory resources:
###Code
import os, signal
os.kill(os.getpid(), signal.SIGKILL)
###Output
_____no_output_____ |
instructor/Vibroseis_data.ipynb | ###Markdown
Vibroseis data**[Download the data from source](http://www.geofizyka.pl/2D_Land_vibro_data_2ms.tgz) or from [Agile's S3 bucket]( https://s3.amazonaws.com/agilegeo/2D_Land_vibro_data_2ms.tgz).**This prestack 2D land Vibroseis dataset was donated to the public domain by [Geofizyka Torun, Poland](http://www.geofizyka.pl/).More info about this line:- Info about this line [on SEG Wiki](http://wiki.seg.org/wiki/2D_Vibroseis_Line_001). - A [Madagascar tutorial](http://ahay.org/wikilocal/docs/school10.pdf) using this line, by Yang Liu.- A [FreeUSP tutorial](http://www.freeusp.org/RaceCarWebsite/TechTransfer/Tutorials/Processing_2D/Processing_2D.html) using this line, by Paul Garossino.
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import obspy
obspy.__version__
ls -l ../data/poland
###Output
_____no_output_____
###Markdown
We'll use this helper function later.
###Code
def view_header(string, width=80):
try:
# Make sure we don't have a ``bytes`` object.
string = string.decode()
except:
# String is already a string, carry on.
pass
lines = int(np.ceil(len(string) / width))
result = ''
for i in range(lines):
line = string[i*width:i*width+width]
result += line + (width-len(line))*' ' + '\n'
print(result)
return
###Output
_____no_output_____
###Markdown
Load data
###Code
filename = '../data/poland/Line_001.sgy'
from obspy.io.segy.segy import _read_segy
section = _read_segy(filename)
###Output
_____no_output_____
###Markdown
The file-wide header:
###Code
view_header(section.textual_file_header)
###Output
_____no_output_____
###Markdown
The important line is this: C 5 DATA TRACES/RECORD: 282 AUXILIARY TRACES/RECORD: 2 CDP FOLD There are 282 data traces, plus 2 auxiliary traces, so a total of **284 traces in each record**. Let's also check a trace header:
###Code
section.traces[3].header
###Output
_____no_output_____
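###Markdown
As a quick sanity check (not in the original), the total number of traces in the file should be a whole multiple of 284 (282 data + 2 auxiliary traces per record):
###Code
# Total traces, remainder modulo 284, and the implied number of records.
print(len(section.traces), len(section.traces) % 284, len(section.traces) // 284)
###Output
_____no_output_____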
###Markdown
There's also a readme file:
###Code
!cat ../data/poland/Line_001.TXT
###Output
_____no_output_____
###Markdown
This might be useful, but remember not to believe anything you read. Explore and organize the dataFirst we'll collect the traces and reshape them into a volume.
###Code
raw = np.vstack([t.data for t in section.traces])
raw.shape
###Output
_____no_output_____
###Markdown
First 1000 traces:
###Code
plt.figure(figsize=(18,8))
plt.imshow(raw[:1000, :].T, cmap="Greys", vmin=-.1, vmax=.1, aspect=0.25, interpolation='none')
plt.colorbar(shrink=0.5)
plt.show()
###Output
_____no_output_____
###Markdown
Recall that there are 284 traces (282 + 2 auxiliary) per ensemble, so we can use the `reshape` trick of passing `-1` as one of the dimensions to get it to compute that axis on the fly, given the other two dimensions. We'll pass the last dimension of the input data to avoid changing the shape in that dimension.
###Code
data = raw.reshape((-1, 284, raw.shape[-1]))
plt.figure(figsize=(18,8))
plt.imshow(data[90, :, :].T, cmap="Greys", vmin=-1, vmax=1, aspect=0.1, interpolation='none')
plt.colorbar(shrink=0.5)
plt.show()
###Output
_____no_output_____
###Markdown
There are two special data traces at the start of each ensemble. Let's pull those out so we have 'pure' gathers.
###Code
gathers = data[:, 2:, :]
vm = np.percentile(gathers, 99)
plt.figure(figsize=(18,8))
plt.imshow(gathers[0, :, :].T, cmap="Greys", vmin=-vm, vmax=vm, aspect=0.1, interpolation='none')
plt.colorbar(shrink=0.5)
plt.show()
###Output
_____no_output_____
###Markdown
Let's go back and look at that zeroth trace — we'll just look at the one on the 91st gather:
###Code
t90 = data[90, :, :]  # the 91st gather (index 90), as described above
plt.figure(figsize=(16,3))
plt.plot(t90[0,:])
plt.show()
#np.savetxt("../data/poland_wavelet.txt", t90[0,:])
###Output
_____no_output_____
###Markdown
Source and receiver positionsLet's look at the source and receiver data.
###Code
!head -25 ../data/poland/Line_001.RPS
###Output
_____no_output_____
###Markdown
The obvious way to load this sort of data is `pandas`...
###Code
names = ['Record', 'Point', 'Static', 'Easting', 'Northing', 'Elevation']
cols = [0, 1, 2, 7, 8, 9]
import pandas
rcv = pandas.read_csv('../data/poland/Line_001.RPS',
delim_whitespace=True,
skiprows=20,
usecols=cols,
names=names,
)
rcv.head()
rcv.describe()
###Output
_____no_output_____
###Markdown
Hopefully the source data is the same...
###Code
!head -25 ../data/poland/Line_001.SPS
###Output
_____no_output_____
###Markdown
It is!
###Code
src = pandas.read_csv('../data/poland/Line_001.SPS',
delim_whitespace=True,
skiprows=20,
usecols=cols,
names=names,
)
src.head()
###Output
_____no_output_____
###Markdown
Now plot them together.
###Code
plt.scatter(src.Easting, src.Northing, c='r', lw=0, s=3, alpha=0.5, label='src')
plt.scatter(rcv.Easting, rcv.Northing, c='b', lw=0, s=2, alpha=0.4, label='rcv')
plt.legend(loc=2)
plt.show()
!head -25 ../data/poland/Line_001.XPS
###Output
_____no_output_____
###Markdown
Brute stackWe can stack the traces as they are, without any noise suppression, NMO correction, etc.
###Code
gathers.shape
brute = np.mean(gathers, axis=1)
vm = np.percentile(brute, 99)
plt.figure(figsize=(18,8))
plt.imshow(brute.T, cmap="Greys", vmin=-vm, vmax=vm, aspect=0.1, interpolation='none')
plt.colorbar(shrink=0.5)
plt.show()
###Output
_____no_output_____
###Markdown
Write this out to SEG-Y
###Code
from obspy.core import Trace, Stream, UTCDateTime
from obspy.io.segy.segy import SEGYTraceHeader
stream = Stream()
for i, trace in enumerate(brute):
# Make the trace.
tr = Trace(trace)
# Add required data.
tr.stats.delta = 0.004
tr.stats.starttime = 0 # Not strictly required.
# Add yet more to the header (optional).
tr.stats.segy = {'trace_header': SEGYTraceHeader()}
tr.stats.segy.trace_header.trace_sequence_number_within_line = i + 1
tr.stats.segy.trace_header.receiver_group_elevation = 0
# Append the trace to the stream.
stream.append(tr)
from obspy.core import AttribDict
from obspy.io.segy.segy import SEGYBinaryFileHeader
# Text header.
stream.stats = AttribDict()
stream.stats.textual_file_header = '{:80s}'.format('This is the textual header.').encode()
stream.stats.textual_file_header += '{:80s}'.format('This file contains a brute stack.').encode()
stream.stats.textual_file_header += '{:80s}'.format('The original file header and trace headers disagree on sample interval.').encode()
stream.stats.textual_file_header += '{:80s}'.format('I think the header is probably right, it is 4 ms so records are 6 s.').encode()
stream.stats.textual_file_header += '{:80s}'.format('Only useful lines from original file header:').encode()
stream.stats.textual_file_header += '{:80s}'.format('C 2 LINE: LINE_001 AREA MAP ID ').encode()
stream.stats.textual_file_header += '{:80s}'.format('C 4 INSTRUMENT: MFG MODEL SERIAL NO ').encode()
stream.stats.textual_file_header += '{:80s}'.format('C 5 DATA TRACES/RECORD: 282 AUXILIARY TRACES/RECORD: 2 CDP FOLD ').encode()
stream.stats.textual_file_header += '{:80s}'.format('C 6 SAMPLE INTERNAL: 4MS SAMPLES/TRACE: 750 BITS/IN BYTES/SAMPLE 4 ').encode()
# Binary header.
stream.stats.binary_file_header = SEGYBinaryFileHeader()
stream.stats.binary_file_header.trace_sorting_code = 4
stream.stats.binary_file_header.seg_y_format_revision_number = 0x0100
import sys
stream.write('../data/poland_brute_stack.sgy', format='SEGY', data_encoding=5, byteorder=sys.byteorder)
###Output
_____no_output_____
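###Markdown
Optional sanity check (not in the original): re-read the file we just wrote and confirm the trace count matches the brute stack.
###Code
# Re-read the SEG-Y we just wrote and compare trace counts.
check = _read_segy('../data/poland_brute_stack.sgy')
print(len(check.traces), 'traces re-read; expected', brute.shape[0])
###Output
_____no_output_____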
###Markdown
NMO velocityFrom Madagascar velocity scan: https://www.dropbox.com/s/alski0p047ylwu0/Screenshot%202016-09-14%2009.28.40.png?raw=1 Min velocity (blue): 2200 m/s, max velocity (red): 4250 m/s
###Code
import numpy as np
velocity = np.load('../data/poland/Velocity.npy')
plt.imshow(velocity, cmap='viridis')
plt.colorbar()
mi, ma = np.amin(velocity), np.amax(velocity)
mi, ma
# There are 251 gathers, and 1501 time samples
# So we need this array to be 1501 rows by 251 columns.
from scipy.misc import imresize
v = imresize(velocity,(1501, 251))
###Output
_____no_output_____
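###Markdown
`scipy.misc.imresize` was removed in SciPy 1.3, so the cell above needs an older SciPy (plus Pillow). As an alternative, here is a hedged sketch using scikit-image (an assumption; it is not used by the rest of this notebook). With `preserve_range=True` the original velocity values are kept, so this produces the transposed int16 array directly and the /255 rescaling done in the next cells would not be needed for `v_alt`.
###Code
# Hedged alternative to scipy.misc.imresize, assuming scikit-image is installed.
from skimage.transform import resize
v_alt = resize(velocity, (1501, 251), preserve_range=True, anti_aliasing=True).T.astype(np.int16)
print(v_alt.shape, v_alt.min(), v_alt.max())  # shape (251, 1501), values still in the original range
###Output
_____no_output_____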
###Markdown
We lost the scaling:
###Code
np.amin(v), np.amax(v)
###Output
_____no_output_____
###Markdown
Let's also fix the orientation — we want traces in the first dimension.
###Code
v = ((v/255).T * (ma - mi) + mi).astype(np.int16)
np.amin(v), np.amax(v)
plt.plot(v[40])
plt.imshow(v.T, aspect=0.1)
plt.colorbar()
stream = Stream()
for i, trace in enumerate(v):
# Make the trace.
tr = Trace(trace)
# Add required data.
tr.stats.delta = 0.004
tr.stats.starttime = 0 # Not strictly required.
# Add yet more to the header (optional).
tr.stats.segy = {'trace_header': SEGYTraceHeader()}
tr.stats.segy.trace_header.trace_sequence_number_within_line = i + 1
tr.stats.segy.trace_header.receiver_group_elevation = 0
# Append the trace to the stream.
stream.append(tr)
# Text header.
stream.stats = AttribDict()
stream.stats.textual_file_header = '{:80s}'.format('This is the textual header.').encode()
stream.stats.textual_file_header += '{:80s}'.format('This file contains velocity data.').encode()
# Binary header.
stream.stats.binary_file_header = SEGYBinaryFileHeader()
stream.stats.binary_file_header.trace_sorting_code = 4
stream.stats.binary_file_header.seg_y_format_revision_number = 0x0100
# Encodings:
# 1: IBM, 32-bit float
# 2: 32-bit int
# 3: 16-bit int
# 4: obsolete
# 5: IEEE, 32-bit float
# 8: 8-bit int
stream.write('../data/poland/poland_velocity.sgy', format='SEGY', data_encoding=3, byteorder=sys.byteorder)
###Output
_____no_output_____ |
Pruebas_Livianas.ipynb | ###Markdown
Step-by-step program
###Code
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn import preprocessing
import seaborn as sns
import matplotlib.pyplot as plt
df_train = pd.read_csv('train.csv',index_col='Id')
df_test = pd.read_csv('test.csv',index_col='Id')
df_train.columns
# Columns to drop, given the factors described in the README.md
no_relevancia = ['index', 'month', 'day', 'NO2', 'O3', 'DEWP', 'station']
df_train.drop(columns= no_relevancia, inplace= True)
df_test.drop(columns= no_relevancia, inplace= True)
df_train.head()
###Output
_____no_output_____
###Markdown
At first glance, we can see that it may be necessary to standardize the data in the following columns:* year: categorize the values and avoid the thousands* hour: as long as it is not in 24-hour format* TEMP: avoid negative values (?)* wd: encode it as dummies
###Code
df_train.isna().sum()
df_train.dtypes
df_train["year"].value_counts()
print(f"TEMP\nmin: {df_train['TEMP'].min()}\nmax: {df_train['TEMP'].max()}")
df_train["wd"].value_counts()
###Output
_____no_output_____
###Markdown
The wind direction has more values than I expected; I think it should be condensed into binary indicators for N, E, S, W
###Code
df_train["TEMP"] =(df_train["TEMP"]-df_train["TEMP"].min())/(df_train["TEMP"].max()-df_train["TEMP"].min())
df_test["TEMP"] =(df_test["TEMP"]-df_test["TEMP"].min())/(df_test["TEMP"].max()-df_test["TEMP"].min())
def Estandarizar_Direccion(df):
    # Expand the wind-direction string into binary N/S/E/W indicator columns.
    for idx in df.index:
        valor_cargado = df.loc[idx, "wd"]
        df.loc[idx, "N"] = 1 if "N" in valor_cargado else 0
        df.loc[idx, "S"] = 1 if "S" in valor_cargado else 0
        df.loc[idx, "E"] = 1 if "E" in valor_cargado else 0
        df.loc[idx, "W"] = 1 if "W" in valor_cargado else 0
    # Note: the original "wd" column is dropped outside this function.
Estandarizar_Direccion(df_train)
Estandarizar_Direccion(df_test)
df_train.drop(columns= ["wd"], inplace= True)
df_test.drop(columns= ["wd"], inplace= True)
df_train["year"] = df_train["year"]-2013
df_test["year"] = df_test["year"]-2013
df_train.head()
df_test["PM2.5"] = 0
df_test.head()
X = df_train.drop(columns=["PM2.5"])
y = df_train["PM2.5"]
X_train,x_test,y_train, y_test = train_test_split(X,y)
corr = X.corr()
mask = np.triu(np.ones_like(corr, dtype=bool))
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(230, 20, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
###Output
_____no_output_____
###Markdown
Of course...1. I can define the direction using only N and E; the absence of each would indicate the opposite.2. I could try removing the atmospheric pressure and keeping the temperature.I will do this further below, so as to keep the data and see the differences 
###Code
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.neural_network import MLPRegressor
###Output
_____no_output_____
###Markdown
With default values, 'RandomForestRegressor' is the most accurate of these models.
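A minimal sketch (not in the original) of such a comparison, using the default hyperparameters of the regressors imported above and the train/test split already defined:
###Code
# Hedged sketch: compare the imported regressors with default settings (R^2 via .score()).
for name, reg in [('RandomForest', RandomForestRegressor()),
                  ('GradientBoosting', GradientBoostingRegressor()),
                  ('KNN', KNeighborsRegressor()),
                  ('Linear', LinearRegression()),
                  ('Ridge', Ridge())]:
    reg.fit(X_train, y_train)
    print(name, reg.score(x_test, y_test))
###Output
_____no_output_____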
###Code
rfr_0 = RandomForestRegressor(
n_estimators= 100,
criterion= "mse",
min_samples_split= 2,
min_samples_leaf= 1
)
rfr_0.fit(X_train,y_train)
train_score = rfr_0.score(X_train,y_train)
test_score = rfr_0.score(x_test, y_test)
print('Train_score :', train_score,'\nTest_Score:' ,test_score,'\n')
X = df_train.drop(columns=["PM2.5","S","W"])
X_train,x_test,y_train, y_test = train_test_split(X,y)
rfr_0.fit(X_train,y_train)
train_score = rfr_0.score(X_train,y_train)
test_score = rfr_0.score(x_test, y_test)
print('Train_score :', train_score,'\nTest_Score:' ,test_score,'\n')
df_test.head()
X.head()
columnas_eliminar = ['S','W','PM2.5']
y_test= df_test.drop(columns=columnas_eliminar)
y_pred = rfr_0.predict(y_test)
y_pred
y_test['PM2.5'] = y_pred
y_test.head()
y_test['PM2.5'].to_csv('prueba_01', index=True)
###Output
_____no_output_____ |
4-cliques-triangles-structures-student.ipynb | ###Markdown
Load DataAs usual, let's start by loading some network data. This time round, we have a [physician trust](http://konect.uni-koblenz.de/networks/moreno_innovation) network, but slightly modified such that it is undirected rather than directed.> This directed network captures innovation spread among 246 physicians in four towns in Illinois, Peoria, Bloomington, Quincy and Galesburg. The data was collected in 1966. A node represents a physician and an edge between two physicians shows that the left physician told that the right physician is his friend or that he turns to the right physician if he needs advice or is interested in a discussion. There always only exists one edge between two nodes even if more than one of the listed conditions are true.
###Code
# Load the network.
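# (Assumes an earlier setup cell provides networkx as nx, matplotlib.pyplot as plt,
#  itertools.combinations, and the custom data-loading helper imported as cf.)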
G = cf.load_physicians_network()
# Make a Circos plot of the graph
from nxviz import CircosPlot
c = CircosPlot(G)
c.draw()
###Output
_____no_output_____
###Markdown
QuestionWhat can you infer about the structure of the graph from the Circos plot? Structures in a GraphWe can leverage what we have learned in the previous notebook to identify special structures in a graph. In a network, cliques are one of these special structures. CliquesIn a social network, cliques are groups of people in which everybody knows everybody. Triangles are a simple example of cliques. Let's try implementing a simple algorithm that finds out whether a node is present in a triangle or not.The core idea is that if a node is present in a triangle, then its neighbors' neighbors' neighbors should include itself.
###Code
# Example code.
def in_triangle(G, node):
"""
Returns whether a given node is present in a triangle relationship or not.
"""
# We first assume that the node is not present in a triangle.
is_in_triangle = False
# Then, iterate over every pair of the node's neighbors.
for nbr1, nbr2 in combinations(G.neighbors(node), 2):
# Check to see if there is an edge between the node's neighbors.
# If there is an edge, then the given node is present in a triangle.
if G.has_edge(nbr1, nbr2):
is_in_triangle = True
# We break because any triangle that is present automatically
# satisfies the problem requirements.
break
return is_in_triangle
in_triangle(G, 3)
###Output
_____no_output_____
###Markdown
In reality, NetworkX already has a function that *counts* the number of triangles that any given node is involved in. This is probably more useful than knowing whether a node is present in a triangle or not, but the above code was simply for practice.
###Code
nx.triangles(G, 3)
###Output
_____no_output_____
###Markdown
ExerciseCan you write a function that takes in one node and its associated graph as an input, and returns a list or set of itself + all other nodes that it is in a triangle relationship with? Do not return the triplets, but the `set`/`list` of nodes. (5 min.)**Possible Implementation:** If I check every pair of my neighbors, any pair that are also connected in the graph are in a triangle relationship with me.Hint: Python's [`itertools`](https://docs.python.org/3/library/itertools.html) module has a `combinations` function that may be useful.Hint: NetworkX graphs have a `.has_edge(node1, node2)` function that checks whether an edge exists between two nodes.Verify your answer by drawing out the subgraph composed of those nodes.
###Code
# Possible answer
def get_triangles(G, node):
neighbors = set(G.neighbors(node))
triangle_nodes = set()
"""
Fill in the rest of the code below.
"""
return triangle_nodes
# Verify your answer with the following function call. Should return something of the form:
# {3, 9, 11, 41, 42, 67}
get_triangles(G, 3)
# Then, draw out those nodes.
nx.draw(G.subgraph(get_triangles(G, 3)), with_labels=True)
# Compare for yourself that those are the only triangles that node 3 is involved in.
neighbors3 = G.neighbors(3)
neighbors3.append(3)
nx.draw(G.subgraph(neighbors3), with_labels=True)
###Output
_____no_output_____
###Markdown
Friend Recommendation: Open TrianglesNow that we have some code that identifies closed triangles, we might want to see if we can do some friend recommendations by looking for open triangles.Open triangles are like those that we described earlier on - A knows B and B knows C, but C's relationship with A isn't captured in the graph. What are the two general scenarios for finding open triangles that a given node is involved in?1. The given node is the centre node.1. The given node is one of the termini nodes. ExerciseCan you write a function that identifies, for a given node, the other two nodes that it is involved with in an open triangle, if there is one? (5 min.)Note: For this exercise, only consider the case when the node of interest is the centre node.**Possible Implementation:** Check every pair of my neighbors, and if they are not connected to one another, then we are in an open triangle relationship.
###Code
# Fill in your code here.
def get_open_triangles(G, node):
"""
There are many ways to represent this. One may choose to represent only the nodes involved
in an open triangle; this is not the approach taken here.
    Rather, we have code that explicitly enumerates every open triangle present.
"""
open_triangle_nodes = []
neighbors = set(G.neighbors(node))
for n1, n2 in combinations(G.neighbors(node), 2):
...
return open_triangle_nodes
# # Uncomment the following code if you want to draw out each of the triplets.
# nodes = get_open_triangles(G, 2)
# for i, triplet in enumerate(nodes):
# fig = plt.figure(i)
# nx.draw(G.subgraph(triplet), with_labels=True)
print(get_open_triangles(G, 3))
len(get_open_triangles(G, 3))
###Output
_____no_output_____
###Markdown
Triangle closure is also the core idea behind social networks' friend recommendation systems; of course, it's definitely more complicated than what we've implemented here. CliquesWe have figured out how to find triangles. Now, let's find out what **cliques** are present in the network. Recall: what is the definition of a clique?- NetworkX has a [clique-finding](https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.algorithms.clique.find_cliques.html) algorithm implemented.- This algorithm finds all maximally-sized cliques for a given node.- Note that maximal cliques of size `n` include all cliques of `size < n`
###Code
list(nx.find_cliques(G))
###Output
_____no_output_____
###Markdown
ExerciseTry writing a function `maximal_cliques_of_size(size, G)` that implements a search for all maximal cliques of a given size. (3 min.)
###Code
def maximal_cliques_of_size(size, G):
    return ______________________
maximal_cliques_of_size(2, G)
###Output
_____no_output_____
###Markdown
Connected ComponentsFrom [Wikipedia](https://en.wikipedia.org/wiki/Connected_component_%28graph_theory%29):> In graph theory, a connected component (or just component) of an undirected graph is a subgraph in which any two vertices are connected to each other by paths, and which is connected to no additional vertices in the supergraph.NetworkX also implements a [function](https://networkx.github.io/documentation/networkx-1.9.1/reference/generated/networkx.algorithms.components.connected.connected_component_subgraphs.html) that identifies connected component subgraphs.Remember how based on the Circos plot above, we had this hypothesis that the physician trust network may be divided into subgraphs. Let's check that, and see if we can redraw the Circos visualization.
###Code
ccsubgraphs = list(nx.connected_component_subgraphs(G))
len(ccsubgraphs)
###Output
_____no_output_____
###Markdown
ExerciseDraw a circos plot of the graph, but now colour and order the nodes by their connected component subgraph. (5 min.)Recall Circos API:```pythonc = CircosPlot(G, node_order='...', node_color='...')c.draw()plt.show() or plt.savefig(...)```
###Code
# Start by labelling each node in the master graph G by some number
# that represents the subgraph that contains the node.
for i, g in enumerate(_____________):
# Fill in code below.
c = CircosPlot(G, _________)
c.draw()
plt.savefig('images/physicians.png', dpi=300)
###Output
_____no_output_____
###Markdown
Load DataAs usual, let's start by loading some network data. This time round, we have a [physician trust](http://konect.uni-koblenz.de/networks/moreno_innovation) network, but slightly modified such that it is undirected rather than directed.> This directed network captures innovation spread among 246 physicians in four towns in Illinois, Peoria, Bloomington, Quincy and Galesburg. The data was collected in 1966. A node represents a physician and an edge between two physicians shows that the left physician told that the right physician is his friend or that he turns to the right physician if he needs advice or is interested in a discussion. There always only exists one edge between two nodes even if more than one of the listed conditions are true.
###Code
# Load the network.
G = cf.load_physicians_network()
# Make a Circos plot of the graph
from nxviz import CircosPlot
c = CircosPlot(G)
c.draw()
###Output
_____no_output_____
###Markdown
QuestionWhat can you infer about the structure of the graph from the Circos plot? Structures in a GraphWe can leverage what we have learned in the previous notebook to identify special structures in a graph. In a network, cliques are one of these special structures. CliquesIn a social network, cliques are groups of people in which everybody knows everybody. **Questions:**1. What is the simplest clique?1. What is the simplest complex clique?Let's try implementing a simple algorithm that finds out whether a node is present in a simple complex clique.
###Code
# Example code.
def in_triangle(G, node):
"""
Returns whether a given node is present in a triangle relationship or not.
"""
# We first assume that the node is not present in a triangle.
is_in_triangle = False
# Then, iterate over every pair of the node's neighbors.
for nbr1, nbr2 in combinations(G.neighbors(node), 2):
# Check to see if there is an edge between the node's neighbors.
# If there is an edge, then the given node is present in a triangle.
if G.has_edge(nbr1, nbr2):
is_in_triangle = True
# We break because any triangle that is present automatically
# satisfies the problem requirements.
break
return is_in_triangle
in_triangle(G, 3)
###Output
_____no_output_____
###Markdown
In reality, NetworkX already has a function that *counts* the number of triangles that any given node is involved in. This is probably more useful than knowing whether a node is present in a triangle or not, but the above code was simply for practice.
###Code
nx.triangles(G, 3)
###Output
_____no_output_____
###Markdown
ExerciseCan you write a function that takes in one node and its associated graph as an input, and returns a list or set of itself + all other nodes that it is in a triangle relationship with? Do not return the triplets, but the `set`/`list` of nodes. (5 min.)**Possible Implementation:** If I check every pair of my neighbors, any pair that are also connected in the graph are in a triangle relationship with me.Hint: Python's [`itertools`](https://docs.python.org/3/library/itertools.html) module has a `combinations` function that may be useful.Hint: NetworkX graphs have a `.has_edge(node1, node2)` function that checks whether an edge exists between two nodes.Verify your answer by drawing out the subgraph composed of those nodes.
###Code
# Possible answer
def get_triangles(G, node):
neighbors = set(G.neighbors(node))
triangle_nodes = set()
"""
Fill in the rest of the code below.
"""
return triangle_nodes
# Verify your answer with the following function call. Should return something of the form:
# {3, 9, 11, 41, 42, 67}
get_triangles(G, 3)
# Then, draw out those nodes.
nx.draw(G.subgraph(get_triangles(G, 3)), with_labels=True)
# Compare for yourself that those are the only triangles that node 3 is involved in.
neighbors3 = G.neighbors(3)
neighbors3.append(3)
nx.draw(G.subgraph(neighbors3), with_labels=True)
###Output
_____no_output_____
###Markdown
Friend Recommendation: Open TrianglesNow that we have some code that identifies closed triangles, we might want to see if we can do some friend recommendations by looking for open triangles.Open triangles are like those that we described earlier on - A knows B and B knows C, but C's relationship with A isn't captured in the graph. What are the two general scenarios for finding open triangles that a given node is involved in?1. The given node is the centre node.1. The given node is one of the termini nodes. ExerciseCan you write a function that identifies, for a given node, the other two nodes that it is involved with in an open triangle, if there is one? (5 min.)Note: For this exercise, only consider the case when the node of interest is the centre node.**Possible Implementation:** Check every pair of my neighbors, and if they are not connected to one another, then we are in an open triangle relationship.
###Code
# Fill in your code here.
def get_open_triangles(G, node):
"""
There are many ways to represent this. One may choose to represent only the nodes involved
in an open triangle; this is not the approach taken here.
    Rather, we have code that explicitly enumerates every open triangle present.
"""
open_triangle_nodes = []
neighbors = set(G.neighbors(node))
for n1, n2 in combinations(G.neighbors(node), 2):
...
return open_triangle_nodes
# # Uncomment the following code if you want to draw out each of the triplets.
# nodes = get_open_triangles(G, 2)
# for i, triplet in enumerate(nodes):
# fig = plt.figure(i)
# nx.draw(G.subgraph(triplet), with_labels=True)
print(get_open_triangles(G, 3))
len(get_open_triangles(G, 3))
###Output
_____no_output_____
###Markdown
Triangle closure is also the core idea behind social networks' friend recommendation systems; of course, it's definitely more complicated than what we've implemented here. CliquesWe have figured out how to find triangles. Now, let's find out what **cliques** are present in the network. Recall: what is the definition of a clique?- NetworkX has a [clique-finding](https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.algorithms.clique.find_cliques.html) algorithm implemented.- This algorithm finds all maximally-sized cliques for a given node.- Note that maximal cliques of size `n` include all cliques of `size < n`
###Code
list(nx.find_cliques(G))
###Output
_____no_output_____
###Markdown
ExerciseTry writing a function `maximal_cliques_of_size(size, G)` that implements a search for all maximal cliques of a given size. (3 min.)
###Code
def maximal_cliques_of_size(size, G):
    return ______________________
maximal_cliques_of_size(2, G)
###Output
_____no_output_____
###Markdown
Connected ComponentsFrom [Wikipedia](https://en.wikipedia.org/wiki/Connected_component_%28graph_theory%29):> In graph theory, a connected component (or just component) of an undirected graph is a subgraph in which any two vertices are connected to each other by paths, and which is connected to no additional vertices in the supergraph.NetworkX also implements a [function](https://networkx.github.io/documentation/networkx-1.9.1/reference/generated/networkx.algorithms.components.connected.connected_component_subgraphs.html) that identifies connected component subgraphs.Remember how based on the Circos plot above, we had this hypothesis that the physician trust network may be divided into subgraphs. Let's check that, and see if we can redraw the Circos visualization.
###Code
ccsubgraphs = list(nx.connected_component_subgraphs(G))
len(ccsubgraphs)
###Output
_____no_output_____
###Markdown
ExerciseDraw a circos plot of the graph, but now colour and order the nodes by their connected component subgraph. (5 min.)Recall Circos API:```pythonc = CircosPlot(G, node_order='...', node_color='...')c.draw()plt.show() or plt.savefig(...)```
###Code
# Start by labelling each node in the master graph G by some number
# that represents the subgraph that contains the node.
for i, g in enumerate(_____________):
# Fill in code below.
c = CircosPlot(G, _________)
c.draw()
plt.savefig('images/physicians.png', dpi=300)
###Output
_____no_output_____ |
modelos/00.Full dataset/00_Pruebas modelos.ipynb | ###Markdown
Neural and sequential models (Full Dataset). Loading libraries and preparing the data. Before setting everything up, we choose the model to use and some additional variables:
###Code
skipDenseAct = True
skipLSTMAct = False
skipLSTM24Seg = False
skipLSTM24Agr = False
###Output
_____no_output_____
###Markdown
These flags select which cells run and which are skipped. Now we move on to loading the libraries and preparing the data:
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import keras.backend as K
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.models import Model, model_from_json
from keras.layers import (Embedding, Conv1D, MaxPooling1D, LSTM, GRU, Dense, Flatten, Input, Dropout,
BatchNormalization, TimeDistributed,Reshape, Multiply, Activation)
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras import regularizers
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.losses import mean_squared_error
from keras.utils.vis_utils import plot_model
from keras.optimizers import SGD
from keras.regularizers import l2
from keras.callbacks import EarlyStopping
from importlib.machinery import SourceFileLoader
from keras.utils import plot_model
#LMG: For Google Colab, load the library that controls which cells to run:
try:
from google.colab import drive
drive.mount('/content/drive')
path = '/content/drive/My Drive/TFM/01.Utils/ESIOS_contoller.py'
path_cell_controller = '/content/drive/My Drive/TFM/01.Utils/skip_kernel_extension.py'
in_colab = True
except:
path = '../utils/ESIOS_contoller.py'
path_cell_controller = '../utils/skip_kernel_extension.py'
in_colab = False
cell_controller = SourceFileLoader('cell_controller', path_cell_controller).load_module()
%load_ext cell_controller
esios_assembler = SourceFileLoader('esios', path).load_module()
esios_controller = esios_assembler.ESIOS(in_colab)
data_consumo = esios_controller.get_data()
data_consumo.head()
###Output
_____no_output_____
###Markdown
Predicting the current hour with the current data. This is the prediction of the value once we have all the exogenous variables, but not the price.As variables, we will select the previous day's price, the differential value and the solar energy.The differential value has to be between yesterday and the day before yesterday, since we do not have today's value a priori (it would be the target to compute). Because of this, its calculation starts on day 3. Similarly, yesterday's price becomes available one day later:
###Code
#------
#X_data
#------
x_data = data_consumo[['Solar','Demanda','Eolica','Holiday']].copy()
#LMG: Add to x_data PVPC (y_data) delayed one day:
x_data['PVPC_yesterday'] = data_consumo['PVPC_DEF'].shift(periods=1, axis = 0)
x_data['PVPC_yesterday'].loc[0] = data_consumo['PVPC_DEF'].mean()
#LMG: Add to x_data PVPC the differential value between yesterday and the day before
data_1 = data_consumo['PVPC_DEF'].shift(periods=1, axis = 0)
data_2 = data_consumo['PVPC_DEF'].shift(periods=2, axis = 0)
x_data['PVPC_stationary_yesterday'] = (data_1 - data_2)
x_data['PVPC_stationary_yesterday'].iloc[0:2] = 0
#LMG: Then get the x_data columns
x_data_columns = x_data.columns
#------
#Y_data
#------
y_data = data_consumo[['PVPC_DEF']].copy()
y_data_columns = y_data.columns
x_data.head()
y_data.head()
###Output
_____no_output_____
###Markdown
We normalize the data and split it into train and test sets:
###Code
#Min-max scaler
scaler = MinMaxScaler()
x_data_norm = scaler.fit_transform(x_data)
y_data_norm = scaler.fit_transform(y_data)
# Split the data
x_train, x_valid, y_train, y_valid = train_test_split(x_data_norm, y_data_norm, test_size=0.33, shuffle=False)
#Convert NaN values to 0:
x_train = np.nan_to_num(x_train)
x_valid = np.nan_to_num(x_valid)
y_train = np.nan_to_num(y_train)
y_valid = np.nan_to_num(y_valid)
print('Xtrain_dim:', x_train.shape)
print('Ytrain_dim:', y_train.shape)
###Output
Xtrain_dim: (20471, 6)
Ytrain_dim: (20471, 1)
###Markdown
Let's see how it looks:
###Code
x_train_DF = pd.DataFrame(x_train, columns = x_data_columns)
x_train_DF.head()
###Output
_____no_output_____
###Markdown
Finally, we define utility functions and variables used across all the models:
###Code
def rmse(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
###Output
_____no_output_____
###Markdown
NN. A dense neural network as a starting point. Data preparation. First, we scale the **training data**:
###Code
%%skip $skipDenseAct
scaler = MinMaxScaler()
x_train_data = scaler.fit_transform(x_train)
y_train_data = scaler.fit_transform(y_train)
x_valid_data = x_valid
y_valid_data = y_valid
print('Entrenamiento:',x_train_data.shape, y_train_data.shape)
print('Test:',x_valid_data.shape, y_valid_data.shape)
###Output
_____no_output_____
###Markdown
To visualize (see the DataFrame visualization section at the end):
###Code
%%skip $skipDenseAct
x_train_data_DF = pd.DataFrame(x_train_data, columns = x_data_columns)
display(x_train_data_DF[0:5])
%%skip $skipDenseAct
y_train_data_DF = pd.DataFrame(y_train_data, columns = y_data_columns)
display(y_train_data_DF[0:5])
###Output
_____no_output_____
###Markdown
Finally, we define the sizes of our network and some additional variables. Model
###Code
%%skip $skipDenseAct
# Load variables:
split_steps = 5
num_steps = split_steps
num_var = x_train_data.shape[1]  # number of input features, needed by the dense model below
output_size = y_train_data.shape[1]
batch_size = 30
%%skip $skipDenseAct
# define model
model = Sequential()
model.add(Dense(100, activation='relu', input_shape=(num_var,)))
model.add(Dense(100, activation='relu'))
model.add(Dense(output_size, activation='linear'))
# compile model
model.compile(optimizer='adam', loss='mse', metrics=['mse',rmse,'mae','mape'])
print(model.summary())
###Output
_____no_output_____
###Markdown
Training
###Code
%%skip $skipDenseAct
history_train = model.fit(x_train_data, y_train_data,
epochs=100,
batch_size=batch_size,
shuffle=False,
verbose=1)
# summarize history for loss, MAPE in a different scale than the others
plt.plot(history_train.history['mean_squared_error'])
plt.plot(history_train.history['mean_absolute_error'])
plt.plot(history_train.history['rmse'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['MSE','MAE','RMSE'], loc='upper left')
plt.show()
plt.plot(history_train.history['mean_absolute_percentage_error'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['MAPE'], loc='upper left')
plt.show()
###Output
_____no_output_____
###Markdown
Test. For the test:
###Code
%%skip $skipDenseAct
scores_test = model.evaluate(x_valid_data, y_valid_data, batch_size=batch_size, verbose=1)
# summarize loss
for element in range(len(scores_test)):
print("%s: %.2f" % (model.metrics_names[element], scores_test[element]))
%%skip $skipDenseAct
x_valid_data_DF = pd.DataFrame(x_valid_data, columns = x_data_columns)
display(x_valid_data_DF[0:5])
%%skip $skipDenseAct
y_valid_data_DF = pd.DataFrame(y_valid_data, columns = y_data_columns)
display(y_valid_data_DF[0:5])
%%skip $skipDenseAct
y_predict = model.predict(x_valid_data, batch_size=batch_size, verbose=1)
plt.figure(figsize=(10, 10))
plt.subplot(311)
plt.plot(y_predict, c='seagreen', alpha=0.6)
plt.title('Predict')
plt.subplot(312)
plt.plot(y_valid_data, c='orange', alpha=0.5)
plt.title('Real')
plt.subplot(313)
plt.plot(y_predict, c='seagreen', alpha=0.6)
plt.plot(y_valid_data, c='orange', alpha=0.5)
plt.legend(['Predicción','Real'])
plt.title('Predict + Real')
plt.suptitle('Categorical Plotting')
plt.show()
###Output
_____no_output_____
###Markdown
LSTM. Based on the complete example explained at https://adventuresinmachinelearning.com/keras-lstm-tutorial/ Data preparation
###Code
%%skip $skipLSTMAct
# Load variables:
split_steps = 5
num_steps = split_steps
batch_size = 30
###Output
_____no_output_____
###Markdown
So that it runs in batches, we replicate the dataset in windows of n steps:
###Code
%%skip $skipLSTMAct
# split a sequence into samples
def split_sequence(sequence, n_steps):
X = list()
for i in range(len(sequence)):
# find the end of this pattern
end_ix = i + n_steps
# check if we are beyond the sequence
if end_ix > len(sequence)-1:
break
# gather input and output parts of the pattern
seq_x = sequence[i:end_ix,:]
X.append(seq_x)
return np.array(X)
###Output
_____no_output_____
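###Markdown
A quick, optional check (not in the original) of what `split_sequence` produces on a toy array:
###Code
%%skip $skipLSTMAct
# Hypothetical toy example: 8 timesteps, 2 features, windows of 3 steps -> expected shape (5, 3, 2).
toy = np.arange(16).reshape(8, 2)
print(split_sequence(toy, 3).shape)
###Output
_____no_output_____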
###Markdown
Next, we take the **training data** (already scaled above) and apply the function to x and y, grouping the outputs into windows of num_steps:
###Code
%%skip $skipLSTMAct
x_train_est = x_train
y_train_est = y_train
x_train_data = split_sequence(x_train_est, split_steps)
y_train_data = y_train_est[split_steps:,:]
x_valid_data = split_sequence(x_valid, split_steps)
y_valid_data = y_valid[split_steps:,:]
print('Entrenamiento:',x_train_data.shape, y_train_data.shape)
print('Test:',x_valid_data.shape, y_valid_data.shape)
###Output
Entrenamiento: (20466, 5, 6) (20466, 1)
Test: (10079, 5, 6) (10079, 1)
###Markdown
To visualize one of the samples that will be fed in (see the DataFrame visualization section at the end):
###Code
%%skip $skipLSTMAct
x_train_data_DF = pd.DataFrame(x_train_data[0,:,:], columns = x_data_columns)
display(x_train_data_DF[0:5])
%%skip $skipLSTMAct
y_train_data_DF = pd.DataFrame(y_train_data[0:split_steps], columns = y_data_columns)
display(y_train_data_DF[0:5])
###Output
_____no_output_____
###Markdown
Finally, we define the sizes of our network and some additional variables:In our case, the input will be *1* **x** *number of timesteps* **x** *number of independent variables*, where:- batch_size: for each instant we take the data in batches.- number of timesteps, or num_steps: in the sequential analysis, how many complete energy records we use, or in plainer terms, how many rows of the dataset are taken into account.- number of independent variables, or num_var: each field in the input data, or in plainer terms, each column of the dataset.In addition:- hidden_size: the number of units in each LSTM cell.- At the output, we have the target value(s) to estimate.
###Code
%%skip $skipLSTMAct
# Load variables:
num_var = x_train_data.shape[2]
output_size = y_train_data.shape[1]
###Output
_____no_output_____
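###Markdown
As a quick check (not in the original), the LSTM input tensor should now have shape (samples, num_steps, num_var):
###Code
%%skip $skipLSTMAct
print(x_train_data.shape)  # expected: (samples, num_steps, num_var)
print(num_steps, num_var, output_size)
###Output
_____no_output_____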
###Markdown
Model. The LSTM layer is fed one timestep at a time, the first being t1, the second t2, and so on.Compared with the example on that page, we have:- Removed the initial embedding, since we do not need to encode the input (the values are already numeric).- Changed the hidden-layer size. The page suggests setting it to the size of each input record; it is the analogue of the number of units in a dense layer.- Kept a single LSTM layer.- Removed the dropout.- TimeDistributed applies a dense layer to every training step. We remove it as well and keep only the final dense layers, since it makes the output very large.In addition:- We do not use regularization or Dropout, since there are no overfitting problems.- We use normalization between layers and at the input (BatchNormalization). Because of that we can skip computing the biases, since they would serve no purpose.Thus:
###Code
%%skip $skipLSTMAct
# define model
model = Sequential()
# LSTM uses Xavier (Glorot) initialization by default, which is the recommended choice for linear problems
model.add(LSTM(100, input_shape=(num_steps,num_var)))
model.add(Activation("relu"))
model.add(Dense(100))
model.add(Activation("relu"))
model.add(Dense(100))
model.add(Activation("relu"))
model.add(Dense(output_size))
model.add(Activation("linear"))
# compile model
model.compile(optimizer='adam', loss='mse', metrics=['mse',rmse,'mae','mape'])
model.summary()
%%skip $skipLSTMAct
plot_model(model,show_shapes=True)
###Output
_____no_output_____
###Markdown
Training
###Code
%%skip $skipLSTMAct
print('LSTM')
history_train = model.fit(x_train_data, y_train_data,
epochs=100,
batch_size=batch_size,
shuffle=False,
verbose=1)
# summarize history for loss, MAPE in a different scale than the others
plt.plot(history_train.history['mean_squared_error'])
plt.plot(history_train.history['mean_absolute_error'])
plt.plot(history_train.history['rmse'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['MSE','MAE','RMSE'], loc='upper left')
plt.show()
plt.plot(history_train.history['mean_absolute_percentage_error'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['MAPE'], loc='upper left')
plt.show()
###Output
LSTM
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/math_grad.py:1424: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:1033: The name tf.assign_add is deprecated. Please use tf.compat.v1.assign_add instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:1020: The name tf.assign is deprecated. Please use tf.compat.v1.assign instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3005: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.
Epoch 1/100
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:190: The name tf.get_default_session is deprecated. Please use tf.compat.v1.get_default_session instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:197: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:207: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:216: The name tf.is_variable_initialized is deprecated. Please use tf.compat.v1.is_variable_initialized instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:223: The name tf.variables_initializer is deprecated. Please use tf.compat.v1.variables_initializer instead.
20466/20466 [==============================] - 6s 306us/step - loss: 0.0067 - mean_squared_error: 0.0067 - rmse: 0.0563 - mean_absolute_error: 0.0563 - mean_absolute_percentage_error: 5511.5520
Epoch 2/100
20466/20466 [==============================] - 5s 241us/step - loss: 0.0033 - mean_squared_error: 0.0033 - rmse: 0.0435 - mean_absolute_error: 0.0435 - mean_absolute_percentage_error: 7203.1485
Epoch 3/100
20466/20466 [==============================] - 5s 246us/step - loss: 0.0028 - mean_squared_error: 0.0028 - rmse: 0.0404 - mean_absolute_error: 0.0404 - mean_absolute_percentage_error: 6903.1934
Epoch 4/100
20466/20466 [==============================] - 5s 243us/step - loss: 0.0026 - mean_squared_error: 0.0026 - rmse: 0.0386 - mean_absolute_error: 0.0386 - mean_absolute_percentage_error: 6420.1491
Epoch 5/100
20466/20466 [==============================] - 5s 240us/step - loss: 0.0024 - mean_squared_error: 0.0024 - rmse: 0.0374 - mean_absolute_error: 0.0374 - mean_absolute_percentage_error: 6469.6980
Epoch 6/100
20466/20466 [==============================] - 5s 243us/step - loss: 0.0023 - mean_squared_error: 0.0023 - rmse: 0.0366 - mean_absolute_error: 0.0366 - mean_absolute_percentage_error: 6413.1263
Epoch 7/100
20466/20466 [==============================] - 5s 246us/step - loss: 0.0022 - mean_squared_error: 0.0022 - rmse: 0.0360 - mean_absolute_error: 0.0360 - mean_absolute_percentage_error: 5694.2044
Epoch 8/100
20466/20466 [==============================] - 5s 245us/step - loss: 0.0022 - mean_squared_error: 0.0022 - rmse: 0.0356 - mean_absolute_error: 0.0356 - mean_absolute_percentage_error: 5053.2532
Epoch 9/100
20466/20466 [==============================] - 5s 245us/step - loss: 0.0021 - mean_squared_error: 0.0021 - rmse: 0.0352 - mean_absolute_error: 0.0352 - mean_absolute_percentage_error: 5021.5730
Epoch 10/100
20466/20466 [==============================] - 5s 241us/step - loss: 0.0021 - mean_squared_error: 0.0021 - rmse: 0.0349 - mean_absolute_error: 0.0349 - mean_absolute_percentage_error: 4288.6984
Epoch 11/100
20466/20466 [==============================] - 5s 240us/step - loss: 0.0021 - mean_squared_error: 0.0021 - rmse: 0.0345 - mean_absolute_error: 0.0345 - mean_absolute_percentage_error: 4300.8812
Epoch 12/100
20466/20466 [==============================] - 5s 235us/step - loss: 0.0020 - mean_squared_error: 0.0020 - rmse: 0.0341 - mean_absolute_error: 0.0341 - mean_absolute_percentage_error: 4177.4386
Epoch 13/100
20466/20466 [==============================] - 5s 238us/step - loss: 0.0020 - mean_squared_error: 0.0020 - rmse: 0.0341 - mean_absolute_error: 0.0341 - mean_absolute_percentage_error: 4403.5316
Epoch 14/100
20466/20466 [==============================] - 5s 232us/step - loss: 0.0020 - mean_squared_error: 0.0020 - rmse: 0.0339 - mean_absolute_error: 0.0339 - mean_absolute_percentage_error: 4732.1148
Epoch 15/100
20466/20466 [==============================] - 5s 235us/step - loss: 0.0020 - mean_squared_error: 0.0020 - rmse: 0.0338 - mean_absolute_error: 0.0338 - mean_absolute_percentage_error: 4357.4956
Epoch 16/100
20466/20466 [==============================] - 5s 237us/step - loss: 0.0021 - mean_squared_error: 0.0021 - rmse: 0.0341 - mean_absolute_error: 0.0341 - mean_absolute_percentage_error: 4844.1852
Epoch 17/100
20466/20466 [==============================] - 5s 233us/step - loss: 0.0021 - mean_squared_error: 0.0021 - rmse: 0.0340 - mean_absolute_error: 0.0340 - mean_absolute_percentage_error: 4679.5469
Epoch 18/100
20466/20466 [==============================] - 5s 238us/step - loss: 0.0021 - mean_squared_error: 0.0021 - rmse: 0.0336 - mean_absolute_error: 0.0336 - mean_absolute_percentage_error: 2979.2769
Epoch 19/100
20466/20466 [==============================] - 5s 236us/step - loss: 0.0021 - mean_squared_error: 0.0021 - rmse: 0.0340 - mean_absolute_error: 0.0340 - mean_absolute_percentage_error: 4948.2045
Epoch 20/100
20466/20466 [==============================] - 5s 242us/step - loss: 0.0020 - mean_squared_error: 0.0020 - rmse: 0.0337 - mean_absolute_error: 0.0337 - mean_absolute_percentage_error: 5276.6286
Epoch 21/100
20466/20466 [==============================] - 5s 242us/step - loss: 0.0020 - mean_squared_error: 0.0020 - rmse: 0.0335 - mean_absolute_error: 0.0335 - mean_absolute_percentage_error: 4990.6801
Epoch 22/100
20466/20466 [==============================] - 5s 239us/step - loss: 0.0021 - mean_squared_error: 0.0021 - rmse: 0.0338 - mean_absolute_error: 0.0338 - mean_absolute_percentage_error: 4927.0633
Epoch 23/100
20466/20466 [==============================] - 5s 234us/step - loss: 0.0020 - mean_squared_error: 0.0020 - rmse: 0.0336 - mean_absolute_error: 0.0336 - mean_absolute_percentage_error: 5398.2451
Epoch 24/100
20466/20466 [==============================] - 5s 241us/step - loss: 0.0020 - mean_squared_error: 0.0020 - rmse: 0.0335 - mean_absolute_error: 0.0335 - mean_absolute_percentage_error: 5613.4590
Epoch 25/100
20466/20466 [==============================] - 5s 234us/step - loss: 0.0020 - mean_squared_error: 0.0020 - rmse: 0.0328 - mean_absolute_error: 0.0328 - mean_absolute_percentage_error: 5719.3368
Epoch 26/100
20466/20466 [==============================] - 5s 242us/step - loss: 0.0020 - mean_squared_error: 0.0020 - rmse: 0.0331 - mean_absolute_error: 0.0331 - mean_absolute_percentage_error: 5761.2247
Epoch 27/100
20466/20466 [==============================] - 5s 239us/step - loss: 0.0020 - mean_squared_error: 0.0020 - rmse: 0.0330 - mean_absolute_error: 0.0330 - mean_absolute_percentage_error: 5751.6946
Epoch 28/100
20466/20466 [==============================] - 5s 239us/step - loss: 0.0020 - mean_squared_error: 0.0020 - rmse: 0.0327 - mean_absolute_error: 0.0327 - mean_absolute_percentage_error: 5517.7208
Epoch 29/100
20466/20466 [==============================] - 5s 238us/step - loss: 0.0019 - mean_squared_error: 0.0019 - rmse: 0.0326 - mean_absolute_error: 0.0326 - mean_absolute_percentage_error: 5794.6993
Epoch 30/100
20466/20466 [==============================] - 5s 236us/step - loss: 0.0019 - mean_squared_error: 0.0019 - rmse: 0.0323 - mean_absolute_error: 0.0323 - mean_absolute_percentage_error: 5706.2129
Epoch 31/100
20466/20466 [==============================] - 5s 238us/step - loss: 0.0018 - mean_squared_error: 0.0018 - rmse: 0.0319 - mean_absolute_error: 0.0319 - mean_absolute_percentage_error: 5325.8846
Epoch 32/100
20466/20466 [==============================] - 5s 239us/step - loss: 0.0018 - mean_squared_error: 0.0018 - rmse: 0.0317 - mean_absolute_error: 0.0317 - mean_absolute_percentage_error: 5518.1264
Epoch 33/100
20466/20466 [==============================] - 5s 236us/step - loss: 0.0018 - mean_squared_error: 0.0018 - rmse: 0.0315 - mean_absolute_error: 0.0315 - mean_absolute_percentage_error: 5479.2419
Epoch 34/100
20466/20466 [==============================] - 5s 235us/step - loss: 0.0018 - mean_squared_error: 0.0018 - rmse: 0.0312 - mean_absolute_error: 0.0312 - mean_absolute_percentage_error: 5230.0743
Epoch 35/100
20466/20466 [==============================] - 5s 234us/step - loss: 0.0017 - mean_squared_error: 0.0017 - rmse: 0.0309 - mean_absolute_error: 0.0309 - mean_absolute_percentage_error: 5087.1350
Epoch 36/100
20466/20466 [==============================] - 5s 238us/step - loss: 0.0017 - mean_squared_error: 0.0017 - rmse: 0.0306 - mean_absolute_error: 0.0306 - mean_absolute_percentage_error: 4755.0194
Epoch 37/100
20466/20466 [==============================] - 5s 237us/step - loss: 0.0017 - mean_squared_error: 0.0017 - rmse: 0.0302 - mean_absolute_error: 0.0302 - mean_absolute_percentage_error: 4726.8831
Epoch 38/100
20466/20466 [==============================] - 5s 239us/step - loss: 0.0017 - mean_squared_error: 0.0017 - rmse: 0.0302 - mean_absolute_error: 0.0302 - mean_absolute_percentage_error: 4327.3111
Epoch 39/100
20466/20466 [==============================] - 5s 242us/step - loss: 0.0016 - mean_squared_error: 0.0016 - rmse: 0.0300 - mean_absolute_error: 0.0300 - mean_absolute_percentage_error: 4224.1599
Epoch 40/100
20466/20466 [==============================] - 5s 242us/step - loss: 0.0016 - mean_squared_error: 0.0016 - rmse: 0.0297 - mean_absolute_error: 0.0297 - mean_absolute_percentage_error: 3852.3160
Epoch 41/100
20466/20466 [==============================] - 5s 257us/step - loss: 0.0016 - mean_squared_error: 0.0016 - rmse: 0.0296 - mean_absolute_error: 0.0296 - mean_absolute_percentage_error: 3677.6189
Epoch 42/100
20466/20466 [==============================] - 5s 254us/step - loss: 0.0016 - mean_squared_error: 0.0016 - rmse: 0.0294 - mean_absolute_error: 0.0294 - mean_absolute_percentage_error: 3816.4822
Epoch 43/100
20466/20466 [==============================] - 5s 247us/step - loss: 0.0016 - mean_squared_error: 0.0016 - rmse: 0.0292 - mean_absolute_error: 0.0292 - mean_absolute_percentage_error: 3630.2950
Epoch 44/100
20466/20466 [==============================] - 5s 248us/step - loss: 0.0015 - mean_squared_error: 0.0015 - rmse: 0.0290 - mean_absolute_error: 0.0290 - mean_absolute_percentage_error: 3558.8931
Epoch 45/100
20466/20466 [==============================] - 5s 245us/step - loss: 0.0015 - mean_squared_error: 0.0015 - rmse: 0.0288 - mean_absolute_error: 0.0288 - mean_absolute_percentage_error: 3336.9631
Epoch 46/100
20466/20466 [==============================] - 5s 245us/step - loss: 0.0015 - mean_squared_error: 0.0015 - rmse: 0.0287 - mean_absolute_error: 0.0287 - mean_absolute_percentage_error: 3563.1745
Epoch 47/100
20466/20466 [==============================] - 5s 243us/step - loss: 0.0015 - mean_squared_error: 0.0015 - rmse: 0.0285 - mean_absolute_error: 0.0285 - mean_absolute_percentage_error: 3471.2706
Epoch 48/100
20466/20466 [==============================] - 5s 241us/step - loss: 0.0015 - mean_squared_error: 0.0015 - rmse: 0.0283 - mean_absolute_error: 0.0283 - mean_absolute_percentage_error: 3446.4801
Epoch 49/100
20466/20466 [==============================] - 5s 251us/step - loss: 0.0014 - mean_squared_error: 0.0014 - rmse: 0.0281 - mean_absolute_error: 0.0281 - mean_absolute_percentage_error: 3234.7399
Epoch 50/100
20466/20466 [==============================] - 5s 243us/step - loss: 0.0014 - mean_squared_error: 0.0014 - rmse: 0.0280 - mean_absolute_error: 0.0280 - mean_absolute_percentage_error: 3227.3873
Epoch 51/100
20466/20466 [==============================] - 5s 245us/step - loss: 0.0014 - mean_squared_error: 0.0014 - rmse: 0.0279 - mean_absolute_error: 0.0279 - mean_absolute_percentage_error: 3333.0488
Epoch 52/100
20466/20466 [==============================] - 5s 245us/step - loss: 0.0014 - mean_squared_error: 0.0014 - rmse: 0.0277 - mean_absolute_error: 0.0277 - mean_absolute_percentage_error: 3101.7583
Epoch 53/100
20466/20466 [==============================] - 5s 246us/step - loss: 0.0014 - mean_squared_error: 0.0014 - rmse: 0.0277 - mean_absolute_error: 0.0277 - mean_absolute_percentage_error: 3190.5238
Epoch 54/100
20466/20466 [==============================] - 5s 240us/step - loss: 0.0014 - mean_squared_error: 0.0014 - rmse: 0.0277 - mean_absolute_error: 0.0277 - mean_absolute_percentage_error: 3074.5398
Epoch 55/100
20466/20466 [==============================] - 5s 239us/step - loss: 0.0014 - mean_squared_error: 0.0014 - rmse: 0.0276 - mean_absolute_error: 0.0276 - mean_absolute_percentage_error: 3086.7829
Epoch 56/100
20466/20466 [==============================] - 5s 245us/step - loss: 0.0014 - mean_squared_error: 0.0014 - rmse: 0.0275 - mean_absolute_error: 0.0275 - mean_absolute_percentage_error: 2883.7064
Epoch 57/100
20466/20466 [==============================] - 5s 252us/step - loss: 0.0014 - mean_squared_error: 0.0014 - rmse: 0.0273 - mean_absolute_error: 0.0273 - mean_absolute_percentage_error: 2790.6832
Epoch 58/100
20466/20466 [==============================] - 5s 249us/step - loss: 0.0014 - mean_squared_error: 0.0014 - rmse: 0.0273 - mean_absolute_error: 0.0273 - mean_absolute_percentage_error: 2889.2434
Epoch 59/100
20466/20466 [==============================] - 5s 252us/step - loss: 0.0014 - mean_squared_error: 0.0014 - rmse: 0.0273 - mean_absolute_error: 0.0273 - mean_absolute_percentage_error: 2854.3036
Epoch 60/100
20466/20466 [==============================] - 5s 248us/step - loss: 0.0013 - mean_squared_error: 0.0013 - rmse: 0.0271 - mean_absolute_error: 0.0271 - mean_absolute_percentage_error: 2551.5316
Epoch 61/100
20466/20466 [==============================] - 5s 247us/step - loss: 0.0013 - mean_squared_error: 0.0013 - rmse: 0.0269 - mean_absolute_error: 0.0269 - mean_absolute_percentage_error: 1731.3897
Epoch 62/100
20466/20466 [==============================] - 5s 245us/step - loss: 0.0013 - mean_squared_error: 0.0013 - rmse: 0.0270 - mean_absolute_error: 0.0270 - mean_absolute_percentage_error: 2835.7869
Epoch 63/100
20466/20466 [==============================] - 5s 251us/step - loss: 0.0013 - mean_squared_error: 0.0013 - rmse: 0.0269 - mean_absolute_error: 0.0269 - mean_absolute_percentage_error: 2957.2962
Epoch 64/100
20466/20466 [==============================] - 5s 245us/step - loss: 0.0013 - mean_squared_error: 0.0013 - rmse: 0.0268 - mean_absolute_error: 0.0268 - mean_absolute_percentage_error: 3067.5869
Epoch 65/100
20466/20466 [==============================] - 5s 242us/step - loss: 0.0013 - mean_squared_error: 0.0013 - rmse: 0.0267 - mean_absolute_error: 0.0267 - mean_absolute_percentage_error: 3135.8982
Epoch 66/100
20466/20466 [==============================] - 5s 240us/step - loss: 0.0013 - mean_squared_error: 0.0013 - rmse: 0.0265 - mean_absolute_error: 0.0265 - mean_absolute_percentage_error: 2981.5702
Epoch 67/100
20466/20466 [==============================] - 5s 245us/step - loss: 0.0013 - mean_squared_error: 0.0013 - rmse: 0.0265 - mean_absolute_error: 0.0265 - mean_absolute_percentage_error: 3146.7294
Epoch 68/100
20466/20466 [==============================] - 5s 243us/step - loss: 0.0013 - mean_squared_error: 0.0013 - rmse: 0.0264 - mean_absolute_error: 0.0264 - mean_absolute_percentage_error: 2941.9599
Epoch 69/100
20466/20466 [==============================] - 5s 251us/step - loss: 0.0013 - mean_squared_error: 0.0013 - rmse: 0.0263 - mean_absolute_error: 0.0263 - mean_absolute_percentage_error: 2709.9681
Epoch 70/100
20466/20466 [==============================] - 5s 246us/step - loss: 0.0013 - mean_squared_error: 0.0013 - rmse: 0.0263 - mean_absolute_error: 0.0263 - mean_absolute_percentage_error: 2860.3013
Epoch 71/100
20466/20466 [==============================] - 5s 246us/step - loss: 0.0013 - mean_squared_error: 0.0013 - rmse: 0.0263 - mean_absolute_error: 0.0263 - mean_absolute_percentage_error: 2955.0394
Epoch 72/100
20466/20466 [==============================] - 5s 247us/step - loss: 0.0012 - mean_squared_error: 0.0012 - rmse: 0.0262 - mean_absolute_error: 0.0262 - mean_absolute_percentage_error: 2968.4285
Epoch 73/100
20466/20466 [==============================] - 5s 245us/step - loss: 0.0012 - mean_squared_error: 0.0012 - rmse: 0.0260 - mean_absolute_error: 0.0260 - mean_absolute_percentage_error: 2288.1777
Epoch 74/100
20466/20466 [==============================] - 5s 248us/step - loss: 0.0012 - mean_squared_error: 0.0012 - rmse: 0.0260 - mean_absolute_error: 0.0260 - mean_absolute_percentage_error: 2941.6597
Epoch 75/100
20466/20466 [==============================] - 5s 244us/step - loss: 0.0012 - mean_squared_error: 0.0012 - rmse: 0.0259 - mean_absolute_error: 0.0259 - mean_absolute_percentage_error: 2827.8173
Epoch 76/100
20466/20466 [==============================] - 5s 247us/step - loss: 0.0012 - mean_squared_error: 0.0012 - rmse: 0.0259 - mean_absolute_error: 0.0259 - mean_absolute_percentage_error: 3181.7984
Epoch 77/100
20466/20466 [==============================] - 5s 248us/step - loss: 0.0012 - mean_squared_error: 0.0012 - rmse: 0.0258 - mean_absolute_error: 0.0258 - mean_absolute_percentage_error: 2856.6856
Epoch 78/100
20466/20466 [==============================] - 5s 249us/step - loss: 0.0012 - mean_squared_error: 0.0012 - rmse: 0.0257 - mean_absolute_error: 0.0257 - mean_absolute_percentage_error: 2958.2563
Epoch 79/100
20466/20466 [==============================] - 5s 251us/step - loss: 0.0012 - mean_squared_error: 0.0012 - rmse: 0.0256 - mean_absolute_error: 0.0256 - mean_absolute_percentage_error: 2656.3526
Epoch 80/100
20466/20466 [==============================] - 5s 250us/step - loss: 0.0012 - mean_squared_error: 0.0012 - rmse: 0.0257 - mean_absolute_error: 0.0257 - mean_absolute_percentage_error: 2975.8788
Epoch 81/100
20466/20466 [==============================] - 5s 245us/step - loss: 0.0012 - mean_squared_error: 0.0012 - rmse: 0.0257 - mean_absolute_error: 0.0257 - mean_absolute_percentage_error: 2742.4676
Epoch 82/100
20466/20466 [==============================] - 5s 244us/step - loss: 0.0012 - mean_squared_error: 0.0012 - rmse: 0.0256 - mean_absolute_error: 0.0256 - mean_absolute_percentage_error: 3033.5699
Epoch 83/100
20466/20466 [==============================] - 5s 245us/step - loss: 0.0012 - mean_squared_error: 0.0012 - rmse: 0.0256 - mean_absolute_error: 0.0256 - mean_absolute_percentage_error: 2596.9574
Epoch 84/100
20466/20466 [==============================] - 5s 247us/step - loss: 0.0012 - mean_squared_error: 0.0012 - rmse: 0.0253 - mean_absolute_error: 0.0253 - mean_absolute_percentage_error: 2897.4868
Epoch 85/100
20466/20466 [==============================] - 5s 240us/step - loss: 0.0012 - mean_squared_error: 0.0012 - rmse: 0.0254 - mean_absolute_error: 0.0254 - mean_absolute_percentage_error: 2631.4924
Epoch 86/100
20466/20466 [==============================] - 5s 246us/step - loss: 0.0012 - mean_squared_error: 0.0012 - rmse: 0.0253 - mean_absolute_error: 0.0253 - mean_absolute_percentage_error: 2753.9924
Epoch 87/100
20466/20466 [==============================] - 5s 246us/step - loss: 0.0011 - mean_squared_error: 0.0011 - rmse: 0.0252 - mean_absolute_error: 0.0252 - mean_absolute_percentage_error: 2562.5806
Epoch 88/100
20466/20466 [==============================] - 5s 247us/step - loss: 0.0011 - mean_squared_error: 0.0011 - rmse: 0.0251 - mean_absolute_error: 0.0251 - mean_absolute_percentage_error: 2512.1351
Epoch 89/100
20466/20466 [==============================] - 5s 245us/step - loss: 0.0011 - mean_squared_error: 0.0011 - rmse: 0.0251 - mean_absolute_error: 0.0251 - mean_absolute_percentage_error: 2593.2288
Epoch 90/100
20466/20466 [==============================] - 5s 244us/step - loss: 0.0011 - mean_squared_error: 0.0011 - rmse: 0.0250 - mean_absolute_error: 0.0250 - mean_absolute_percentage_error: 2380.3508
Epoch 91/100
20466/20466 [==============================] - 5s 248us/step - loss: 0.0011 - mean_squared_error: 0.0011 - rmse: 0.0250 - mean_absolute_error: 0.0250 - mean_absolute_percentage_error: 2175.8663
Epoch 92/100
20466/20466 [==============================] - 5s 252us/step - loss: 0.0011 - mean_squared_error: 0.0011 - rmse: 0.0249 - mean_absolute_error: 0.0249 - mean_absolute_percentage_error: 2020.7047
Epoch 93/100
20466/20466 [==============================] - 5s 252us/step - loss: 0.0011 - mean_squared_error: 0.0011 - rmse: 0.0249 - mean_absolute_error: 0.0249 - mean_absolute_percentage_error: 2254.1636
Epoch 94/100
20466/20466 [==============================] - 5s 253us/step - loss: 0.0011 - mean_squared_error: 0.0011 - rmse: 0.0248 - mean_absolute_error: 0.0248 - mean_absolute_percentage_error: 1954.1215
Epoch 95/100
20466/20466 [==============================] - 5s 244us/step - loss: 0.0011 - mean_squared_error: 0.0011 - rmse: 0.0247 - mean_absolute_error: 0.0247 - mean_absolute_percentage_error: 2194.7188
Epoch 96/100
20466/20466 [==============================] - 5s 251us/step - loss: 0.0011 - mean_squared_error: 0.0011 - rmse: 0.0246 - mean_absolute_error: 0.0246 - mean_absolute_percentage_error: 2055.1375
Epoch 97/100
20466/20466 [==============================] - 5s 246us/step - loss: 0.0011 - mean_squared_error: 0.0011 - rmse: 0.0246 - mean_absolute_error: 0.0246 - mean_absolute_percentage_error: 1993.5577
Epoch 98/100
20466/20466 [==============================] - 5s 250us/step - loss: 0.0011 - mean_squared_error: 0.0011 - rmse: 0.0245 - mean_absolute_error: 0.0245 - mean_absolute_percentage_error: 2095.3758
Epoch 99/100
20466/20466 [==============================] - 5s 250us/step - loss: 0.0011 - mean_squared_error: 0.0011 - rmse: 0.0245 - mean_absolute_error: 0.0245 - mean_absolute_percentage_error: 1877.2304
Epoch 100/100
20466/20466 [==============================] - 5s 263us/step - loss: 0.0011 - mean_squared_error: 0.0011 - rmse: 0.0245 - mean_absolute_error: 0.0245 - mean_absolute_percentage_error: 2032.9168
###Markdown
Test
For the test:
###Code
%%skip $skipLSTMAct
scores_test = model.evaluate(x_valid_data, y_valid_data, batch_size=batch_size, verbose=1)
# summarize loss
for element in range(len(scores_test)):
print("%s: %.2f" % (model.metrics_names[element], scores_test[element]))
%%skip $skipLSTMAct
x_valid_data_DF = pd.DataFrame(x_valid_data[0,:,:], columns = x_data_columns)
display(x_valid_data_DF)
%%skip $skipLSTMAct
y_valid_data_DF = pd.DataFrame(y_valid_data[0:5,:], columns = y_data_columns)
display(y_valid_data_DF)
%%skip $skipLSTMAct
y_predict = model.predict(x_valid_data, batch_size=batch_size, verbose=1)
plt.figure(figsize=(10, 10))
plt.subplot(311)
plt.plot(y_predict, c='seagreen', alpha=0.6)
plt.title('Predict')
plt.subplot(312)
plt.plot(y_valid_data, c='orange', alpha=0.5)
plt.title('Real')
plt.subplot(313)
plt.plot(y_predict, c='seagreen', alpha=0.6)
plt.plot(y_valid_data, c='orange', alpha=0.5)
plt.legend(['Predicción','Real'])
plt.title('Predict + Real')
plt.suptitle('Categorical Plotting')
plt.show()
###Output
10079/10079 [==============================] - 1s 57us/step
###Markdown
Plot (real scale)
We apply the inverse scaler to see the predictions at their real scale.
###Code
%%skip $skipLSTMAct
y_predict_trans = scaler.inverse_transform(y_predict)
y_valid_trans = scaler.inverse_transform(y_valid_data)
%%skip $skipLSTMAct
plt.figure(figsize=(10, 10))
plt.subplot(311)
plt.plot(y_predict_trans, c='seagreen', alpha=0.6)
plt.title('Predict')
plt.subplot(312)
plt.plot(y_valid_trans, c='orange', alpha=0.5)
plt.title('y_valid_trans')
plt.subplot(313)
plt.plot(y_predict_trans, c='seagreen', alpha=0.6)
plt.plot(y_valid_trans, c='orange', alpha=0.5)
plt.legend(['Predicción','Real'])
plt.title('Predict + Real')
plt.suptitle('Categorical Plotting')
plt.show()
###Output
_____no_output_____
###Markdown
Metrics (real scale)
###Code
%%skip $skipLSTMAct
esios_controller.get_metrics(y_valid_trans, y_predict_trans)
###Output
** --------------- Metrics: --------------- **
MSE: 37.160827897983395
RMSE: 6.095968167402401
MAE: 4.826628169413538
Log_MSE: 0.002480943758432873
MedianAE: 4.0264019775390665
MAPE: 3.883646958366608 %
SMAPE: 3.9784967945466008 %
Variance: 0.8011339098576693
Max Error: 26.58566589355469
** ---------------------------------------- **
###Markdown
24-hour-ahead prediction with the current data (segregated by hour)
This would be tomorrow's prediction once we have the data for the current hour, including its price.
In the previous exercises we computed the prediction for the next hour. In this exercise we will try to predict the same hour of the following day. To do so, we use PVPC_DEF as the current-hour price and PVPC-target as the price of that hour on the following day, and we compute the difference over a moving 24-hour window.
LSTM
Data preparation
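As a minimal, hypothetical sketch of the 24-hour differencing described above (toy values and names; the real columns are built from `data_consumo` in the cells below):

```python
import pandas as pd

# Toy hourly price series standing in for PVPC_DEF (values are made up)
prices = pd.Series([10.0 + (h % 24) for h in range(72)], name="PVPC_DEF")

# Stationary feature: current hour minus the same hour on the previous day
stationary = prices - prices.shift(periods=24)
stationary.iloc[0:24] = 0  # the first day has no "yesterday", so zero-fill it

# 24-hour-ahead target: the price of the same hour on the following day
target = prices.shift(periods=-24)
print(stationary.tail(3), target.head(3))
```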
###Code
%%skip $skipLSTM24Seg
# Load variables:
split_steps = 5
num_steps = split_steps
batch_size = 30
%%skip $skipLSTM24Seg
# split a sequence into samples
def split_sequence(sequence, n_steps):
X = list()
for i in range(len(sequence)):
# find the end of this pattern
end_ix = i + n_steps
# check if we are beyond the sequence
if end_ix > len(sequence)-1:
break
# gather input and output parts of the pattern
seq_x = sequence[i:end_ix,:]
X.append(seq_x)
return np.array(X)
%%skip $skipLSTM24Seg
#------
#X_data
#------
x_data = data_consumo[['Solar','Demanda','Eolica','Holiday']].copy()
# ADD to x the current hour price, the target is already calculated in PVPC-target
x_data['PVPC_today'] = data_consumo['PVPC_DEF']
#GET the stationary data between 24 periods
data_1 = data_consumo['PVPC_DEF']
data_2 = data_consumo['PVPC_DEF'].shift(periods=24, axis = 0)
x_data['PVPC_stationary_yesterday'] = (data_1 - data_2)
x_data['PVPC_stationary_yesterday'].iloc[0:24] = 0
# Then get the x_data columns
x_data_columns = x_data.columns
#------
#Y_data
#------
# PVPC-target holds the price of the same hour on the following day
y_data = data_consumo[['PVPC-target']].copy()
y_data_columns = y_data.columns
%%skip $skipLSTM24Seg
x_data[0:2]
%%skip $skipLSTM24Seg
x_data[24:26]
%%skip $skipLSTM24Seg
y_data.head()
%%skip $skipLSTM24Seg
#Min-max scaler
scaler = MinMaxScaler()
x_data_norm = scaler.fit_transform(x_data)
y_data_norm = scaler.fit_transform(y_data)
# Split the data
x_train, x_valid, y_train, y_valid = train_test_split(x_data_norm, y_data_norm, test_size=0.33, shuffle=False)
#Convert NaN values to 0:
x_train = np.nan_to_num(x_train)
x_valid = np.nan_to_num(x_valid)
y_train = np.nan_to_num(y_train)
y_valid = np.nan_to_num(y_valid)
print('Xtrain_dim:', x_train.shape)
print('Ytrain_dim:', y_train.shape)
%%skip $skipLSTM24Seg
x_train_est = x_train
y_train_est = y_train
x_train_data = split_sequence(x_train_est, split_steps)
y_train_data = y_train_est[split_steps:,:]
x_valid_data = split_sequence(x_valid, split_steps)
y_valid_data = y_valid[split_steps:,:]
print('Entrenamiento:',x_train_data.shape, y_train_data.shape)
print('Test:',x_valid_data.shape, y_valid_data.shape)
%%skip $skipLSTM24Seg
# Load variables:
num_var = x_train_data.shape[2]
output_size = y_train_data.shape[1]
###Output
_____no_output_____
###Markdown
Model
Same model as the ones trained previously.
###Code
%%skip $skipLSTM24Seg
# define model
model = Sequential()
# LSTM uses Xavier initialization by default, which is the recommended choice for linear problems
model.add(LSTM(100, input_shape=(num_steps,num_var)))
model.add(Activation("relu"))
model.add(Dense(100))
model.add(Activation("relu"))
model.add(Dense(100))
model.add(Activation("relu"))
model.add(Dense(output_size))
model.add(Activation("linear"))
# compile model
model.compile(optimizer='adam', loss='mse', metrics=['mse',rmse,'mae','mape'])
model.summary()
%%skip $skipLSTM24Seg
print('LSTM')
history_train = model.fit(x_train_data, y_train_data,
epochs=100,
batch_size=batch_size,
shuffle=False,
verbose=1)
# summarize history for loss, MAPE in a different scale than the others
plt.plot(history_train.history['mean_squared_error'])
plt.plot(history_train.history['mean_absolute_error'])
plt.plot(history_train.history['rmse'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['MSE','MAE','RMSE'], loc='upper left')
plt.show()
plt.plot(history_train.history['mean_absolute_percentage_error'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['MAPE'], loc='upper left')
plt.show()
###Output
LSTM
Epoch 1/100
20466/20466 [==============================] - 7s 327us/step - loss: 0.0096 - mean_squared_error: 0.0096 - rmse: 0.0612 - mean_absolute_error: 0.0612 - mean_absolute_percentage_error: 10.3093
Epoch 2/100
20466/20466 [==============================] - 6s 287us/step - loss: 0.0041 - mean_squared_error: 0.0041 - rmse: 0.0480 - mean_absolute_error: 0.0480 - mean_absolute_percentage_error: 8.2247
Epoch 3/100
20466/20466 [==============================] - 5s 264us/step - loss: 0.0040 - mean_squared_error: 0.0040 - rmse: 0.0471 - mean_absolute_error: 0.0471 - mean_absolute_percentage_error: 8.0936
Epoch 4/100
20466/20466 [==============================] - 5s 262us/step - loss: 0.0039 - mean_squared_error: 0.0039 - rmse: 0.0466 - mean_absolute_error: 0.0466 - mean_absolute_percentage_error: 8.0395
Epoch 5/100
20466/20466 [==============================] - 5s 265us/step - loss: 0.0039 - mean_squared_error: 0.0039 - rmse: 0.0462 - mean_absolute_error: 0.0462 - mean_absolute_percentage_error: 7.9663
Epoch 6/100
20466/20466 [==============================] - 5s 256us/step - loss: 0.0038 - mean_squared_error: 0.0038 - rmse: 0.0458 - mean_absolute_error: 0.0458 - mean_absolute_percentage_error: 7.9018
Epoch 7/100
20466/20466 [==============================] - 5s 252us/step - loss: 0.0039 - mean_squared_error: 0.0039 - rmse: 0.0457 - mean_absolute_error: 0.0457 - mean_absolute_percentage_error: 7.8924
Epoch 8/100
20466/20466 [==============================] - 6s 277us/step - loss: 0.0039 - mean_squared_error: 0.0039 - rmse: 0.0456 - mean_absolute_error: 0.0456 - mean_absolute_percentage_error: 7.8676
Epoch 9/100
20466/20466 [==============================] - 5s 258us/step - loss: 0.0038 - mean_squared_error: 0.0038 - rmse: 0.0454 - mean_absolute_error: 0.0454 - mean_absolute_percentage_error: 7.8481
Epoch 10/100
20466/20466 [==============================] - 5s 259us/step - loss: 0.0038 - mean_squared_error: 0.0038 - rmse: 0.0454 - mean_absolute_error: 0.0454 - mean_absolute_percentage_error: 7.8429
Epoch 11/100
20466/20466 [==============================] - 5s 252us/step - loss: 0.0038 - mean_squared_error: 0.0038 - rmse: 0.0450 - mean_absolute_error: 0.0450 - mean_absolute_percentage_error: 7.7661
Epoch 12/100
20466/20466 [==============================] - 5s 254us/step - loss: 0.0038 - mean_squared_error: 0.0038 - rmse: 0.0449 - mean_absolute_error: 0.0449 - mean_absolute_percentage_error: 7.7450
Epoch 13/100
20466/20466 [==============================] - 5s 255us/step - loss: 0.0037 - mean_squared_error: 0.0037 - rmse: 0.0445 - mean_absolute_error: 0.0445 - mean_absolute_percentage_error: 7.6693
Epoch 14/100
20466/20466 [==============================] - 5s 255us/step - loss: 0.0037 - mean_squared_error: 0.0037 - rmse: 0.0444 - mean_absolute_error: 0.0444 - mean_absolute_percentage_error: 7.6620
Epoch 15/100
20466/20466 [==============================] - 5s 259us/step - loss: 0.0037 - mean_squared_error: 0.0037 - rmse: 0.0447 - mean_absolute_error: 0.0447 - mean_absolute_percentage_error: 7.6919
Epoch 16/100
20466/20466 [==============================] - 5s 253us/step - loss: 0.0037 - mean_squared_error: 0.0037 - rmse: 0.0443 - mean_absolute_error: 0.0443 - mean_absolute_percentage_error: 7.6365
Epoch 17/100
20466/20466 [==============================] - 5s 254us/step - loss: 0.0037 - mean_squared_error: 0.0037 - rmse: 0.0443 - mean_absolute_error: 0.0443 - mean_absolute_percentage_error: 7.6240
Epoch 18/100
20466/20466 [==============================] - 5s 261us/step - loss: 0.0037 - mean_squared_error: 0.0037 - rmse: 0.0441 - mean_absolute_error: 0.0441 - mean_absolute_percentage_error: 7.6082
Epoch 19/100
20466/20466 [==============================] - 5s 253us/step - loss: 0.0037 - mean_squared_error: 0.0037 - rmse: 0.0442 - mean_absolute_error: 0.0442 - mean_absolute_percentage_error: 7.6071
Epoch 20/100
20466/20466 [==============================] - 5s 256us/step - loss: 0.0036 - mean_squared_error: 0.0036 - rmse: 0.0440 - mean_absolute_error: 0.0440 - mean_absolute_percentage_error: 7.5716
Epoch 21/100
20466/20466 [==============================] - 5s 256us/step - loss: 0.0036 - mean_squared_error: 0.0036 - rmse: 0.0439 - mean_absolute_error: 0.0439 - mean_absolute_percentage_error: 7.5581
Epoch 22/100
20466/20466 [==============================] - 5s 250us/step - loss: 0.0036 - mean_squared_error: 0.0036 - rmse: 0.0437 - mean_absolute_error: 0.0437 - mean_absolute_percentage_error: 7.5242
Epoch 23/100
20466/20466 [==============================] - 5s 253us/step - loss: 0.0036 - mean_squared_error: 0.0036 - rmse: 0.0438 - mean_absolute_error: 0.0438 - mean_absolute_percentage_error: 7.5346
Epoch 24/100
20466/20466 [==============================] - 5s 255us/step - loss: 0.0036 - mean_squared_error: 0.0036 - rmse: 0.0437 - mean_absolute_error: 0.0437 - mean_absolute_percentage_error: 7.5135
Epoch 25/100
20466/20466 [==============================] - 5s 256us/step - loss: 0.0036 - mean_squared_error: 0.0036 - rmse: 0.0437 - mean_absolute_error: 0.0437 - mean_absolute_percentage_error: 7.5137
Epoch 26/100
20466/20466 [==============================] - 5s 256us/step - loss: 0.0036 - mean_squared_error: 0.0036 - rmse: 0.0436 - mean_absolute_error: 0.0436 - mean_absolute_percentage_error: 7.4986
Epoch 27/100
20466/20466 [==============================] - 5s 252us/step - loss: 0.0036 - mean_squared_error: 0.0036 - rmse: 0.0436 - mean_absolute_error: 0.0436 - mean_absolute_percentage_error: 7.4974
Epoch 28/100
20466/20466 [==============================] - 5s 256us/step - loss: 0.0035 - mean_squared_error: 0.0035 - rmse: 0.0435 - mean_absolute_error: 0.0435 - mean_absolute_percentage_error: 7.4846
Epoch 29/100
20466/20466 [==============================] - 5s 255us/step - loss: 0.0035 - mean_squared_error: 0.0035 - rmse: 0.0435 - mean_absolute_error: 0.0435 - mean_absolute_percentage_error: 7.4729
Epoch 30/100
20466/20466 [==============================] - 5s 254us/step - loss: 0.0035 - mean_squared_error: 0.0035 - rmse: 0.0434 - mean_absolute_error: 0.0434 - mean_absolute_percentage_error: 7.4596
Epoch 31/100
20466/20466 [==============================] - 5s 259us/step - loss: 0.0035 - mean_squared_error: 0.0035 - rmse: 0.0432 - mean_absolute_error: 0.0432 - mean_absolute_percentage_error: 7.4324
Epoch 32/100
20466/20466 [==============================] - 5s 259us/step - loss: 0.0035 - mean_squared_error: 0.0035 - rmse: 0.0434 - mean_absolute_error: 0.0434 - mean_absolute_percentage_error: 7.4487
Epoch 33/100
20466/20466 [==============================] - 5s 258us/step - loss: 0.0035 - mean_squared_error: 0.0035 - rmse: 0.0431 - mean_absolute_error: 0.0431 - mean_absolute_percentage_error: 7.4081
Epoch 34/100
20466/20466 [==============================] - 5s 249us/step - loss: 0.0035 - mean_squared_error: 0.0035 - rmse: 0.0429 - mean_absolute_error: 0.0429 - mean_absolute_percentage_error: 7.3688
Epoch 35/100
20466/20466 [==============================] - 5s 256us/step - loss: 0.0035 - mean_squared_error: 0.0035 - rmse: 0.0431 - mean_absolute_error: 0.0431 - mean_absolute_percentage_error: 7.3993
Epoch 36/100
20466/20466 [==============================] - 5s 252us/step - loss: 0.0035 - mean_squared_error: 0.0035 - rmse: 0.0431 - mean_absolute_error: 0.0431 - mean_absolute_percentage_error: 7.4003
Epoch 37/100
20466/20466 [==============================] - 5s 254us/step - loss: 0.0035 - mean_squared_error: 0.0035 - rmse: 0.0429 - mean_absolute_error: 0.0429 - mean_absolute_percentage_error: 7.3741
Epoch 38/100
20466/20466 [==============================] - 5s 253us/step - loss: 0.0035 - mean_squared_error: 0.0035 - rmse: 0.0430 - mean_absolute_error: 0.0430 - mean_absolute_percentage_error: 7.3899
Epoch 39/100
20466/20466 [==============================] - 5s 247us/step - loss: 0.0035 - mean_squared_error: 0.0035 - rmse: 0.0430 - mean_absolute_error: 0.0430 - mean_absolute_percentage_error: 7.3913
Epoch 40/100
20466/20466 [==============================] - 5s 243us/step - loss: 0.0035 - mean_squared_error: 0.0035 - rmse: 0.0430 - mean_absolute_error: 0.0430 - mean_absolute_percentage_error: 7.3874
Epoch 41/100
20466/20466 [==============================] - 5s 250us/step - loss: 0.0035 - mean_squared_error: 0.0035 - rmse: 0.0429 - mean_absolute_error: 0.0429 - mean_absolute_percentage_error: 7.3735
Epoch 42/100
20466/20466 [==============================] - 5s 248us/step - loss: 0.0035 - mean_squared_error: 0.0035 - rmse: 0.0429 - mean_absolute_error: 0.0429 - mean_absolute_percentage_error: 7.3750
Epoch 43/100
20466/20466 [==============================] - 5s 254us/step - loss: 0.0035 - mean_squared_error: 0.0035 - rmse: 0.0429 - mean_absolute_error: 0.0429 - mean_absolute_percentage_error: 7.3743
Epoch 44/100
20466/20466 [==============================] - 5s 248us/step - loss: 0.0034 - mean_squared_error: 0.0034 - rmse: 0.0424 - mean_absolute_error: 0.0424 - mean_absolute_percentage_error: 7.3042
Epoch 45/100
12420/20466 [=================>............] - ETA: 1s - loss: 0.0036 - mean_squared_error: 0.0036 - rmse: 0.0451 - mean_absolute_error: 0.0451 - mean_absolute_percentage_error: 7.8422
###Markdown
Plot
###Code
%%skip $skipLSTM24Seg
y_predict = model.predict(x_valid_data, batch_size=batch_size, verbose=1)
plt.figure(figsize=(10, 10))
plt.subplot(311)
plt.plot(y_predict, c='seagreen', alpha=0.6)
plt.title('Predict')
plt.subplot(312)
plt.plot(y_valid_data, c='orange', alpha=0.5)
plt.title('Real')
plt.subplot(313)
plt.plot(y_predict, c='seagreen', alpha=0.6)
plt.plot(y_valid_data, c='orange', alpha=0.5)
plt.legend(['Predicción','Real'])
plt.title('Predict + Real')
plt.suptitle('Categorical Plotting')
plt.show()
###Output
_____no_output_____
###Markdown
Metrics
###Code
%%skip $skipLSTM24Seg
esios_controller.get_metrics(y_valid_data, y_predict)
###Output
_____no_output_____
###Markdown
Plot (real scale)
We apply the inverse scaler to see the predictions at their real scale.
###Code
%%skip $skipLSTM24Seg
y_predict_trans = scaler.inverse_transform(y_predict)
y_valid_trans = scaler.inverse_transform(y_valid_data)
%%skip $skipLSTM24Seg
plt.figure(figsize=(10, 10))
plt.subplot(311)
plt.plot(y_predict_trans, c='seagreen', alpha=0.6)
plt.title('Predict')
plt.subplot(312)
plt.plot(y_valid_trans, c='orange', alpha=0.5)
plt.title('y_valid_trans')
plt.subplot(313)
plt.plot(y_predict_trans, c='seagreen', alpha=0.6)
plt.plot(y_valid_trans, c='orange', alpha=0.5)
plt.legend(['Predicción','Real'])
plt.title('Predict + Real')
plt.suptitle('Categorical Plotting')
plt.show()
###Output
_____no_output_____
###Markdown
Metrics (real scale)
###Code
%%skip $skipLSTM24Seg
esios_controller.get_metrics(y_valid_trans, y_predict_trans)
###Output
_____no_output_____
###Markdown
24-hour-ahead prediction with the current data (grouped by day)
LSTM
Data preparation
In this case we bring in the data grouped by day and apply the same process as before; this time the difference between today and the previous day is a single period.
###Code
%%skip $skipLSTM24Agr
# Load variables:
split_steps = 5
num_steps = split_steps
batch_size = 30
%%skip $skipLSTM24Agr
# split a sequence into samples
def split_sequence(sequence, n_steps):
X = list()
for i in range(len(sequence)):
# find the end of this pattern
end_ix = i + n_steps
# check if we are beyond the sequence
if end_ix > len(sequence)-1:
break
# gather input and output parts of the pattern
seq_x = sequence[i:end_ix,:]
X.append(seq_x)
return np.array(X)
%%skip $skipLSTM24Agr
x_data_grouped = esios_controller.get_df_daily()
x_data = list()
y_data = list()
#x_data = data_consumo.drop(['Unnamed: 0','Unnamed: 0.1','date_timestamp','fecha','date_day','PVPC_DEF','PVPC_2_PED_NOC','PVPC_ELEC_NOC'], axis=1).copy()
x_data = x_data_grouped[['Solar','Demanda','Eolica','Holiday']].copy()
# ADD to x the current hour price, the target is already calculated in PVPC-target
x_data['PVPC_today'] = x_data_grouped['PVPC_DEF']
#GET the stationary data between 24 periods
data_1 = x_data_grouped['PVPC_DEF']
data_2 = x_data_grouped['PVPC_DEF'].shift(periods=1, axis = 0)
x_data['PVPC_stationary_yesterday'] = (data_1 - data_2)
x_data['PVPC_stationary_yesterday'].iloc[0:1] = 0
# Then get the x_data columns
x_data_columns = x_data.columns
#------
#Y_data
#------
y_data = pd.DataFrame(x_data_grouped['PVPC_DEF'].shift(periods=-1, axis = 0).copy())
%%skip $skipLSTM24Agr
display(x_data[0:5])
%%skip $skipLSTM24Agr
display(y_data[0:5])
%%skip $skipLSTM24Agr
#Min-max scaler
scaler = MinMaxScaler()
x_data_norm = scaler.fit_transform(x_data)
y_data_norm = scaler.fit_transform(y_data)
# Split the data
x_train, x_valid, y_train, y_valid = train_test_split(x_data_norm, y_data_norm, test_size=0.33, shuffle=False)
#Convert NaN values to 0:
x_train = np.nan_to_num(x_train)
x_valid = np.nan_to_num(x_valid)
y_train = np.nan_to_num(y_train)
y_valid = np.nan_to_num(y_valid)
print('Xtrain_dim:', x_train.shape)
print('Ytrain_dim:', y_train.shape)
%%skip $skipLSTM24Agr
x_train_est = x_train
y_train_est = y_train
x_train_data = split_sequence(x_train_est, split_steps)
y_train_data = y_train_est[split_steps:,:]
x_valid_data = split_sequence(x_valid, split_steps)
y_valid_data = y_valid[split_steps:,:]
print('Entrenamiento:',x_train_data.shape, y_train_data.shape)
print('Test:',x_valid_data.shape, y_valid_data.shape)
%%skip $skipLSTM24Agr
# Load variables:
num_var = x_train_data.shape[2]
output_size = y_train_data.shape[1]
###Output
_____no_output_____
###Markdown
Model
Same model as the ones trained previously.
###Code
%%skip $skipLSTM24Agr
# define model
model = Sequential()
# LSTM uses Xavier initialization by default, which is the recommended choice for linear problems
model.add(LSTM(100, input_shape=(num_steps,num_var)))
model.add(Activation("relu"))
model.add(Dense(100))
model.add(Activation("relu"))
model.add(Dense(100))
model.add(Activation("relu"))
model.add(Dense(output_size))
model.add(Activation("linear"))
# compile model
model.compile(optimizer='adam', loss='mse', metrics=['mse',rmse,'mae','mape'])
model.summary()
plot_model(model,show_shapes=True)
%%skip $skipLSTM24Agr
print('LSTM')
history_train = model.fit(x_train_data, y_train_data,
epochs=300,
batch_size=batch_size,
shuffle=False,
verbose=1)
# summarize history for loss, MAPE in a different scale than the others
plt.plot(history_train.history['mean_squared_error'])
plt.plot(history_train.history['mean_absolute_error'])
plt.plot(history_train.history['rmse'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['MSE','MAE','RMSE'], loc='upper left')
plt.show()
plt.plot(history_train.history['mean_absolute_percentage_error'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['MAPE'], loc='upper left')
plt.show()
%%skip $skipLSTM24Agr
y_predict = model.predict(x_valid_data, batch_size=batch_size, verbose=1)
plt.figure(figsize=(10, 10))
plt.subplot(311)
plt.plot(y_predict, c='seagreen', alpha=0.6)
plt.title('Predict')
plt.subplot(312)
plt.plot(y_valid_data, c='orange', alpha=0.5)
plt.title('Real')
plt.subplot(313)
plt.plot(y_predict, c='seagreen', alpha=0.6)
plt.plot(y_valid_data, c='orange', alpha=0.5)
plt.legend(['Predicción','Real'])
plt.title('Predict + Real')
plt.suptitle('Categorical Plotting')
plt.show()
%%skip $skipLSTM24Agr
esios_controller.get_metrics(y_valid_data, y_predict)
###Output
_____no_output_____
###Markdown
Encoder
We load the encoder and make the prediction.
###Code
%%skip $skipLSTM24Agr
y_predict_trans = scaler.inverse_transform(y_predict)
y_valid_trans = scaler.inverse_transform(y_valid_data)
predictions = pd.DataFrame(y_predict_trans)
print(np.shape(predictions))
print(np.shape(y_valid_trans))
%%skip $skipLSTM24Agr
predictions['Holiday'] = x_data_grouped[-458:]['Holiday'].values
np.shape(predictions)
###Output
_____no_output_____
###Markdown
Import the encoder
###Code
%%skip $skipLSTM24Agr
encoder = esios_controller.load_keras_model('/content/drive/My Drive/TFM/01.Utils/data/Encoder.json')
%%skip $skipLSTM24Agr
all_day_real_preds = encoder.predict(predictions);
print(np.shape(all_day_real_preds))
###Output
_____no_output_____
###Markdown
Prediction data parsing
Since not every day contains all 24 hours, we must drop the missing hours from the prediction, which does generate all 24.
###Code
%%skip $skipLSTM24Agr
y_data_target = esios_controller.get_df_daily_target_day_prics()
y_data_target = pd.DataFrame(y_data_target.values.tolist(), columns=['h'+str(i) for i in range(24)])
y_data_target_for_test = y_data_target[-458:]
np.shape(y_data_target_for_test)
%%skip $skipLSTM24Agr
import math
pred = list()
real = list()
day_key = 0
for day in y_data_target_for_test.values:
hour_key = 0
for hour in day:
        if not math.isnan(hour) and hour != 0:
real.append(hour)
pred.append(all_day_real_preds[day_key][hour_key])
hour_key += 1
day_key += 1
print(np.shape(pred))
print(np.shape(real))
###Output
_____no_output_____
###Markdown
Plot
###Code
%%skip $skipLSTM24Agr
plt.figure(figsize=(10, 10))
plt.subplot(311)
plt.plot(pred, c='seagreen', alpha=0.6)
plt.title('Predict')
plt.subplot(312)
plt.plot(real, c='orange', alpha=0.5)
plt.title('y_valid_trans')
plt.subplot(313)
plt.plot(pred, c='seagreen', alpha=0.6)
plt.plot(real, c='orange', alpha=0.5)
plt.legend(['Predicción','Real'])
plt.title('Predict + Real')
plt.suptitle('Categorical Plotting')
plt.show()
###Output
_____no_output_____
###Markdown
Metrics
###Code
%%skip $skipLSTM24Agr
esios_controller.get_metrics(pd.DataFrame(real), pd.DataFrame(pred))
###Output
_____no_output_____ |
source/08.01_HOG_Image_Features.ipynb | ###Markdown
Example 8-1 Calculating simple image gradients using Python
###Code
import matplotlib.pyplot as plt
import numpy as np
from skimage import data, color
# Load the example image and turn it into grayscale
image = color.rgb2gray(data.chelsea())
# Compute the horizontal gradient using the centered 1D filter.
# This is equivalent to replacing each non-border pixel with the
# difference between its right and left neighbors. The leftmost
# and rightmost edges have a gradient of 0.
gx = np.empty_like(image)
gx[:, 0] = 0
gx[:, -1] = 0
gx[:, 1:-1] = image[:, :-2] - image[:, 2:]
# Same deal for the vertical gradient
gy = np.empty_like(image)
gy[0, :] = 0
gy[-1, :] = 0
gy[1:-1, :] = image[:-2, :] - image[2:, :]
# Matplotlib incantations
fig, (ax1, ax2, ax3) = plt.subplots(3, 1,
figsize=(15, 10),
sharex=True,
sharey=True)
ax1.axis('off')
ax1.imshow(image, cmap=plt.cm.gray)
ax1.set_title('Original image')
ax1.set_adjustable('box')
ax2.axis('off')
ax2.imshow(gx, cmap=plt.cm.gray)
ax2.set_title('Horizontal gradients')
ax2.set_adjustable('box')
ax3.axis('off')
ax3.imshow(gy, cmap=plt.cm.gray)
ax3.set_title('Vertical gradients')
ax3.set_adjustable('box')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Gradient Orientation Histograms
###Code
from skimage.feature import hog
from skimage import exposure
fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
cells_per_block=(1, 1), visualize=True)
hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
fig, (ax1, ax2) = plt.subplots(1, 2,
figsize=(10, 4),
sharex=True,
sharey=True)
ax1.axis('off')
ax1.imshow(image, cmap=plt.cm.gray)
ax1.set_title('Input image')
ax1.set_adjustable('box')
ax2.axis('off')
ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
ax2.set_title('Histogram of Oriented Gradients')
ax2.set_adjustable('box')
plt.tight_layout()
plt.show()
###Output
_____no_output_____ |
notebooks/LikelihoodExample.ipynb | ###Markdown
Introduction
The purpose of this notebook is to summarize the basic procedure for calculating the probability that a star is a member of an isochrone (this is denoted $u_{color}$ in the `ugali` notation). For simplicity's sake, we analyze only one star and use a simple linear model for the isochrone. Apart from these simplifications, we attempt to stay true to the analysis procedure. The isochrone model is built as a collection of points in color-magnitude space. We keep the general framework for the numerical calculation, though for this simplified case it is overly complex.
NB: I am casual about referring to the "position" of the star or isochrone. This should always be interpreted as the "position in color-magnitude space".
###Code
%matplotlib inline
import numpy as np
import pylab as plt
import scipy.ndimage as nd
from scipy import stats
import pylab as plt
from scipy.interpolate import RectBivariateSpline
# The position of the target star
x,y = np.array((2.17,3.03))
# and its associated uncertainty
xerr,yerr = 0.5,0.5
# The discrete isochrone model (we assume no uncertainty)
xmin = ymin = 0
xmax = ymax = 10
npts = 100
iso_x, iso_y = np.linspace(xmin,xmax,npts,endpoint=False),np.linspace(ymin,ymax,npts,endpoint=False)
# Plot the isochrone and the star.
plt.figure()
plt.errorbar(x,y,xerr=xerr,yerr=yerr,color='r',fmt='o')
plt.scatter(iso_x,iso_y,c='k')
plt.xlim(xmin,xmax); plt.ylim(ymin,ymax)
plt.xlabel('x (color)'); plt.ylabel('y (mag)')
###Output
_____no_output_____
###Markdown
Membership Probability
The membership probability, $u$, is proportional to the product of the stellar pdf and the isochrone pdf integrated over color-magnitude space. If both the position of the star and the isochrone are known exactly, then they are both $\delta$-functions and the probability will only be non-zero when they exactly overlap. In reality, there is uncertainty on the measured position of the star. (There may also be a random intrinsic dispersion that broadens the pdf of the isochrone, but that is not considered here.)
We can incorporate the uncertainty on the measured position of the star in two ways:
* We can convolve the measured position of the star with its uncertainty. This takes the $\delta$-function of the star's position and broadens it into a 2D Gaussian. We can then integrate the product of the star's pdf times the isochrone pdf. Since the isochrone is a set of $\delta$-functions, this product will only be non-zero when the star's pdf overlaps one of the isochrone's points.
* We can convolve the isochrone with the measurement uncertainty of the star. This effectively broadens the pdf of the isochrone from an infinitesimally thin line to a Gaussian. We can then evaluate the pdf of the broadened isochrone at the measured position of the star (a $\delta$-function in position).
These two approaches should give identical results (at least subject to our model assumptions). We start out with a simple solution to the first approach, and subsequently add the numerical complexity necessary for improved performance.
Simple Solution
The simple solution is to convolve the $\delta$-function position of the star with its (Gaussian) uncertainty. This creates a 2D Gaussian pdf for the star, which we multiply by the isochrone pdf. The isochrone pdf is composed of a set of points ($\delta$-functions), so the integral of the star pdf times the isochrone pdf evaluates to zero everywhere except at the isochrone points. This means that we can evaluate the integral by just summing the value of the star pdf at the location of each isochrone point.
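Written out with the notation above (isochrone points $(x_k, y_k)$ for $k = 1 \ldots N$, and measurement uncertainties $\sigma_x$, $\sigma_y$), the first approach is
$$u = \iint p_{\rm star}(x',y')\, p_{\rm iso}(x',y')\, dx'\, dy' = \frac{1}{N}\sum_{k=1}^{N}\mathcal{N}(x_k;\, x, \sigma_x)\, \mathcal{N}(y_k;\, y, \sigma_y),$$
since $p_{\rm iso}(x',y') = \frac{1}{N}\sum_k \delta(x'-x_k)\,\delta(y'-y_k)$ simply picks out the star's Gaussian pdf at each isochrone point; this is exactly the sum evaluated in the cell below.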
###Code
# Simple solution
# Change variables to scale the distance in terms of the uncertainty
dx = np.abs(iso_x - x)/xerr
dy = np.abs(iso_y - y)/yerr
# The change of variables allows us to use a standard normal pdf
pdf_x = stats.norm.pdf(dx)
pdf_y = stats.norm.pdf(dy)
# The pdf of the star is the product of the 1D pdfs
pdf_star = pdf_x * pdf_y
# The isochrone pdf needs to be normalized
pdf_iso = 1./len(iso_x)
# The integral over all space needs to have the change of variables
simple_prob = np.sum(pdf_star * pdf_iso) * (1/xerr) * (1/yerr)
print "Membership probability: ", simple_prob
###Output
Membership probability: 0.0269290395921
###Markdown
Binned Solution
The simple solution discussed in the previous section suffers from one major drawback: it scales with the number of stars *and* the number of isochrone points. We can reduce this second dimensionality if we instead bin the isochrone points and integrate over the resulting 2D histogram.
###Code
# Wrap a function for creating a 2D-histogram
def histo(x,y,delta,**kwargs):
"""
Histogram the isochrone in mag-mag space.
"""
bins_x = np.arange(x.min()-0.5*delta,x.max()+0.5*delta,delta)
bins_y = np.arange(y.min()-0.5*delta,y.max()+0.5*delta,delta)
return bins_x,bins_y,np.histogram2d(x,y,bins=[bins_x, bins_y],**kwargs)[0]
def triinterp2d(x,y,z,**kwargs):
"""
Create a linear triangular interpolation using the mlab implementation.
See `matplotlib.tri.LinearTriInterpolator`
Returns a numpy masked array.
"""
import warnings
from matplotlib import tri as mtri
warnings.simplefilter(action="ignore", category=FutureWarning)
triang = mtri.Triangulation(x.flat, y.flat)
return mtri.LinearTriInterpolator(triang, z.flat)
# The real analysis requires speed, so we bin everything
# Build a 2D-histogram populated with the isochrone points
delta = 0.01
bins_x,bins_y,nhist = histo(iso_x,iso_y,delta,normed=True)
bx, by = bins_x[1:],bins_y[1:]
# x and y coordinates of each bin (actually the left bin edge)
yy,xx = np.meshgrid(bx,by)
# Array index of the star
ix,iy = np.argmin(np.abs(bx-x)),np.argmin(np.abs(by-y))
# Plot the isochrone 2D-pdf and the location of the star.
# Note that the isochrone is discrete points, so it sparsely fills the histogram (hard to see)
plt.pcolormesh(xx,yy,nhist,cmap='binary'); plt.xlim(xmin,xmax); plt.ylim(ymin,ymax)
plt.errorbar(x,y,xerr=xerr,yerr=yerr,color='r',fmt='o')
plt.xlabel('x (color)'); plt.ylabel('y (mag)')
# Check that the histogram is normalized over 2D space
print "Isochrone normalization: ", nhist.sum() * delta * delta
###Output
Isochrone normalization: 1.0
###Markdown
Star Convolution
Here we convolve the star pdf with its uncertainty and evaluate at each isochrone point. This is the same concept as the first example, but is done in a binned manner that more easily incorporates complicated isochrone models (i.e., with intrinsic dispersion).
###Code
# Binned convolution
# Now we convolve the location of the star
dx = (xx - x)/xerr
dy = (yy - y)/yerr
# By dividing by xerr and yerr we can use a unit normal pdf
pdf_x = stats.norm.pdf(dx)
pdf_y = stats.norm.pdf(dy)
pdf_star = (pdf_x*pdf_y)
# Make sure everything is normalized
# Note that the bin sizes changed when we divided by xerr,yerr in calculating dx,dy
print "Normalization of x-pdf: ",np.sum(pdf_x[:,iy] * delta/xerr)
print "Normalization of y-pdf: ",np.sum(pdf_y[ix,:] * delta/yerr)
print "Normalization of 2D pdf: ",np.sum(pdf_star) * (delta/xerr)*(delta/yerr)
# Plot the convolved pdf of the star and the discrete isochrone
if True:
zz = np.ma.array(pdf_star,mask= pdf_star < 1e-4)
plt.pcolormesh(xx,yy,nhist,cmap='binary')
plt.pcolormesh(xx,yy,zz); plt.colorbar()
plt.scatter(x,y,c='r')
plt.xlim(xmin,xmax); plt.ylim(ymin,ymax)
plt.xlabel('x (color)'); plt.ylabel('y (mag)')
# Evaluate the probability at the star
star_conv_prob = np.sum(pdf_x * pdf_y * nhist) * (delta/xerr)*(delta/yerr)
print "Membership probability: ", star_conv_prob
###Output
Normalization of x-pdf: 0.999992878209
Normalization of y-pdf: 0.99999999932
Normalization of 2D pdf: 0.999992877529
Membership probability: 0.0269290395921
###Markdown
Isochrone Convolution
Next we convolve the isochrone with the measurement uncertainty on the star and then evaluate the convolved isochrone pdf at the location of the star. This would be hugely beneficial if the stellar uncertainty could be described consistently for all stars. However, we are in the situation where we have varying survey depth, etc., so it seems safer to use the uncertainty on each star individually.
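Up to binning and boundary effects this must return the same number as the previous approach: smoothing the $\delta$-function isochrone with the star's Gaussian kernel and evaluating at $(x, y)$ gives
$$(p_{\rm iso} * G_{\sigma})(x, y) = \frac{1}{N}\sum_{k=1}^{N}\mathcal{N}(x;\, x_k, \sigma_x)\, \mathcal{N}(y;\, y_k, \sigma_y),$$
which equals the earlier sum because the Gaussian is symmetric in its two arguments.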
###Code
# First we convolve the isochrone with the measurement uncertainties of the star
pdf_iso = nd.gaussian_filter(nhist,(yerr/delta,xerr/delta), truncate=5, mode='constant')
# The Gaussian filter is messy at the boundaries and distorts the normalization
print "Convolved isochrone normalization: ", np.sum(pdf_iso) * delta * delta
# Hoever, the star is far enough from the boundaries to not be affected
# Plot the convolved isochrone and the location of the star
if True:
zz = np.ma.array(pdf_iso,mask=pdf_iso < 1e-4)
plt.pcolormesh(xx,yy,zz); plt.colorbar()
plt.scatter(x,y,c='r')
plt.xlim(xmin,xmax); plt.ylim(ymin,ymax)
plt.xlabel('x (color)'); plt.ylabel('y (mag)')
# Index to roughly the right location in the grid
#print x, y
#print xx[ix,iy],yy[ix,iy]
#iso_conv_prob = pdf_iso[ix,iy]
# Interpolate to the exact location
spline = RectBivariateSpline(bx,by,pdf_iso)
iso_conv_prob = spline(x,y)[0][0]
print "Membership probability: ", iso_conv_prob
###Output
Convolved isochrone normalization: 0.925051583314
Membership probability: 0.0269290687553
###Markdown
More Detail...
The above is a fairly simplified description of the analysis. The actual analysis adds complexity for a few reasons:
* We want to vectorize over the stars so that we can calculate all of the membership probabilities simultaneously.
* Because each star has a different measurement uncertainty, it is difficult to vectorize the isochrone convolution, so we choose to do the stellar convolution.
* Instead of evaluating the pdf at each bin, we calculate the difference of the cdf at the two bin edges (see the worked form below). This provides increased numerical accuracy in the case where the bins are large compared to the stellar pdf.
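In equation form (standard normal cdf $\Phi$ and pdf $\phi$, bin width $\Delta$): for a single bin $[a, b]$ in $x$, and analogously in $y$, the exact probability mass of the star's Gaussian is
$$P(a < x' < b) = \Phi\!\left(\frac{b - x}{\sigma_x}\right) - \Phi\!\left(\frac{a - x}{\sigma_x}\right) \approx \phi\!\left(\frac{x_{\rm bin} - x}{\sigma_x}\right)\frac{\Delta}{\sigma_x}.$$
The left-hand side stays accurate even when $\Delta$ is comparable to $\sigma_x$, which is why the cell below uses cdf differences rather than pdf values.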
###Code
# Now we convolve the location of the star
dx_hi = (xx - bx[ix])/xerr
dx_lo = (xx - bx[ix+1])/xerr
dy_hi = (yy - by[iy])/yerr
dy_lo = (yy - by[iy+1])/yerr
# These are not really the pdf, but the pdf * (dx_hi - dx_lo)
pdf_x = (stats.norm.cdf(dx_hi) - stats.norm.cdf(dx_lo)) * (xerr/delta)
pdf_y = (stats.norm.cdf(dy_hi) - stats.norm.cdf(dy_lo)) * (yerr/delta)
pdf_star = pdf_x * pdf_y
# Make sure everything is normalized
# Note that the bin sizes changed when we divided by xerr,yerr in calculating dx,dy
print "Normalization of x-pdf: ",np.sum(pdf_x[:,iy]) * delta/xerr
print "Normalization of y-pdf: ",np.sum(pdf_y[ix,:]) * delta/yerr
print "Normalization of 2D pdf: ",np.sum(pdf_star) * (delta/xerr) * (delta/yerr)
# Plot the convolved pdf of the star and the discrete isochrone
if True:
zz = np.ma.array(pdf_star,mask= pdf_star < 1e-4)
plt.pcolormesh(xx,yy,nhist,cmap='binary')
plt.pcolormesh(xx,yy,zz); plt.colorbar()
plt.scatter(x,y,c='r')
plt.xlim(xmin,xmax); plt.ylim(ymin,ymax)
plt.xlabel('x (color)'); plt.ylabel('y (mag)')
# Evaluate the probability at the star
star_cdf_prob = np.sum(pdf_x * pdf_y * nhist) * (delta/xerr) * (delta/yerr)
print "Membership probability: ",star_cdf_prob
###Output
Normalization of x-pdf: 0.999992875864
Normalization of y-pdf: 0.999999999319
Normalization of 2D pdf: 0.999992875184
Membership probability: 0.0269292546543
###Markdown
Moment of truth...
So now we would like to compare the various techniques of estimating the membership probability. Do they match up...?
###Code
print "Membership probability from the simple solution: ",simple_prob
print "Membership probability from convolving star: ", star_conv_prob
print "Membership probability from convolving isochrone: ", iso_conv_prob
print "Membership probability from CDF convolution: ", star_cdf_prob
###Output
Membership probability from the simple solution: 0.0269290395921
Membership probability from convolving star: 0.0269290395921
Membership probability from convolving isochrone: 0.0269290687553
Membership probability from CDF convolution: 0.0269292546543
|
notebooks/.ipynb_checkpoints/A002-MNIST-2layers-checkpoint.ipynb | ###Markdown
Sequential MNIST results from the paper by Rui Costa et al.: "Cortical microcircuits as gated-recurrent neural networks"
Implementation done in the scope of the nurture.ai NIPS 2017 paper implementation challenge.
- nurture.ai challenge: https://nurture.ai/nips-challenge
- Paper: http://papers.nips.cc/paper/6631-cortical-microcircuits-as-gated-recurrent-neural-networks
- Credits: Training logic based on the r2rt LSTM tutorial (https://r2rt.com/recurrent-neural-networks-in-tensorflow-ii.html). Model definition based on KnHuq's implementation (https://github.com/KnHuq/Dynamic-Tensorflow-Tutorial/blob/master/LSTM/LSTM.py).
This notebook compares the results of models with 2 layers (not done in the paper).
Loading Libraries and Models
###Code
%load_ext autoreload
%autoreload 2
%matplotlib inline
import numpy as np
import sys
#import LSTM and subLSTM cell models with 2 layers
sys.path.append('../models/')
from LSTMTwoLayers import *
from subLSTMTwoLayers import *
from parameters import *
sys.path.append('../src/common/')
import helper as hp
###Output
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
###Markdown
Loading MNIST dataset
###Code
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
###Output
Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
###Markdown
Training Function
###Code
def train_network(g, batch_size=50, n_epoch=10, verbose=False, save=False):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
accuracy = 0
#Iterations to do trainning
for epoch in range(n_epoch):
X, Y = mnist.train.next_batch(batch_size)
X = X.reshape(batch_size, 1, g['input_size'])
sess.run(g['train_step'],feed_dict={g['rnn']._inputs:X, g['y']:Y})
# save last epoch
if epoch == n_epoch -1 :
X_test = mnist.test.images.reshape(10000,1,g['input_size'])
accuracy= sess.run(g['accuracy'],feed_dict={g['rnn']._inputs:X_test, g['y']:mnist.test.labels})
if verbose:
if epoch % 10 == 0:
Loss=str(sess.run(g['cross_entropy'],feed_dict={g['rnn']._inputs:X, g['y']:Y}))
Train_accuracy=str(sess.run(g['accuracy'],feed_dict={g['rnn']._inputs:X, g['y']:Y}))
X_test = mnist.test.images.reshape(10000,1,g['input_size'])
Test_accuracy=str(sess.run(g['accuracy'],feed_dict={g['rnn']._inputs:X_test, g['y']:mnist.test.labels}))
print("\rIteration: %s Loss: %s Train Accuracy: %s Test Accuracy: %s"%(epoch,Loss,Train_accuracy,Test_accuracy))
if isinstance(save, str):
g['saver'].save(sess, save)
return accuracy
###Output
_____no_output_____
###Markdown
Building Graph Model Function
###Code
def build_graph(cell_type = None, load_parameters = False):
# define initial parameters
input_size = 784
output_size = 10
optimizer = 'Adam'
momentum = False
learning_rate = 0.001
hidden_units = 10
if load_parameters:
#load parameters from file
if cell_type == 'LSTM':
parameters = LSTM_parameters()
elif cell_type == 'sub_LSTM':
parameters = SubLSTM_parameters()
elif cell_type == 'fix_sub_LSTM':
parameters = Fix_subLSTM_parameters()
else:
print("No cell_type selected! Use LSTM cell")
parameters = LSTM_parameters()
input_size = parameters.mnist['input_size']
output_size = parameters.mnist['output_size']
optimizer = parameters.mnist['optimizer']
momentum = parameters.mnist['momentum']
learning_rate = parameters.mnist['learning_rate']
hidden_units = parameters.mnist['hidden_units']
# reset graph
if 'sess' in globals() and sess:
sess.close()
tf.reset_default_graph()
# Initializing rnn object
if cell_type == 'LSTM':
rnn = LSTM_cell(input_size, hidden_units, output_size)
elif cell_type == 'sub_LSTM':
rnn = subLSTM_cell(input_size, hidden_units, output_size)
elif cell_type == 'fix_sub_LSTM':
print("TODO!")
else:
rnn = LSTM_cell(input_size, hidden_units, output_size)
#input label placeholder
y = tf.placeholder(tf.float32, [None, output_size])
# Getting all outputs from rnn
outputs = rnn.get_outputs()
# Getting final output through indexing after reversing
last_output = outputs[-1]
# As rnn model output the final layer through Relu activation softmax is
# used for final output
output = tf.nn.softmax(last_output)
# Computing the Cross Entropy loss
cross_entropy = -tf.reduce_sum(y * tf.log(output))
# setting optimizer
if optimizer == 'Adam':
# Trainning with Adam Optimizer
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
elif optimizer == 'RMSProp':
# Trainning with RMSProp Optimizer
train_step = tf.train.RMSPropOptimizer(learning_rate).minimize(cross_entropy)
else:
#if nothing is define use Adam optimizer
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
# Calculation of correct prediction and accuracy
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(output, 1))
accuracy = (tf.reduce_mean(tf.cast(correct_prediction, tf.float32))) * 100
return dict(
rnn = rnn,
y = y,
input_size = input_size,
output = output,
cross_entropy = cross_entropy,
train_step = train_step,
preds = correct_prediction,
accuracy = accuracy,
saver = tf.train.Saver()
)
###Output
_____no_output_____
###Markdown
Simulation Parameters
###Code
n_simulation = 2
batch_size = 50
n_epoch = 1000
###Output
_____no_output_____
###Markdown
LSTM training
###Code
%%time
lstm_accuracies = []
print('Run for: ', n_simulation, ' simulation(s)')
for n in range(n_simulation):
g = build_graph(cell_type='LSTM', load_parameters=True)
test_accuracy = train_network(g, batch_size, n_epoch, verbose=False)
lstm_accuracies.append(test_accuracy)
lstm_mean_accuracy = np.mean(lstm_accuracies)
lstm_std_accuracy = np.std(lstm_accuracies)
lstm_best_accuracy = np.amax(lstm_accuracies)
print("The mean test accuracy of the simulation is:", lstm_mean_accuracy)
print("the standard deviation is:", lstm_std_accuracy)
print("The best test accuracy obtained was:", lstm_best_accuracy)
###Output
Run for: 2 simulation(s)
The mean test accuracy of the simulation is: 47.955
the standard deviation is: 2.995
The best test accuracy obtained was: 50.95
CPU times: user 1min 2s, sys: 2.95 s, total: 1min 5s
Wall time: 27.6 s
###Markdown
SubLSTM training
###Code
%%time
sub_lstm_accuracies = []
print('Run for: ', n_simulation, ' simulation(s)')
for n in range(n_simulation):
g = build_graph(cell_type='sub_LSTM', load_parameters=True)
test_accuracy = train_network(g, batch_size, n_epoch, verbose = False)
sub_lstm_accuracies.append(test_accuracy)
sub_lstm_mean_accuracy = np.mean(sub_lstm_accuracies)
sub_lstm_std_accuracy = np.std(sub_lstm_accuracies)
sub_lstm_best_accuracy = np.amax(sub_lstm_accuracies)
print("The mean test accuracy of the simulation is:", sub_lstm_mean_accuracy)
print("the standard deviation is:", sub_lstm_std_accuracy)
print("The best test accuracy obtained was:", sub_lstm_best_accuracy)
###Output
Run for: 2 simulation(s)
The mean test accuracy of the simulation is: 46.265
the standard deviation is: 2.555
The best test accuracy obtained was: 48.82
CPU times: user 1min 3s, sys: 2.92 s, total: 1min 5s
Wall time: 28.2 s
###Markdown
Plot test mean accuracies and std
###Code
objects = ('LSTM', 'SubLSTM')
accuracies = [lstm_mean_accuracy,sub_lstm_mean_accuracy]
std_accuracies = [lstm_std_accuracy,sub_lstm_std_accuracy]
hp.bar_plot(objects, accuracies, std_accuracies)
###Output
_____no_output_____ |
examples/02_model_hybrid/fm_deep_dive.ipynb | ###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Factorization Machine Deep DiveFactorization machine (FM) is one of the representative algorithms that are used for building hybrid recommenders model. The algorithm is powerful in terms of capturing the effects of not just the input features but also their interactions. The algorithm provides better generalization capability and expressiveness compared to other classic algorithms such as SVMs. The most recent research extends the basic FM algorithms by using deep learning techniques, which achieve remarkable improvement in a few practical use cases.This notebook presents a deep dive into the Factorization Machine algorithm, and demonstrates some best practices of using the contemporary FM implementations like [`xlearn`](https://github.com/aksnzhy/xlearn) for dealing with tasks like click-through rate prediction. 1 Factorization Machine 1.1 Factorization Machine FM is an algorithm that uses factorization in prediction tasks with data set of high sparsity. The algorithm was original proposed in [\[1\]](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf). Traditionally, the algorithms such as SVM do not perform well in dealing with highly sparse data that is usually seen in many contemporary problems, e.g., click-through rate prediction, recommendation, etc. FM handles the problem by modeling not just first-order linear components for predicting the label, but also the cross-product of the feature variables in order to capture more generalized correlation between variables and label. In certain occasions, the data that appears in recommendation problems, such as user, item, and feature vectors, can be encoded into a one-hot representation. Under this arrangement, classical algorithms like linear regression and SVM may suffer from the following problems:1. The feature vectors are highly sparse, and thus it makes it hard to optimize the parameters to fit the model efficienly2. Cross-product of features will be sparse as well, and this in turn, reduces the expressiveness of a model if it is designed to capture the high-order interactions between features The FM algorithm is designed to tackle the above two problems by factorizing latent vectors that model the low- and high-order components. The general idea of a FM model is expressed in the following equation: $$\hat{y}(\textbf{x})=w_{0}+\sum^{n}_{i=1}w_{i}x_{i}+\sum^{n}_{i=1}\sum^{n}_{j=i+1}x_{i}x_{j}$$ where $\hat{y}$ and $\textbf{x}$ are the target to predict and input feature vectors, respectively. $w_{i}$ is the model parameters for the first-order component. $$ is the dot product of two latent factors for the second-order interaction of feature variables, and it is defined as $$=\sum^{k}_{f=1}v_{i,f}\cdot v_{j,f}$$ Compared to using fixed parameter for the high-order interaction components, using the factorized vectors increase generalization as well as expressiveness of the model. In addition to this, the computation complexity of the equation (above) is $O(kn)$ where $k$ and $n$ are the dimensionalities of the factorization vector and input feature vector, respectively (see [the paper](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf) for detailed discussion). In practice, usually a two-way FM model is used, i.e., only the second-order feature interactions are considered to favor computational efficiency. 1.2 Field-Aware Factorization Machine Field-aware factorization machine (FFM) is an extension to FM. 
It was originally introduced in [\[2\]](https://www.csie.ntu.edu.tw/~cjlin/papers/ffm.pdf). The advantage of FFM over FM is that, it uses different factorized latent factors for different groups of features. The "group" is called "field" in the context of FFM. Putting features into fields resolves the issue that the latent factors shared by features that intuitively represent different categories of information may not well generalize the correlation. Different from the formula for the 2-order cross product as can be seen above in the FM equation, in the FFM settings, the equation changes to $$\theta_{\text{FFM}}(\textbf{w}\textbf{x})=\sum^{n}_{j1=1}\sum^{n}_{j2=j1+1}x_{j1}x_{j2}$$ where $f_1$ and $f_2$ are the fields of $j_1$ and $j_2$, respectively. Compared to FM, the computational complexity increases to $O(n^2k)$. However, since the latent factors in FFM only need to learn the effect within the field, so the $k$ values in FFM is usually much smaller than that in FM. 1.3 FM/FFM extensions In the recent years, FM/FFM extensions were proposed to enhance the model performance further. The new algorithms leverage the powerful deep learning neural network to improve the generalization capability of the original FM/FFM algorithms. Representatives of the such algorithms are summarized as below. Some of them are implemented and demonstrated in the microsoft/recommenders repository. |Algorithm|Notes|References|Example in Microsoft/Recommenders||---------|-----|----------|---------------------------------||DeepFM|Combination of FM and DNN where DNN handles high-order interactions|[\[3\]](https://arxiv.org/abs/1703.04247)|-||xDeepFM|Combination of FM, DNN, and Compressed Interaction Network, for vectorized feature interactions|[\[4\]](https://dl.acm.org/citation.cfm?id=3220023)|[notebook](../00_quick_start/xdeepfm_criteo.ipynb) / [utilities](../../reco_utils/recommender/deeprec/models/xDeepFM.py)||Factorization Machine Supported Neural Network|Use FM user/item weight vectors as input layers for DNN model|[\[5\]](https://link.springer.com/chapter/10.1007/978-3-319-30671-1_4)|-||Product-based Neural Network|An additional product-wise layer between embedding layer and fully connected layer to improve expressiveness of interactions of features across fields|[\[6\]](https://ieeexplore.ieee.org/abstract/document/7837964)|-||Neural Factorization Machines|Improve the factorization part of FM by using stacks of NN layers to improve non-linear expressiveness|[\[7\]](https://dl.acm.org/citation.cfm?id=3080777)|-||Wide and deep|Combination of linear model (wide part) and deep neural network model (deep part) for memorisation and generalization|[\[8\]](https://dl.acm.org/citation.cfm?id=2988454)|[notebook](../00_quick_start/wide_deep_movielens.ipynb) / [utilities](../../reco_utils/recommender/wide_deep)| 2 Factorization Machine Implementation 2.1 Implementations The following table summarizes the implementations of FM/FFM. Some of them (e.g., xDeepFM and VW) are implemented and/or demonstrated in the microsoft/recommenders repository |Implementation|Language|Notes|Examples in Microsoft/Recommenders||-----------------|------------------|------------------|---------------------||[libfm](https://github.com/srendle/libfm)|C++|Implementation of FM algorithm|-||[libffm](https://github.com/ycjuan/libffm)|C++|Original implemenation of FFM algorithm. 
It is handy in model building, but does not support Python interface|-||[xlearn](https://github.com/aksnzhy/xlearn)|C++ with Python interface|More computationally efficient compared to libffm without loss of modeling effectiveness|[notebook](fm_deep_dive.ipynb)||[Vowpal Wabbit FM](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Matrix-factorization-example)|Online library with estimator API|Easy to use by calling API|[notebook](../02_model_content_based_filtering/vowpal_wabbit_deep_dive.ipynb) / [utilities](../../reco_utils/recommender/vowpal_wabbit)|[microsoft/recommenders xDeepFM](../../reco_utils/recommender/deeprec/models/xDeepFM.py)|Python|Support flexible interface with different configurations of FM and FM extensions, i.e., LR, FM, and/or CIN|[notebook](../00_quick_start/xdeepfm_criteo.ipynb) / [utilities](../../reco_utils/recommender/deeprec/models/xDeepFM.py)| Other than `libfm` and `libffm`, all the other three can be used in a Python environment. * A deep dive of using Vowbal Wabbit for FM model can be found [here](../02_model_content_based_filtering/vowpal_wabbit_deep_dive.ipynb)* A quick start of Microsoft xDeepFM algorithm can be found [here](../00_quick_start/xdeepfm_criteo.ipynb). Therefore, in the example below, only code examples and best practices of using `xlearn` are presented. 2.2 xlearn Setups for using `xlearn`.1. `xlearn` is implemented in C++ and has Python bindings, so it can be directly installed as a Python package from PyPI. The installation of `xlearn` is enabled in the [Recommenders repo environment setup script](../../tools/generate_conda_file.py). One can follow the general setup steps to install the environment as required, in which `xlearn` is installed as well.2. NOTE `xlearn` may require some base libraries installed as prerequisites in the system, e.g., `cmake`. After a succesful creation of the environment, one can load the packages to run `xlearn` in a Jupyter notebook or Python script.
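Before moving on to the library code, here is a minimal NumPy sketch (illustrative only, not from any library) of the two-way FM prediction from section 1.1, $\hat{y}(\textbf{x})=w_{0}+\sum^{n}_{i=1}w_{i}x_{i}+\sum^{n}_{i=1}\sum^{n}_{j=i+1}\langle \textbf{v}_i,\textbf{v}_j\rangle x_{i}x_{j}$. It uses the $O(kn)$ identity from the original paper, $\sum_{i<j}\langle \textbf{v}_i,\textbf{v}_j\rangle x_i x_j=\frac{1}{2}\sum^{k}_{f=1}\big[(\sum_i v_{i,f}x_i)^2-\sum_i v_{i,f}^2x_i^2\big]$; all variable names are made up for the example.

```python
import numpy as np

def fm_predict(x, w0, w, V):
    """Two-way FM prediction for a single feature vector.

    x: (n,) features; w0: bias; w: (n,) linear weights;
    V: (n, k) latent factors, one k-dimensional vector per feature.
    """
    linear = w0 + w @ x
    # O(kn) pairwise term: 0.5 * sum_f [(sum_i v_if x_i)^2 - sum_i v_if^2 x_i^2]
    vx = V.T @ x                  # shape (k,)
    v2x2 = (V ** 2).T @ (x ** 2)  # shape (k,)
    pairwise = 0.5 * np.sum(vx ** 2 - v2x2)
    return linear + pairwise

# toy example: n=4 (sparse) features, k=2 latent dimensions
rng = np.random.default_rng(0)
x = np.array([1.0, 0.0, 1.0, 0.0])
w0, w, V = 0.1, rng.normal(size=4), rng.normal(size=(4, 2))
print(fm_predict(x, w0, w, V))
```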
###Code
import sys
import os
import papermill as pm
import scrapbook as sb
from tempfile import TemporaryDirectory
import xlearn as xl
from sklearn.metrics import roc_auc_score
import numpy as np
import pandas as pd
import seaborn as sns
%matplotlib notebook
from matplotlib import pyplot as plt
from reco_utils.common.constants import SEED
from reco_utils.common.timer import Timer
from reco_utils.dataset.download_utils import maybe_download, unzip_file
from reco_utils.tuning.parameter_sweep import generate_param_grid
from reco_utils.dataset.pandas_df_utils import LibffmConverter
print("System version: {}".format(sys.version))
print("Xlearn version: {}".format(xl.__version__))
###Output
System version: 3.6.13 |Anaconda, Inc.| (default, Feb 23 2021, 21:15:04)
[GCC 7.3.0]
Xlearn version: 0.4.0
###Markdown
In FM model building, data is usually represented in the libsvm format, i.e., `label feat1:val1 feat2:val2 ...`, where `label` is the target to predict and `val` is the value of each feature `feat`. The FFM algorithm requires data in the libffm format, where each vector is split into several fields containing categorical/numerical features, i.e., `label field1:feat1:val1 field2:feat2:val2 ...`. Among the Microsoft/Recommenders utility functions, [a libffm converter](../../reco_utils/dataset/pandas_df_utils.py) is provided to transform tabular feature vectors into the corresponding libffm representation. For example, the following shows how to transform a synthesized dataset with the `LibffmConverter` module.
###Code
df_feature_original = pd.DataFrame({
'rating': [1, 0, 0, 1, 1],
'field1': ['xxx1', 'xxx2', 'xxx4', 'xxx4', 'xxx4'],
'field2': [3, 4, 5, 6, 7],
'field3': [1.0, 2.0, 3.0, 4.0, 5.0],
'field4': ['1', '2', '3', '4', '5']
})
converter = LibffmConverter().fit(df_feature_original, col_rating='rating')
df_out = converter.transform(df_feature_original)
df_out
print('There are in total {0} fields and {1} features.'.format(converter.field_count, converter.feature_count))
###Output
There are in total 4 fields and 10 features.
###Markdown
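Since `xlearn` reads its training data from plain-text files, the converted DataFrame above can simply be written out in libffm format. A minimal sketch (the file name is illustrative, and it assumes `df_out` holds the label in the first column followed by `field:feature:value` strings, as displayed above):

```python
# write rows as "label field:feat:val field:feat:val ..." lines for xlearn
df_out.to_csv("synthetic.ffm", sep=" ", header=False, index=False)
```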
To illustrate the use of `xlearn`, the following example builds and evaluates an FFM model on the [Criteo data set](https://labs.criteo.com/category/dataset/), which has already been processed into the libffm format. Sometimes it is useful to know the total numbers of fields and features; when building an FFM model, `xlearn` counts these numbers automatically.
###Code
# Model parameters
LEARNING_RATE = 0.2
LAMBDA = 0.002
EPOCH = 10
OPT_METHOD = "sgd" # options are "sgd", "adagrad" and "ftrl"
# The metrics for binary classification options are "acc", "prec", "f1" and "auc"
# for regression, options are "rmse", "mae", "mape"
METRIC = "auc"
# Paths
YAML_FILE_NAME = "xDeepFM.yaml"
TRAIN_FILE_NAME = "cretio_tiny_train"
VALID_FILE_NAME = "cretio_tiny_valid"
TEST_FILE_NAME = "cretio_tiny_test"
MODEL_FILE_NAME = "model.out"
OUTPUT_FILE_NAME = "output.txt"
tmpdir = TemporaryDirectory()
data_path = tmpdir.name
yaml_file = os.path.join(data_path, YAML_FILE_NAME)
train_file = os.path.join(data_path, TRAIN_FILE_NAME)
valid_file = os.path.join(data_path, VALID_FILE_NAME)
test_file = os.path.join(data_path, TEST_FILE_NAME)
model_file = os.path.join(data_path, MODEL_FILE_NAME)
output_file = os.path.join(data_path, OUTPUT_FILE_NAME)
assets_url = "https://recodatasets.z20.web.core.windows.net/deeprec/xdeepfmresources.zip"
assets_file = maybe_download(assets_url, work_directory=data_path)
unzip_file(assets_file, data_path)
###Output
100%|██████████| 10.3k/10.3k [00:00<00:00, 55.9kKB/s]
###Markdown
The following steps follow the [official documentation of `xlearn`](https://xlearn-doc.readthedocs.io/en/latest/index.html) for building a model. To begin with, we do not modify any training parameter values. NOTE: if `xlearn` is run from the command line, the training progress is displayed in the console.
###Code
# Training task
ffm_model = xl.create_ffm() # Use field-aware factorization machine (ffm)
ffm_model.setTrain(train_file) # Set the path of training dataset
ffm_model.setValidate(valid_file) # Set the path of validation dataset
# Parameters:
# 0. task: binary classification
# 1. learning rate: 0.2
# 2. regular lambda: 0.002
# 3. evaluation metric: auc
# 4. number of epochs: 10
# 5. optimization method: sgd
param = {"task":"binary",
"lr": LEARNING_RATE,
"lambda": LAMBDA,
"metric": METRIC,
"epoch": EPOCH,
"opt": OPT_METHOD
}
# Start to train
# The trained model will be stored in model.out
with Timer() as time_train:
ffm_model.fit(param, model_file)
print(f"Training time: {time_train}")
# Prediction task
ffm_model.setTest(test_file) # Set the path of test dataset
ffm_model.setSigmoid() # Convert output to 0-1
# Start to predict
# The output result will be stored in output.txt
with Timer() as time_predict:
ffm_model.predict(model_file, output_file)
print(f"Prediction time: {time_predict}")
###Output
Prediction time: 0.6435
###Markdown
The output contains the predicted probabilities (sigmoid scores between 0 and 1) for the test data set. The AUC score is calculated to evaluate the model performance.
###Code
with open(output_file) as f:
predictions = f.readlines()
with open(test_file) as f:
truths = f.readlines()
truths = np.array([float(truth.split(' ')[0]) for truth in truths])
predictions = np.array([float(prediction.strip()) for prediction in predictions])
auc_score = roc_auc_score(truths, predictions)
auc_score
sb.glue('auc_score', auc_score)
###Output
_____no_output_____
###Markdown
It can be seen that the model building/scoring process is fast and the model performance is good. 2.3 Hyperparameter tuning of `xlearn` The following presents a naive approach to tune the parameters of `xlearn`, which is using grid-search of parameter values to find the optimal combinations. It is worth noting that the original [FFM paper](https://www.csie.ntu.edu.tw/~cjlin/papers/ffm.pdf) gave some hints in terms of the impact of parameters on the sampled Criteo dataset. The following are the parameters that can be tuned in the `xlearn` implementation of FM/FFM algorithm. |Parameter|Description|Default value|Notes||-------------|-----------------|------------------|-----------------||`lr`|Learning rate|0.2|Higher learning rate helps fit a model more efficiently but may also result in overfitting.||`lambda`|Regularization parameter|0.00002|The value needs to be selected empirically to avoid overfitting.||`k`|Dimensionality of the latent factors|4|In FFM the effect of k is not that significant as the algorithm itself considers field where `k` can be small to capture the effect of features within each of the fields.||`init`|Model initialization|0.66|-||`epoch`|Number of epochs|10|Using a larger epoch size will help converge the model to its optimal point|
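For reference, the `generate_param_grid` helper used below simply expands this dictionary into the Cartesian product of its values, yielding one parameter dictionary per combination; conceptually it behaves like the following sketch (shown only for illustration, with a made-up name):

```python
import itertools

def expand_grid(param_dict):
    """Cartesian product of parameter values -> list of parameter dicts."""
    keys = list(param_dict)
    return [dict(zip(keys, values))
            for values in itertools.product(*(param_dict[k] for k in keys))]

# e.g. expand_grid({"lr": [0.001, 0.01], "lambda": [0.01, 0.1]}) -> 4 combinations
```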
###Code
param_dict = {
"lr": [0.0001, 0.001, 0.01],
"lambda": [0.001, 0.01, 0.1]
}
param_grid = generate_param_grid(param_dict)
auc_scores = []
with Timer() as time_tune:
for param in param_grid:
ffm_model = xl.create_ffm()
ffm_model.setTrain(train_file)
ffm_model.setValidate(valid_file)
ffm_model.fit(param, model_file)
ffm_model.setTest(test_file)
ffm_model.setSigmoid()
ffm_model.predict(model_file, output_file)
with open(output_file) as f:
predictions = f.readlines()
with open(test_file) as f:
truths = f.readlines()
truths = np.array([float(truth.split(' ')[0]) for truth in truths])
        predictions = np.array([float(prediction.strip()) for prediction in predictions])
auc_scores.append(roc_auc_score(truths, predictions))
print('Tuning by grid search takes {0:.2} min'.format(time_tune.interval / 60))
auc_scores = [float('%.4f' % x) for x in auc_scores]
auc_scores_array = np.reshape(auc_scores, (len(param_dict["lr"]), len(param_dict["lambda"])))
auc_df = pd.DataFrame(
data=auc_scores_array,
index=pd.Index(param_dict["lr"], name="LR"),
columns=pd.Index(param_dict["lambda"], name="Lambda")
)
auc_df
fig, ax = plt.subplots()
sns.heatmap(auc_df, cbar=False, annot=True, fmt=".4g")
###Output
_____no_output_____
###Markdown
More advanced tuning methods like Bayesian Optimization can be used for searching for the optimal model efficiently. The benefit of using, for example, `HyperDrive` from Azure Machine Learning Services, for tuning the parameters, is that, the tuning tasks can be distributed across nodes of a cluster and the optimization can be run concurrently to save the total cost.* Details about how to tune hyper parameters by using Azure Machine Learning Services can be found [here](https://github.com/microsoft/recommenders/tree/master/notebooks/04_model_select_and_optimize).* Note, to enable the tuning task on Azure Machine Learning Services by using HyperDrive, one needs a Docker image to containerize the environment where `xlearn` can be run. The Docker file provided [here](https://github.com/microsoft/recommenders/tree/master/docker) can be used for such purpose. 2.4 Clean up
###Code
tmpdir.cleanup()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Factorization Machine Deep DiveFactorization machine (FM) is one of the representative algorithms that are used for building hybrid recommenders model. The algorithm is powerful in terms of capturing the effects of not just the input features but also their interactions. The algorithm provides better generalization capability and expressiveness compared to other classic algorithms such as SVMs. The most recent research extends the basic FM algorithms by using deep learning techniques, which achieve remarkable improvement in a few practical use cases.This notebook presents a deep dive into the Factorization Machine algorithm, and demonstrates some best practices of using the contemporary FM implementations like [`xlearn`](https://github.com/aksnzhy/xlearn) for dealing with tasks like click-through rate prediction. 1 Factorization Machine 1.1 Factorization Machine FM is an algorithm that uses factorization in prediction tasks with data set of high sparsity. The algorithm was original proposed in [\[1\]](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf). Traditionally, the algorithms such as SVM do not perform well in dealing with highly sparse data that is usually seen in many contemporary problems, e.g., click-through rate prediction, recommendation, etc. FM handles the problem by modeling not just first-order linear components for predicting the label, but also the cross-product of the feature variables in order to capture more generalized correlation between variables and label. In certain occasions, the data that appears in recommendation problems, such as user, item, and feature vectors, can be encoded into a one-hot representation. Under this arrangement, classical algorithms like linear regression and SVM may suffer from the following problems:1. The feature vectors are highly sparse, and thus it makes it hard to optimize the parameters to fit the model efficienly2. Cross-product of features will be sparse as well, and this in turn, reduces the expressiveness of a model if it is designed to capture the high-order interactions between features The FM algorithm is designed to tackle the above two problems by factorizing latent vectors that model the low- and high-order components. The general idea of a FM model is expressed in the following equation: $$\hat{y}(\textbf{x})=w_{0}+\sum^{n}_{i=1}w_{i}x_{i}+\sum^{n}_{i=1}\sum^{n}_{j=i+1}x_{i}x_{j}$$ where $\hat{y}$ and $\textbf{x}$ are the target to predict and input feature vectors, respectively. $w_{i}$ is the model parameters for the first-order component. $$ is the dot product of two latent factors for the second-order interaction of feature variables, and it is defined as $$=\sum^{k}_{f=1}v_{i,f}\cdot v_{j,f}$$ Compared to using fixed parameter for the high-order interaction components, using the factorized vectors increase generalization as well as expressiveness of the model. In addition to this, the computation complexity of the equation (above) is $O(kn)$ where $k$ and $n$ are the dimensionalities of the factorization vector and input feature vector, respectively (see [the paper](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf) for detailed discussion). In practice, usually a two-way FM model is used, i.e., only the second-order feature interactions are considered to favor computational efficiency. 1.2 Field-Aware Factorization Machine Field-aware factorization machine (FFM) is an extension to FM. 
It was originally introduced in [\[2\]](https://www.csie.ntu.edu.tw/~cjlin/papers/ffm.pdf). The advantage of FFM over FM is that, it uses different factorized latent factors for different groups of features. The "group" is called "field" in the context of FFM. Putting features into fields resolves the issue that the latent factors shared by features that intuitively represent different categories of information may not well generalize the correlation. Different from the formula for the 2-order cross product as can be seen above in the FM equation, in the FFM settings, the equation changes to $$\theta_{\text{FFM}}(\textbf{w}\textbf{x})=\sum^{n}_{j1=1}\sum^{n}_{j2=j1+1}x_{j1}x_{j2}$$ where $f_1$ and $f_2$ are the fields of $j_1$ and $j_2$, respectively. Compared to FM, the computational complexity increases to $O(n^2k)$. However, since the latent factors in FFM only need to learn the effect within the field, so the $k$ values in FFM is usually much smaller than that in FM. 1.3 FM/FFM extensions In the recent years, FM/FFM extensions were proposed to enhance the model performance further. The new algorithms leverage the powerful deep learning neural network to improve the generalization capability of the original FM/FFM algorithms. Representatives of the such algorithms are summarized as below. Some of them are implemented and demonstrated in the microsoft/recommenders repository. |Algorithm|Notes|References|Example in Microsoft/Recommenders||---------|-----|----------|---------------------------------||DeepFM|Combination of FM and DNN where DNN handles high-order interactions|[\[3\]](https://arxiv.org/abs/1703.04247)|-||xDeepFM|Combination of FM, DNN, and Compressed Interaction Network, for vectorized feature interactions|[\[4\]](https://dl.acm.org/citation.cfm?id=3220023)|[notebook](../00_quick_start/xdeepfm_criteo.ipynb) / [utilities](../../reco_utils/recommender/deeprec/models/xDeepFM.py)||Factorization Machine Supported Neural Network|Use FM user/item weight vectors as input layers for DNN model|[\[5\]](https://link.springer.com/chapter/10.1007/978-3-319-30671-1_4)|-||Product-based Neural Network|An additional product-wise layer between embedding layer and fully connected layer to improve expressiveness of interactions of features across fields|[\[6\]](https://ieeexplore.ieee.org/abstract/document/7837964)|-||Neural Factorization Machines|Improve the factorization part of FM by using stacks of NN layers to improve non-linear expressiveness|[\[7\]](https://dl.acm.org/citation.cfm?id=3080777)|-||Wide and deep|Combination of linear model (wide part) and deep neural network model (deep part) for memorisation and generalization|[\[8\]](https://dl.acm.org/citation.cfm?id=2988454)|[notebook](../00_quick_start/wide_deep_movielens.ipynb) / [utilities](../../reco_utils/recommender/wide_deep)| 2 Factorization Machine Implementation 2.1 Implementations The following table summarizes the implementations of FM/FFM. Some of them (e.g., xDeepFM and VW) are implemented and/or demonstrated in the microsoft/recommenders repository |Implementation|Language|Notes|Examples in Microsoft/Recommenders||-----------------|------------------|------------------|---------------------||[libfm](https://github.com/srendle/libfm)|C++|Implementation of FM algorithm|-||[libffm](https://github.com/ycjuan/libffm)|C++|Original implemenation of FFM algorithm. 
It is handy in model building, but does not support Python interface|-||[xlearn](https://github.com/aksnzhy/xlearn)|C++ with Python interface|More computationally efficient compared to libffm without loss of modeling effectiveness|[notebook](fm_deep_dive.ipynb)||[Vowpal Wabbit FM](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Matrix-factorization-example)|Online library with estimator API|Easy to use by calling API|[notebook](../02_model_content_based_filtering/vowpal_wabbit_deep_dive.ipynb) / [utilities](../../reco_utils/recommender/vowpal_wabbit)|[microsoft/recommenders xDeepFM](../../reco_utils/recommender/deeprec/models/xDeepFM.py)|Python|Support flexible interface with different configurations of FM and FM extensions, i.e., LR, FM, and/or CIN|[notebook](../00_quick_start/xdeepfm_criteo.ipynb) / [utilities](../../reco_utils/recommender/deeprec/models/xDeepFM.py)| Other than `libfm` and `libffm`, all the other three can be used in a Python environment. * A deep dive of using Vowbal Wabbit for FM model can be found [here](../02_model_content_based_filtering/vowpal_wabbit_deep_dive.ipynb)* A quick start of Microsoft xDeepFM algorithm can be found [here](../00_quick_start/xdeepfm_criteo.ipynb). Therefore, in the example below, only code examples and best practices of using `xlearn` are presented. 2.2 xlearn Setups for using `xlearn`.1. `xlearn` is implemented in C++ and has Python bindings, so it can be directly installed as a Python package from PyPI. The installation of `xlearn` is enabled in the [Recommenders repo environment setup script](../../tools/generate_conda_file.py). One can follow the general setup steps to install the environment as required, in which `xlearn` is installed as well.2. NOTE `xlearn` may require some base libraries installed as prerequisites in the system, e.g., `cmake`. After a succesful creation of the environment, one can load the packages to run `xlearn` in a Jupyter notebook or Python script.
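Before moving on to the library code, here is a minimal NumPy sketch (illustrative only) of the field-aware pairwise term from section 1.2, $\sum^{n}_{j_1=1}\sum^{n}_{j_2=j_1+1}\langle \textbf{v}_{j_1,f_2},\textbf{v}_{j_2,f_1}\rangle x_{j_1}x_{j_2}$, where `field` maps each feature index to its field; all names are made up for the example.

```python
import numpy as np

def ffm_pairwise(x, V, field):
    """Field-aware pairwise term of FFM.

    x: (n,) features; field: (n,) field index of each feature;
    V: (n, n_fields, k) one latent vector per (feature, field) pair.
    """
    n = len(x)
    total = 0.0
    for j1 in range(n):
        for j2 in range(j1 + 1, n):
            # latent vector of feature j1 w.r.t. the field of j2, and vice versa
            total += (V[j1, field[j2]] @ V[j2, field[j1]]) * x[j1] * x[j2]
    return total

# toy example: n=3 features in 2 fields, k=4 latent dimensions
x = np.array([1.0, 1.0, 0.5])
field = np.array([0, 1, 1])
V = np.random.default_rng(1).normal(size=(3, 2, 4))
print(ffm_pairwise(x, V, field))
```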
###Code
import sys
sys.path.append("../../")
import os
import papermill as pm
import scrapbook as sb
from tempfile import TemporaryDirectory
import xlearn as xl
from sklearn.metrics import roc_auc_score
import numpy as np
import pandas as pd
import seaborn as sns
%matplotlib notebook
from matplotlib import pyplot as plt
from reco_utils.common.constants import SEED
from reco_utils.common.timer import Timer
from reco_utils.recommender.deeprec.deeprec_utils import (
download_deeprec_resources, prepare_hparams
)
from reco_utils.recommender.deeprec.models.xDeepFM import XDeepFMModel
from reco_utils.recommender.deeprec.io.iterator import FFMTextIterator
from reco_utils.tuning.parameter_sweep import generate_param_grid
from reco_utils.dataset.pandas_df_utils import LibffmConverter
print("System version: {}".format(sys.version))
print("Xlearn version: {}".format(xl.__version__))
###Output
System version: 3.6.8 |Anaconda, Inc.| (default, Dec 30 2018, 01:22:34)
[GCC 7.3.0]
Xlearn version: 0.4.0
###Markdown
In FM model building, data is usually represented in the libsvm format, i.e., `label feat1:val1 feat2:val2 ...`, where `label` is the target to predict and `val` is the value of each feature `feat`. The FFM algorithm requires data in the libffm format, where each vector is split into several fields containing categorical/numerical features, i.e., `label field1:feat1:val1 field2:feat2:val2 ...`. Among the Microsoft/Recommenders utility functions, [a libffm converter](../../reco_utils/dataset/pandas_df_utils.py) is provided to transform tabular feature vectors into the corresponding libffm representation. For example, the following shows how to transform a synthesized dataset with the `LibffmConverter` module.
###Code
df_feature_original = pd.DataFrame({
'rating': [1, 0, 0, 1, 1],
'field1': ['xxx1', 'xxx2', 'xxx4', 'xxx4', 'xxx4'],
'field2': [3, 4, 5, 6, 7],
'field3': [1.0, 2.0, 3.0, 4.0, 5.0],
'field4': ['1', '2', '3', '4', '5']
})
converter = LibffmConverter().fit(df_feature_original, col_rating='rating')
df_out = converter.transform(df_feature_original)
df_out
print('There are in total {0} fields and {1} features.'.format(converter.field_count, converter.feature_count))
###Output
There are in total 4 fields and 10 features.
###Markdown
To illustrate the use of `xlearn`, the following example builds and evaluates an FFM model on the [Criteo data set](https://labs.criteo.com/category/dataset/), which has already been processed into the libffm format. Sometimes it is useful to know the total numbers of fields and features; when building an FFM model, `xlearn` counts these numbers automatically.
###Code
# Parameters
YAML_FILE_NAME = "xDeepFM.yaml"
TRAIN_FILE_NAME = "cretio_tiny_train"
VALID_FILE_NAME = "cretio_tiny_valid"
TEST_FILE_NAME = "cretio_tiny_test"
MODEL_FILE_NAME = "model.out"
OUTPUT_FILE_NAME = "output.txt"
LEARNING_RATE = 0.2
LAMBDA = 0.002
# The metrics for binary classification options are "acc", "prec", "f1" and "auc"
# for regression, options are "rmse", "mae", "mape"
METRIC = "auc"
EPOCH = 10
OPT_METHOD = "sgd" # options are "sgd", "adagrad" and "ftrl"
tmpdir = TemporaryDirectory()
data_path = tmpdir.name
yaml_file = os.path.join(data_path, YAML_FILE_NAME)
train_file = os.path.join(data_path, TRAIN_FILE_NAME)
valid_file = os.path.join(data_path, VALID_FILE_NAME)
test_file = os.path.join(data_path, TEST_FILE_NAME)
model_file = os.path.join(data_path, MODEL_FILE_NAME)
output_file = os.path.join(data_path, OUTPUT_FILE_NAME)
if not os.path.exists(yaml_file):
download_deeprec_resources(r'https://recodatasets.z20.web.core.windows.net/deeprec/', data_path, 'xdeepfmresources.zip')
###Output
100%|██████████| 10.3k/10.3k [00:01<00:00, 8.67kKB/s]
###Markdown
The following steps follow the [official documentation of `xlearn`](https://xlearn-doc.readthedocs.io/en/latest/index.html) for building a model. To begin with, we do not modify any training parameter values. NOTE: if `xlearn` is run from the command line, the training progress is displayed in the console.
###Code
# Training task
ffm_model = xl.create_ffm() # Use field-aware factorization machine (ffm)
ffm_model.setTrain(train_file) # Set the path of training dataset
ffm_model.setValidate(valid_file) # Set the path of validation dataset
# Parameters:
# 0. task: binary classification
# 1. learning rate: 0.2
# 2. regular lambda: 0.002
# 3. evaluation metric: auc
# 4. number of epochs: 10
# 5. optimization method: sgd
param = {"task":"binary",
"lr": LEARNING_RATE,
"lambda": LAMBDA,
"metric": METRIC,
"epoch": EPOCH,
"opt": OPT_METHOD
}
# Start to train
# The trained model will be stored in model.out
with Timer() as time_train:
ffm_model.fit(param, model_file)
# Prediction task
ffm_model.setTest(test_file) # Set the path of test dataset
ffm_model.setSigmoid() # Convert output to 0-1
# Start to predict
# The output result will be stored in output.txt
with Timer() as time_predict:
ffm_model.predict(model_file, output_file)
###Output
_____no_output_____
###Markdown
The output contains the predicted probabilities (sigmoid scores between 0 and 1) for the test data set. The AUC score is calculated to evaluate the model performance.
###Code
with open(output_file) as f:
predictions = f.readlines()
with open(test_file) as f:
truths = f.readlines()
truths = np.array([float(truth.split(' ')[0]) for truth in truths])
predictions = np.array([float(prediction.strip()) for prediction in predictions])
auc_score = roc_auc_score(truths, predictions)
auc_score
sb.glue('auc_score', auc_score)
print('Training takes {0:.2f}s and predicting takes {1:.2f}s.'.format(time_train.interval, time_predict.interval))
###Output
Training takes 10.77s and predicting takes 0.93s.
###Markdown
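Since `setSigmoid()` yields probabilities rather than hard labels, other metrics such as plain accuracy can be obtained by thresholding the scores, e.g. at 0.5. A minimal sketch reusing the `truths` and `predictions` arrays from the previous cell (the threshold and metric choice here are illustrative):

```python
from sklearn.metrics import accuracy_score

# threshold the sigmoid scores at 0.5 to obtain hard 0/1 predictions
hard_preds = (predictions >= 0.5).astype(int)
print("Accuracy at threshold 0.5: {:.4f}".format(accuracy_score(truths, hard_preds)))
```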
It can be seen that the model building/scoring process is fast and the model performance is good. 2.3 Hyperparameter tuning of `xlearn` The following presents a naive approach to tune the parameters of `xlearn`, which is using grid-search of parameter values to find the optimal combinations. It is worth noting that the original [FFM paper](https://www.csie.ntu.edu.tw/~cjlin/papers/ffm.pdf) gave some hints in terms of the impact of parameters on the sampled Criteo dataset. The following are the parameters that can be tuned in the `xlearn` implementation of FM/FFM algorithm. |Parameter|Description|Default value|Notes||-------------|-----------------|------------------|-----------------||`lr`|Learning rate|0.2|Higher learning rate helps fit a model more efficiently but may also result in overfitting.||`lambda`|Regularization parameter|0.00002|The value needs to be selected empirically to avoid overfitting.||`k`|Dimensionality of the latent factors|4|In FFM the effect of k is not that significant as the algorithm itself considers field where `k` can be small to capture the effect of features within each of the fields.||`init`|Model initialization|0.66|-||`epoch`|Number of epochs|10|Using a larger epoch size will help converge the model to its optimal point|
###Code
param_dict = {
"lr": [0.0001, 0.001, 0.01],
"lambda": [0.001, 0.01, 0.1]
}
param_grid = generate_param_grid(param_dict)
auc_scores = []
with Timer() as time_tune:
for param in param_grid:
ffm_model = xl.create_ffm()
ffm_model.setTrain(train_file)
ffm_model.setValidate(valid_file)
ffm_model.fit(param, model_file)
ffm_model.setTest(test_file)
ffm_model.setSigmoid()
ffm_model.predict(model_file, output_file)
with open(output_file) as f:
predictions = f.readlines()
with open(test_file) as f:
truths = f.readlines()
truths = np.array([float(truth.split(' ')[0]) for truth in truths])
        predictions = np.array([float(prediction.strip()) for prediction in predictions])
auc_scores.append(roc_auc_score(truths, predictions))
print('Tuning by grid search takes {0:.2} min'.format(time_tune.interval / 60))
auc_scores = [float('%.4f' % x) for x in auc_scores]
auc_scores_array = np.reshape(auc_scores, (len(param_dict["lr"]), len(param_dict["lambda"])))
auc_df = pd.DataFrame(
data=auc_scores_array,
index=pd.Index(param_dict["lr"], name="LR"),
columns=pd.Index(param_dict["lambda"], name="Lambda")
)
auc_df
fig, ax = plt.subplots()
sns.heatmap(auc_df, cbar=False, annot=True, fmt=".4g")
###Output
_____no_output_____
###Markdown
More advanced tuning methods like Bayesian Optimization can be used for searching for the optimal model efficiently. The benefit of using, for example, `HyperDrive` from Azure Machine Learning Services, for tuning the parameters, is that, the tuning tasks can be distributed across nodes of a cluster and the optimization can be run concurrently to save the total cost.* Details about how to tune hyper parameters by using Azure Machine Learning Services can be found [here](https://github.com/microsoft/recommenders/tree/master/notebooks/04_model_select_and_optimize).* Note, to enable the tuning task on Azure Machine Learning Services by using HyperDrive, one needs a Docker image to containerize the environment where `xlearn` can be run. The Docker file provided [here](https://github.com/microsoft/recommenders/tree/master/docker) can be used for such purpose. 2.4 Clean up
###Code
tmpdir.cleanup()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Factorization Machine Deep DiveFactorization machine (FM) is one of the representative algorithms that are used for building hybrid recommenders model. The algorithm is powerful in terms of capturing the effects of not just the input features but also their interactions. The algorithm provides better generalization capability and expressiveness compared to other classic algorithms such as SVMs. The most recent research extends the basic FM algorithms by using deep learning techniques, which achieve remarkable improvement in a few practical use cases.This notebook presents a deep dive into the Factorization Machine algorithm, and demonstrates some best practices of using the contemporary FM implementations like [`xlearn`](https://github.com/aksnzhy/xlearn) for dealing with tasks like click-through rate prediction. 1 Factorization Machine 1.1 Factorization Machine FM is an algorithm that uses factorization in prediction tasks with data set of high sparsity. The algorithm was original proposed in [\[1\]](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf). Traditionally, the algorithms such as SVM do not perform well in dealing with highly sparse data that is usually seen in many contemporary problems, e.g., click-through rate prediction, recommendation, etc. FM handles the problem by modeling not just first-order linear components for predicting the label, but also the cross-product of the feature variables in order to capture more generalized correlation between variables and label. In certain occasions, the data that appears in recommendation problems, such as user, item, and feature vectors, can be encoded into a one-hot representation. Under this arrangement, classical algorithms like linear regression and SVM may suffer from the following problems:1. The feature vectors are highly sparse, and thus it makes it hard to optimize the parameters to fit the model efficienly2. Cross-product of features will be sparse as well, and this in turn, reduces the expressiveness of a model if it is designed to capture the high-order interactions between features The FM algorithm is designed to tackle the above two problems by factorizing latent vectors that model the low- and high-order components. The general idea of a FM model is expressed in the following equation: $$\hat{y}(\textbf{x})=w_{0}+\sum^{n}_{i=1}w_{i}x_{i}+\sum^{n}_{i=1}\sum^{n}_{j=i+1}x_{i}x_{j}$$ where $\hat{y}$ and $\textbf{x}$ are the target to predict and input feature vectors, respectively. $w_{i}$ is the model parameters for the first-order component. $$ is the dot product of two latent factors for the second-order interaction of feature variables, and it is defined as $$=\sum^{k}_{f=1}v_{i,f}\cdot v_{j,f}$$ Compared to using fixed parameter for the high-order interaction components, using the factorized vectors increase generalization as well as expressiveness of the model. In addition to this, the computation complexity of the equation (above) is $O(kn)$ where $k$ and $n$ are the dimensionalities of the factorization vector and input feature vector, respectively (see [the paper](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf) for detailed discussion). In practice, usually a two-way FM model is used, i.e., only the second-order feature interactions are considered to favor computational efficiency. 1.2 Field-Aware Factorization Machine Field-aware factorization machine (FFM) is an extension to FM. 
It was originally introduced in [\[2\]](https://www.csie.ntu.edu.tw/~cjlin/papers/ffm.pdf). The advantage of FFM over FM is that, it uses different factorized latent factors for different groups of features. The "group" is called "field" in the context of FFM. Putting features into fields resolves the issue that the latent factors shared by features that intuitively represent different categories of information may not well generalize the correlation. Different from the formula for the 2-order cross product as can be seen above in the FM equation, in the FFM settings, the equation changes to $$\theta_{\text{FFM}}(\textbf{w}\textbf{x})=\sum^{n}_{j1=1}\sum^{n}_{j2=j1+1}x_{j1}x_{j2}$$ where $f_1$ and $f_2$ are the fields of $j_1$ and $j_2$, respectively. Compared to FM, the computational complexity increases to $O(n^2k)$. However, since the latent factors in FFM only need to learn the effect within the field, so the $k$ values in FFM is usually much smaller than that in FM. 1.3 FM/FFM extensions In the recent years, FM/FFM extensions were proposed to enhance the model performance further. The new algorithms leverage the powerful deep learning neural network to improve the generalization capability of the original FM/FFM algorithms. Representatives of the such algorithms are summarized as below. Some of them are implemented and demonstrated in the microsoft/recommenders repository. |Algorithm|Notes|References|Example in Microsoft/Recommenders||---------|-----|----------|---------------------------------||DeepFM|Combination of FM and DNN where DNN handles high-order interactions|[\[3\]](https://arxiv.org/abs/1703.04247)|-||xDeepFM|Combination of FM, DNN, and Compressed Interaction Network, for vectorized feature interactions|[\[4\]](https://dl.acm.org/citation.cfm?id=3220023)|[notebook](../00_quick_start/xdeepfm_criteo.ipynb) / [utilities](../../recommenders/models/deeprec/models/xDeepFM.py)||Factorization Machine Supported Neural Network|Use FM user/item weight vectors as input layers for DNN model|[\[5\]](https://link.springer.com/chapter/10.1007/978-3-319-30671-1_4)|-||Product-based Neural Network|An additional product-wise layer between embedding layer and fully connected layer to improve expressiveness of interactions of features across fields|[\[6\]](https://ieeexplore.ieee.org/abstract/document/7837964)|-||Neural Factorization Machines|Improve the factorization part of FM by using stacks of NN layers to improve non-linear expressiveness|[\[7\]](https://dl.acm.org/citation.cfm?id=3080777)|-||Wide and deep|Combination of linear model (wide part) and deep neural network model (deep part) for memorisation and generalization|[\[8\]](https://dl.acm.org/citation.cfm?id=2988454)|[notebook](../00_quick_start/wide_deep_movielens.ipynb) / [utilities](../../recommenders/models/wide_deep)| 2 Factorization Machine Implementation 2.1 Implementations The following table summarizes the implementations of FM/FFM. Some of them (e.g., xDeepFM and VW) are implemented and/or demonstrated in the microsoft/recommenders repository |Implementation|Language|Notes|Examples in Microsoft/Recommenders||-----------------|------------------|------------------|---------------------||[libfm](https://github.com/srendle/libfm)|C++|Implementation of FM algorithm|-||[libffm](https://github.com/ycjuan/libffm)|C++|Original implemenation of FFM algorithm. 
It is handy in model building, but does not support Python interface|-||[xlearn](https://github.com/aksnzhy/xlearn)|C++ with Python interface|More computationally efficient compared to libffm without loss of modeling effectiveness|[notebook](fm_deep_dive.ipynb)||[Vowpal Wabbit FM](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Matrix-factorization-example)|Online library with estimator API|Easy to use by calling API|[notebook](../02_model_content_based_filtering/vowpal_wabbit_deep_dive.ipynb) / [utilities](../../recommenders/models/vowpal_wabbit)|[microsoft/recommenders xDeepFM](../../recommenders/models/deeprec/models/xDeepFM.py)|Python|Support flexible interface with different configurations of FM and FM extensions, i.e., LR, FM, and/or CIN|[notebook](../00_quick_start/xdeepfm_criteo.ipynb) / [utilities](../../recommenders/models/deeprec/models/xDeepFM.py)| Other than `libfm` and `libffm`, all the other three can be used in a Python environment. * A deep dive of using Vowbal Wabbit for FM model can be found [here](../02_model_content_based_filtering/vowpal_wabbit_deep_dive.ipynb)* A quick start of Microsoft xDeepFM algorithm can be found [here](../00_quick_start/xdeepfm_criteo.ipynb). Therefore, in the example below, only code examples and best practices of using `xlearn` are presented. 2.2 xlearn Setups for using `xlearn`.1. `xlearn` is implemented in C++ and has Python bindings, so it can be directly installed as a Python package from PyPI. The installation of `xlearn` is enabled in the [Recommenders repo environment setup script](../../tools/generate_conda_file.py). One can follow the general setup steps to install the environment as required, in which `xlearn` is installed as well.2. NOTE `xlearn` may require some base libraries installed as prerequisites in the system, e.g., `cmake`. After a succesful creation of the environment, one can load the packages to run `xlearn` in a Jupyter notebook or Python script.
###Code
import sys
import os
import papermill as pm
import scrapbook as sb
from tempfile import TemporaryDirectory
import xlearn as xl
from sklearn.metrics import roc_auc_score
import numpy as np
import pandas as pd
import seaborn as sns
%matplotlib notebook
from matplotlib import pyplot as plt
from recommenders.utils.constants import SEED
from recommenders.utils.timer import Timer
from recommenders.datasets.download_utils import maybe_download, unzip_file
from recommenders.tuning.parameter_sweep import generate_param_grid
from recommenders.datasets.pandas_df_utils import LibffmConverter
print("System version: {}".format(sys.version))
print("Xlearn version: {}".format(xl.__version__))
###Output
System version: 3.6.13 |Anaconda, Inc.| (default, Feb 23 2021, 21:15:04)
[GCC 7.3.0]
Xlearn version: 0.4.0
###Markdown
In FM model building, data is usually represented in the libsvm format, i.e., `label feat1:val1 feat2:val2 ...`, where `label` is the target to predict and `val` is the value of each feature `feat`. The FFM algorithm requires data in the libffm format, where each vector is split into several fields containing categorical/numerical features, i.e., `label field1:feat1:val1 field2:feat2:val2 ...`. Among the Microsoft/Recommenders utility functions, [a libffm converter](../../recommenders/dataset/pandas_df_utils.py) is provided to transform tabular feature vectors into the corresponding libffm representation. For example, the following shows how to transform a synthesized dataset with the `LibffmConverter` module.
###Code
df_feature_original = pd.DataFrame({
'rating': [1, 0, 0, 1, 1],
'field1': ['xxx1', 'xxx2', 'xxx4', 'xxx4', 'xxx4'],
'field2': [3, 4, 5, 6, 7],
'field3': [1.0, 2.0, 3.0, 4.0, 5.0],
'field4': ['1', '2', '3', '4', '5']
})
converter = LibffmConverter().fit(df_feature_original, col_rating='rating')
df_out = converter.transform(df_feature_original)
df_out
print('There are in total {0} fields and {1} features.'.format(converter.field_count, converter.feature_count))
###Output
There are in total 4 fields and 10 features.
###Markdown
To illustrate the use of `xlearn`, the following example builds and evaluates an FFM model on the [Criteo data set](https://labs.criteo.com/category/dataset/), which has already been processed into the libffm format. Sometimes it is useful to know the total numbers of fields and features; when building an FFM model, `xlearn` counts these numbers automatically.
###Code
# Model parameters
LEARNING_RATE = 0.2
LAMBDA = 0.002
EPOCH = 10
OPT_METHOD = "sgd" # options are "sgd", "adagrad" and "ftrl"
# The metrics for binary classification options are "acc", "prec", "f1" and "auc"
# for regression, options are "rmse", "mae", "mape"
METRIC = "auc"
# Paths
YAML_FILE_NAME = "xDeepFM.yaml"
TRAIN_FILE_NAME = "cretio_tiny_train"
VALID_FILE_NAME = "cretio_tiny_valid"
TEST_FILE_NAME = "cretio_tiny_test"
MODEL_FILE_NAME = "model.out"
OUTPUT_FILE_NAME = "output.txt"
tmpdir = TemporaryDirectory()
data_path = tmpdir.name
yaml_file = os.path.join(data_path, YAML_FILE_NAME)
train_file = os.path.join(data_path, TRAIN_FILE_NAME)
valid_file = os.path.join(data_path, VALID_FILE_NAME)
test_file = os.path.join(data_path, TEST_FILE_NAME)
model_file = os.path.join(data_path, MODEL_FILE_NAME)
output_file = os.path.join(data_path, OUTPUT_FILE_NAME)
assets_url = "https://recodatasets.z20.web.core.windows.net/deeprec/xdeepfmresources.zip"
assets_file = maybe_download(assets_url, work_directory=data_path)
unzip_file(assets_file, data_path)
###Output
100%|██████████| 10.3k/10.3k [00:00<00:00, 55.9kKB/s]
###Markdown
The following steps follow the [official documentation of `xlearn`](https://xlearn-doc.readthedocs.io/en/latest/index.html) for building a model. To begin with, we do not modify any training parameter values. NOTE: if `xlearn` is run from the command line, the training progress is displayed in the console.
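As a side note, the same estimator-style API also exposes a linear model and a plain FM learner via `xl.create_linear()` and `xl.create_fm()`; unlike FFM, they consume libsvm/CSV rather than libffm input, so the file paths in the sketch below are placeholders rather than the Criteo files used in this notebook:

```python
# sketch only: plain FM with the same workflow (placeholder libsvm paths)
fm_model = xl.create_fm()
fm_model.setTrain("train.libsvm")
fm_model.setValidate("valid.libsvm")
fm_model.fit({"task": "binary", "lr": 0.2, "lambda": 0.002, "metric": "auc"},
             "fm_model.out")
```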
###Code
# Training task
ffm_model = xl.create_ffm() # Use field-aware factorization machine (ffm)
ffm_model.setTrain(train_file) # Set the path of training dataset
ffm_model.setValidate(valid_file) # Set the path of validation dataset
# Parameters:
# 0. task: binary classification
# 1. learning rate: 0.2
# 2. regular lambda: 0.002
# 3. evaluation metric: auc
# 4. number of epochs: 10
# 5. optimization method: sgd
param = {"task":"binary",
"lr": LEARNING_RATE,
"lambda": LAMBDA,
"metric": METRIC,
"epoch": EPOCH,
"opt": OPT_METHOD
}
# Start to train
# The trained model will be stored in model.out
with Timer() as time_train:
ffm_model.fit(param, model_file)
print(f"Training time: {time_train}")
# Prediction task
ffm_model.setTest(test_file) # Set the path of test dataset
ffm_model.setSigmoid() # Convert output to 0-1
# Start to predict
# The output result will be stored in output.txt
with Timer() as time_predict:
ffm_model.predict(model_file, output_file)
print(f"Prediction time: {time_predict}")
###Output
Prediction time: 0.6435
###Markdown
The output contains the predicted probabilities (sigmoid scores between 0 and 1) for the test data set. The AUC score is calculated to evaluate the model performance.
###Code
with open(output_file) as f:
predictions = f.readlines()
with open(test_file) as f:
truths = f.readlines()
truths = np.array([float(truth.split(' ')[0]) for truth in truths])
predictions = np.array([float(prediction.strip()) for prediction in predictions])
auc_score = roc_auc_score(truths, predictions)
auc_score
sb.glue('auc_score', auc_score)
###Output
_____no_output_____
###Markdown
It can be seen that the model building/scoring process is fast and the model performance is good. 2.3 Hyperparameter tuning of `xlearn` The following presents a naive approach to tune the parameters of `xlearn`, which is using grid-search of parameter values to find the optimal combinations. It is worth noting that the original [FFM paper](https://www.csie.ntu.edu.tw/~cjlin/papers/ffm.pdf) gave some hints in terms of the impact of parameters on the sampled Criteo dataset. The following are the parameters that can be tuned in the `xlearn` implementation of FM/FFM algorithm. |Parameter|Description|Default value|Notes||-------------|-----------------|------------------|-----------------||`lr`|Learning rate|0.2|Higher learning rate helps fit a model more efficiently but may also result in overfitting.||`lambda`|Regularization parameter|0.00002|The value needs to be selected empirically to avoid overfitting.||`k`|Dimensionality of the latent factors|4|In FFM the effect of k is not that significant as the algorithm itself considers field where `k` can be small to capture the effect of features within each of the fields.||`init`|Model initialization|0.66|-||`epoch`|Number of epochs|10|Using a larger epoch size will help converge the model to its optimal point|
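Besides the exhaustive grid search shown in the next cell, the same training/scoring routine can be plugged into a Bayesian-style optimizer. A minimal hedged sketch using `hyperopt` (assumed to be installed separately; the search space and evaluation budget are illustrative):

```python
import numpy as np
from hyperopt import fmin, hp, tpe

def objective(p):
    """Train/score one FFM configuration and return a loss for hyperopt."""
    model = xl.create_ffm()
    model.setTrain(train_file)
    model.setValidate(valid_file)
    model.fit({"task": "binary", "lr": p["lr"], "lambda": p["lambda"],
               "metric": METRIC, "epoch": EPOCH, "opt": OPT_METHOD}, model_file)
    model.setTest(test_file)
    model.setSigmoid()
    model.predict(model_file, output_file)
    y_pred = np.loadtxt(output_file)
    with open(test_file) as f:
        y_true = np.array([float(line.split(' ')[0]) for line in f])
    return -roc_auc_score(y_true, y_pred)  # hyperopt minimizes, so negate AUC

space = {"lr": hp.loguniform("lr", np.log(1e-4), np.log(1e-1)),
         "lambda": hp.loguniform("lambda", np.log(1e-3), np.log(1e-1))}
best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=20)
print(best)
```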
###Code
param_dict = {
"lr": [0.0001, 0.001, 0.01],
"lambda": [0.001, 0.01, 0.1]
}
param_grid = generate_param_grid(param_dict)
auc_scores = []
with Timer() as time_tune:
for param in param_grid:
ffm_model = xl.create_ffm()
ffm_model.setTrain(train_file)
ffm_model.setValidate(valid_file)
ffm_model.fit(param, model_file)
ffm_model.setTest(test_file)
ffm_model.setSigmoid()
ffm_model.predict(model_file, output_file)
with open(output_file) as f:
predictions = f.readlines()
with open(test_file) as f:
truths = f.readlines()
truths = np.array([float(truth.split(' ')[0]) for truth in truths])
predictions = np.array([float(prediction.strip('')) for prediction in predictions])
auc_scores.append(roc_auc_score(truths, predictions))
print('Tuning by grid search takes {0:.2} min'.format(time_tune.interval / 60))
auc_scores = [float('%.4f' % x) for x in auc_scores]
auc_scores_array = np.reshape(auc_scores, (len(param_dict["lr"]), len(param_dict["lambda"])))
auc_df = pd.DataFrame(
data=auc_scores_array,
index=pd.Index(param_dict["lr"], name="LR"),
columns=pd.Index(param_dict["lambda"], name="Lambda")
)
auc_df
fig, ax = plt.subplots()
sns.heatmap(auc_df, cbar=False, annot=True, fmt=".4g")
###Output
_____no_output_____
###Markdown
More advanced tuning methods like Bayesian Optimization can be used for searching for the optimal model efficiently. The benefit of using, for example, `HyperDrive` from Azure Machine Learning Services, for tuning the parameters, is that, the tuning tasks can be distributed across nodes of a cluster and the optimization can be run concurrently to save the total cost.* Details about how to tune hyper parameters by using Azure Machine Learning Services can be found [here](https://github.com/microsoft/recommenders/tree/master/notebooks/04_model_select_and_optimize).* Note, to enable the tuning task on Azure Machine Learning Services by using HyperDrive, one needs a Docker image to containerize the environment where `xlearn` can be run. The Docker file provided [here](https://github.com/microsoft/recommenders/tree/master/docker) can be used for such purpose. 2.4 Clean up
###Code
tmpdir.cleanup()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Factorization Machine Deep DiveFactorization machine (FM) is one of the representative algorithms that are used for building hybrid recommenders model. The algorithm is powerful in terms of capturing the effects of not just the input features but also their interactions. The algorithm provides better generalization capability and expressiveness compared to other classic algorithms such as SVMs. The most recent research extends the basic FM algorithms by using deep learning techniques, which achieve remarkable improvement in a few practical use cases.This notebook presents a deep dive into the Factorization Machine algorithm, and demonstrates some best practices of using the contemporary FM implementations like [`xlearn`](https://github.com/aksnzhy/xlearn) for dealing with tasks like click-through rate prediction. 1 Factorization Machine 1.1 Factorization Machine FM is an algorithm that uses factorization in prediction tasks with data set of high sparsity. The algorithm was original proposed in [\[1\]](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf). Traditionally, the algorithms such as SVM do not perform well in dealing with highly sparse data that is usually seen in many contemporary problems, e.g., click-through rate prediction, recommendation, etc. FM handles the problem by modeling not just first-order linear components for predicting the label, but also the cross-product of the feature variables in order to capture more generalized correlation between variables and label. In certain occasions, the data that appears in recommendation problems, such as user, item, and feature vectors, can be encoded into a one-hot representation. Under this arrangement, classical algorithms like linear regression and SVM may suffer from the following problems:1. The feature vectors are highly sparse, and thus it makes it hard to optimize the parameters to fit the model efficienly2. Cross-product of features will be sparse as well, and this in turn, reduces the expressiveness of a model if it is designed to capture the high-order interactions between features The FM algorithm is designed to tackle the above two problems by factorizing latent vectors that model the low- and high-order components. The general idea of a FM model is expressed in the following equation: $$\hat{y}(\textbf{x})=w_{0}+\sum^{n}_{i=1}w_{i}x_{i}+\sum^{n}_{i=1}\sum^{n}_{j=i+1}x_{i}x_{j}$$ where $\hat{y}$ and $\textbf{x}$ are the target to predict and input feature vectors, respectively. $w_{i}$ is the model parameters for the first-order component. $$ is the dot product of two latent factors for the second-order interaction of feature variables, and it is defined as $$=\sum^{k}_{f=1}v_{i,f}\cdot v_{j,f}$$ Compared to using fixed parameter for the high-order interaction components, using the factorized vectors increase generalization as well as expressiveness of the model. In addition to this, the computation complexity of the equation (above) is $O(kn)$ where $k$ and $n$ are the dimensionalities of the factorization vector and input feature vector, respectively (see [the paper](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf) for detailed discussion). In practice, usually a two-way FM model is used, i.e., only the second-order feature interactions are considered to favor computational efficiency. 1.2 Field-Aware Factorization Machine Field-aware factorization machine (FFM) is an extension to FM. 
It was originally introduced in [\[2\]](https://www.csie.ntu.edu.tw/~cjlin/papers/ffm.pdf). The advantage of FFM over FM is that it uses different factorized latent factors for different groups of features. The "group" is called "field" in the context of FFM. Putting features into fields resolves the issue that the latent factors shared by features that intuitively represent different categories of information may not generalize the correlation well. Different from the formula for the second-order cross product seen above in the FM equation, in the FFM setting the equation changes to $$\theta_{\text{FFM}}(\textbf{w},\textbf{x})=\sum^{n}_{j_1=1}\sum^{n}_{j_2=j_1+1}\langle\textbf{w}_{j_1,f_2},\textbf{w}_{j_2,f_1}\rangle x_{j_1}x_{j_2}$$ where $f_1$ and $f_2$ are the fields of $j_1$ and $j_2$, respectively. Compared to FM, the computational complexity increases to $O(n^2k)$. However, since the latent factors in FFM only need to learn the effect within a field, the $k$ value in FFM is usually much smaller than that in FM. 1.3 FM/FFM extensions In recent years, FM/FFM extensions were proposed to enhance the model performance further. The new algorithms leverage powerful deep learning neural networks to improve the generalization capability of the original FM/FFM algorithms. Representative algorithms are summarized below. Some of them are implemented and demonstrated in the microsoft/recommenders repository. |Algorithm|Notes|References|Example in Microsoft/Recommenders||--------------------|---------------------|------------------------|------------------------||DeepFM|Combination of FM and DNN where DNN handles high-order interactions|[\[3\]](https://arxiv.org/abs/1703.04247)|-||xDeepFM|Combination of FM, DNN, and Compressed Interaction Network, for vectorized feature interactions|[\[4\]](https://dl.acm.org/citation.cfm?id=3220023)|[notebook](https://github.com/microsoft/recommenders/blob/master/notebooks/00_quick_start/xdeepfm_criteo.ipynb) / [utilities](https://github.com/microsoft/recommenders/blob/master/reco_utils/recommender/deeprec/models/xDeepFM.py)||Factorization Machine Supported Neural Network|Use FM user/item weight vectors as input layers for DNN model|[\[5\]](https://link.springer.com/chapter/10.1007/978-3-319-30671-1_4)|-||Product-based Neural Network|An additional product-wise layer between embedding layer and fully connected layer to improve expressiveness of interactions of features across fields|[\[6\]](https://ieeexplore.ieee.org/abstract/document/7837964)|-||Neural Factorization Machines|Improve the factorization part of FM by using stacks of NN layers to improve non-linear expressiveness|[\[7\]](https://dl.acm.org/citation.cfm?id=3080777)|-||Wide and deep|Combination of linear model (wide part) and deep neural network model (deep part) for memorisation and generalization|[\[8\]](https://dl.acm.org/citation.cfm?id=2988454)|[notebook](https://github.com/microsoft/recommenders/blob/master/notebooks/00_quick_start/wide_deep_movielens.ipynb) / [utilities](https://github.com/microsoft/recommenders/tree/master/reco_utils/recommender/wide_deep)| 2 Factorization Machine Implementation 2.1 Implementations The following table summarizes the implementations of FM/FFM.
Some of them (e.g., xDeepFM and VW) are implemented and/or demonstrated in the microsoft/recommenders repository |Implementation|Language|Notes|Examples in Microsoft/Recommenders||-----------------|------------------|------------------|---------------------||[libfm](https://github.com/srendle/libfm)|C++|Implementation of FM algorithm|-||[libffm](https://github.com/ycjuan/libffm)|C++|Original implementation of FFM algorithm. It is handy in model building, but does not support a Python interface|-||[xlearn](https://github.com/aksnzhy/xlearn)|C++ with Python interface|More computationally efficient compared to libffm without loss of modeling effectiveness|[notebook](https://github.com/microsoft/recommenders/blob/master/notebooks/02_model/fm_deep_dive.ipynb)||[Vowpal Wabbit FM](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Matrix-factorization-example)|Online library with estimator API|Easy to use by calling API|[notebook](https://github.com/microsoft/recommenders/blob/master/notebooks/02_model/vowpal_wabbit_deep_dive.ipynb) / [utilities](https://github.com/microsoft/recommenders/tree/master/reco_utils/recommender/vowpal_wabbit)||[microsoft/recommenders xDeepFM](https://github.com/microsoft/recommenders/blob/master/reco_utils/recommender/deeprec/models/xDeepFM.py)|Python|Supports a flexible interface with different configurations of FM and FM extensions, i.e., LR, FM, and/or CIN|[notebook](https://github.com/microsoft/recommenders/blob/master/notebooks/00_quick_start/xdeepfm_criteo.ipynb) / [utilities](https://github.com/microsoft/recommenders/blob/master/reco_utils/recommender/deeprec/models/xDeepFM.py)| Other than `libfm` and `libffm`, all of the other three can be used in a Python environment. * A deep dive into using Vowpal Wabbit for an FM model can be found [here](https://github.com/microsoft/recommenders/blob/master/notebooks/02_model/vowpal_wabbit_deep_dive.ipynb)* A quick start of the Microsoft xDeepFM algorithm can be found [here](https://github.com/microsoft/recommenders/blob/master/notebooks/00_quick_start/xdeepfm_criteo.ipynb). Therefore, in the example below, only code examples and best practices of using `xlearn` are presented. 2.2 xlearn Setups for using `xlearn`.1. `xlearn` is implemented in C++ and has Python bindings, so it can be directly installed as a Python package from PyPI. The installation of `xlearn` is enabled in the [Recommenders repo environment setup script](../../tools/generate_conda_file.py). One can follow the general setup steps to install the environment as required, in which `xlearn` is installed as well.2. NOTE `xlearn` may require some base libraries installed as prerequisites in the system, e.g., `cmake`. After a successful creation of the environment, one can load the packages to run `xlearn` in a Jupyter notebook or Python script.
###Code
import sys
sys.path.append("../../")
import os
import papermill as pm
from tempfile import TemporaryDirectory
import xlearn as xl
from sklearn.metrics import roc_auc_score
import numpy as np
import pandas as pd
import seaborn as sns
%matplotlib notebook
from matplotlib import pyplot as plt
from reco_utils.common.constants import SEED
from reco_utils.common.timer import Timer
from reco_utils.recommender.deeprec.deeprec_utils import (
download_deeprec_resources, prepare_hparams
)
from reco_utils.recommender.deeprec.models.xDeepFM import XDeepFMModel
from reco_utils.recommender.deeprec.io.iterator import FFMTextIterator
from reco_utils.tuning.parameter_sweep import generate_param_grid
from reco_utils.dataset.pandas_df_utils import LibffmConverter
print("System version: {}".format(sys.version))
print("Xlearn version: {}".format(xl.__version__))
###Output
System version: 3.6.8 |Anaconda, Inc.| (default, Dec 30 2018, 01:22:34)
[GCC 7.3.0]
Xlearn version: 0.4.0
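Now that the packages are loaded, the two-way FM scoring equation from Section 1.1 can be made concrete with a minimal NumPy sketch. This is purely illustrative and is not part of `xlearn`; all sizes and values below are made up for the example.

```python
import numpy as np

# Toy dimensions (hypothetical): n features, k latent dimensions
n, k = 6, 3
rng = np.random.default_rng(0)

w0 = 0.1                      # global bias w_0
w = rng.normal(size=n)        # first-order weights w_i
V = rng.normal(size=(n, k))   # latent factors v_i, one row per feature
x = rng.normal(size=n)        # a single (dense) feature vector

# Naive O(k n^2) evaluation of the pairwise term <v_i, v_j> x_i x_j
pairwise = sum(V[i] @ V[j] * x[i] * x[j] for i in range(n) for j in range(i + 1, n))

# Equivalent O(k n) form from Rendle (2010)
pairwise_fast = 0.5 * np.sum((V.T @ x) ** 2 - (V.T ** 2) @ (x ** 2))

y_hat = w0 + w @ x + pairwise
print(y_hat, w0 + w @ x + pairwise_fast)  # both pairwise computations agree
```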
###Markdown
In FM model building, data is usually represented in the libsvm data format. That is, `label feat1:val1 feat2:val2 ...`, where `label` is the target to predict, and `val` is the value of each feature `feat`. The FFM algorithm requires data to be represented in the libffm format, where each vector is split into several fields with categorical/numerical features inside. That is, `label field1:feat1:val1 field2:feat2:val2 ...`. In the Microsoft/Recommenders utility functions, [a libffm converter](https://github.com/microsoft/recommenders/blob/290dd920d4a6a4d3bff71dd9ee7273be0c02dbbc/reco_utils/dataset/pandas_df_utils.py#L86) is provided to transform tabular feature vectors into the corresponding libffm representation. For example, the following shows how to transform the format of a synthesized dataset by using the `LibffmConverter` module.
###Code
df_feature_original = pd.DataFrame({
'rating': [1, 0, 0, 1, 1],
'field1': ['xxx1', 'xxx2', 'xxx4', 'xxx4', 'xxx4'],
'field2': [3, 4, 5, 6, 7],
'field3': [1.0, 2.0, 3.0, 4.0, 5.0],
'field4': ['1', '2', '3', '4', '5']
})
converter = LibffmConverter().fit(df_feature_original, col_rating='rating')
df_out = converter.transform(df_feature_original)
df_out
print('There are in total {0} fields and {1} features.'.format(converter.field_count, converter.feature_count))
###Output
There are in total 4 fields and 10 features.
###Markdown
To illustrate the use of `xlearn`, the following example uses the [Criteo data set](https://labs.criteo.com/category/dataset/), which has already been processed into the libffm format, for building and evaluating an FFM model with `xlearn`. Sometimes, it is important to know the total numbers of fields and features. When building an FFM model, `xlearn` can count these numbers automatically.
###Code
# Parameters
YAML_FILE_NAME = "xDeepFM.yaml"
TRAIN_FILE_NAME = "cretio_tiny_train"
VALID_FILE_NAME = "cretio_tiny_valid"
TEST_FILE_NAME = "cretio_tiny_test"
MODEL_FILE_NAME = "model.out"
OUTPUT_FILE_NAME = "output.txt"
LEARNING_RATE = 0.2
LAMBDA = 0.002
# The metrics for binary classification options are "acc", "prec", "f1" and "auc"
# for regression, options are "rmse", "mae", "mape"
METRIC = "auc"
EPOCH = 10
OPT_METHOD = "sgd" # options are "sgd", "adagrad" and "ftrl"
tmpdir = TemporaryDirectory()
data_path = tmpdir.name
yaml_file = os.path.join(data_path, YAML_FILE_NAME)
train_file = os.path.join(data_path, TRAIN_FILE_NAME)
valid_file = os.path.join(data_path, VALID_FILE_NAME)
test_file = os.path.join(data_path, TEST_FILE_NAME)
model_file = os.path.join(data_path, MODEL_FILE_NAME)
output_file = os.path.join(data_path, OUTPUT_FILE_NAME)
if not os.path.exists(yaml_file):
download_deeprec_resources(r'https://recodatasets.blob.core.windows.net/deeprec/', data_path, 'xdeepfmresources.zip')
###Output
100%|██████████| 10.3k/10.3k [00:01<00:00, 8.67kKB/s]
###Markdown
The following steps are from the [official documentation of `xlearn`](https://xlearn-doc.readthedocs.io/en/latest/index.html) for building a model. To begin with, we do not modify any training parameter values. NOTE: if `xlearn` is run from the command line, the training process can be displayed in the console.
###Code
# Training task
ffm_model = xl.create_ffm() # Use field-aware factorization machine (ffm)
ffm_model.setTrain(train_file) # Set the path of training dataset
ffm_model.setValidate(valid_file) # Set the path of validation dataset
# Parameters:
# 0. task: binary classification
# 1. learning rate: 0.2
# 2. regular lambda: 0.002
# 3. evaluation metric: auc
# 4. number of epochs: 10
# 5. optimization method: sgd
param = {"task":"binary",
"lr": LEARNING_RATE,
"lambda": LAMBDA,
"metric": METRIC,
"epoch": EPOCH,
"opt": OPT_METHOD
}
# Start to train
# The trained model will be stored in model.out
with Timer() as time_train:
ffm_model.fit(param, model_file)
# Prediction task
ffm_model.setTest(test_file) # Set the path of test dataset
ffm_model.setSigmoid() # Convert output to 0-1
# Start to predict
# The output result will be stored in output.txt
with Timer() as time_predict:
ffm_model.predict(model_file, output_file)
###Output
_____no_output_____
###Markdown
The output is the predicted score for each sample in the testing data set, mapped into the range 0-1 by the sigmoid transformation. The AUC score is calculated to evaluate the model performance.
###Code
with open(output_file) as f:
predictions = f.readlines()
with open(test_file) as f:
truths = f.readlines()
truths = np.array([float(truth.split(' ')[0]) for truth in truths])
predictions = np.array([float(prediction.strip()) for prediction in predictions])
auc_score = roc_auc_score(truths, predictions)
auc_score
pm.record('auc_score', auc_score)
print('Training takes {0:.2f}s and predicting takes {1:.2f}s.'.format(time_train.interval, time_predict.interval))
###Output
Training takes 10.77s and predicting takes 0.93s.
###Markdown
It can be seen that the model building/scoring process is fast and the model performance is good. 2.3 Hyperparameter tuning of `xlearn` The following presents a naive approach to tuning the parameters of `xlearn`: grid-searching over parameter values to find the optimal combination. It is worth noting that the original [FFM paper](https://www.csie.ntu.edu.tw/~cjlin/papers/ffm.pdf) gave some hints in terms of the impact of parameters on the sampled Criteo dataset. The following are the parameters that can be tuned in the `xlearn` implementation of the FM/FFM algorithm. |Parameter|Description|Default value|Notes||-------------|-----------------|------------------|-----------------||`lr`|Learning rate|0.2|A higher learning rate helps fit a model more quickly but may also result in overfitting.||`lambda`|Regularization parameter|0.00002|The value needs to be selected empirically to avoid overfitting.||`k`|Dimensionality of the latent factors|4|In FFM the effect of `k` is not that significant, because each latent factor only needs to capture the effect of features within a single field, so `k` can be small.||`init`|Model initialization|0.66|-||`epoch`|Number of epochs|10|Using a larger number of epochs will help the model converge to its optimal point|
###Code
param_dict = {
"lr": [0.0001, 0.001, 0.01],
"lambda": [0.001, 0.01, 0.1]
}
param_grid = generate_param_grid(param_dict)
auc_scores = []
with Timer() as time_tune:
for param in param_grid:
ffm_model = xl.create_ffm()
ffm_model.setTrain(train_file)
ffm_model.setValidate(valid_file)
ffm_model.fit(param, model_file)
ffm_model.setTest(test_file)
ffm_model.setSigmoid()
ffm_model.predict(model_file, output_file)
with open(output_file) as f:
predictions = f.readlines()
with open(test_file) as f:
truths = f.readlines()
truths = np.array([float(truth.split(' ')[0]) for truth in truths])
predictions = np.array([float(prediction.strip('')) for prediction in predictions])
auc_scores.append(roc_auc_score(truths, predictions))
print('Tuning by grid search takes {0:.2} min'.format(time_tune.interval / 60))
auc_scores = [float('%.4f' % x) for x in auc_scores]
auc_scores_array = np.reshape(auc_scores, (len(param_dict["lr"]), len(param_dict["lambda"])))
auc_df = pd.DataFrame(
data=auc_scores_array,
index=pd.Index(param_dict["lr"], name="LR"),
columns=pd.Index(param_dict["lambda"], name="Lambda")
)
auc_df
fig, ax = plt.subplots()
sns.heatmap(auc_df, cbar=False, annot=True, fmt=".4g")
###Output
_____no_output_____
###Markdown
More advanced tuning methods like Bayesian optimization can be used to search for the optimal model more efficiently. The benefit of using, for example, `HyperDrive` from Azure Machine Learning Services for tuning the parameters is that the tuning tasks can be distributed across nodes of a cluster and the optimization can be run concurrently to save total cost.* Details about how to tune hyperparameters by using Azure Machine Learning Services can be found [here](https://github.com/microsoft/recommenders/tree/master/notebooks/04_model_select_and_optimize).* Note, to enable the tuning task on Azure Machine Learning Services by using HyperDrive, one needs a Docker image to containerize the environment where `xlearn` can be run. The Docker file provided [here](https://github.com/microsoft/recommenders/tree/master/docker) can be used for this purpose. 2.4 Clean up
###Code
tmpdir.cleanup()
###Output
_____no_output_____ |
Exercises/02_Matplotlib_Exercises.ipynb | ###Markdown
___ ___ Matplotlib Exercises Welcome to the exercises for reviewing matplotlib! Take your time with these; matplotlib can be tricky to understand at first. These are relatively simple plots, but they can be hard if this is your first time with matplotlib, so feel free to reference the solutions as you go along. Also don't worry if you find the matplotlib syntax frustrating: we actually won't be using it that often throughout the course, as we will switch to using seaborn and pandas' built-in visualization capabilities. But those are built off of matplotlib, which is why it is still important to get exposure to it!** * NOTE: ALL THE COMMANDS FOR PLOTTING A FIGURE SHOULD ALL GO IN THE SAME CELL. SEPARATING THEM OUT INTO MULTIPLE CELLS MAY CAUSE NOTHING TO SHOW UP. * ** ExercisesFollow the instructions to recreate the plots using this data: Data
###Code
import numpy as np
x = np.arange(0,100)
y = x*2
z = x**2
###Output
_____no_output_____
###Markdown
** Import matplotlib.pyplot as plt and set %matplotlib inline if you are using the jupyter notebook. What command do you use if you aren't using the jupyter notebook?**
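One possible answer, sketched below (outside Jupyter an explicit call is needed to display the figure):

```python
import matplotlib.pyplot as plt
# %matplotlib inline   # only needed inside a Jupyter notebook

# Outside the notebook, render the figure with:
# plt.show()
```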
###Code
###Output
_____no_output_____
###Markdown
Exercise 1** Follow along with these steps: *** ** Create a figure object called fig using plt.figure() *** ** Use add_axes to add an axis to the figure canvas at [0,0,1,1]. Call this new axis ax. *** ** Plot (x,y) on that axes and set the labels and titles to match the plot below:**
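A possible solution sketch (the label and title strings are placeholders, since the target plot is not reproduced here):

```python
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.plot(x, y)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('title')
```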
###Code
###Output
_____no_output_____
###Markdown
Exercise 2** Create a figure object and put two axes on it, ax1 and ax2. Located at [0,0,1,1] and [0.2,0.5,.2,.2] respectively.**
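One way to start:

```python
fig = plt.figure()
ax1 = fig.add_axes([0, 0, 1, 1])
ax2 = fig.add_axes([0.2, 0.5, 0.2, 0.2])
```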
###Code
###Output
_____no_output_____
###Markdown
** Now plot (x,y) on both axes. And call your figure object to show it.**
###Code
###Output
_____no_output_____
###Markdown
Exercise 3** Create the plot below by adding two axes to a figure object at [0,0,1,1] and [0.2,0.5,.4,.4]**
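A possible sketch:

```python
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax2 = fig.add_axes([0.2, 0.5, 0.4, 0.4])
```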
###Code
###Output
_____no_output_____
###Markdown
** Now use x,y, and z arrays to recreate the plot below. Notice the xlimits and y limits on the inserted plot:**
###Code
###Output
_____no_output_____
###Markdown
Exercise 4** Use plt.subplots(nrows=1, ncols=2) to create the plot below.**
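A starting point:

```python
fig, axes = plt.subplots(nrows=1, ncols=2)
```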
###Code
###Output
_____no_output_____
###Markdown
** Now plot (x,y) and (x,z) on the axes. Play around with the linewidth and style**
###Code
###Output
_____no_output_____
###Markdown
** See if you can resize the plot by adding the figsize() argument in plt.subplots() and copying and pasting your previous code.**
###Code
###Output
_____no_output_____ |
A.Signal_Acquisition/A005 Continuous Body Temperature Measurement using an IR Senso.ipynb | ###Markdown
Continuous Body Temperature Measurement using an IR Sensor Keywords: ```Body Temperature```, ```Thermometer```, ```Kalman filter```,```IIR filters``` I. Introduction One of the most common symptoms present in infectious diseases is a fever, characterized by an increase in body temperature. Typically, this is monitored using conventional thermometers, which provide a simple measurement at a certain point in time. However, for specific situations, it could be advantageous to provide real-time temperature measurements, so as to more closely monitor the progression of the disease. This is particularly important in pandemic situations, where the close monitoring of symptoms is essential. 1. Background The **human body temperature** exhibits relatively small changes over a small period of time. As with every measurement, in this case we can expect to obtain artifacts that may or may not be correctable. As such, we can classify the artifacts as _biological_ (nonreversible) and _non-biological_ (reversible) artifacts. Some of the most common biological artifacts are the ingestion of food and the circadian cycle, which affect the stability of temperature measurements. These are not "artifacts" *per se*, as they occur naturally, but they can lead to misunderstandings.When one ingests food, naturally the core body temperature slightly increases. Also, the human body temperature changes during the day according to the circadian cycle. This means that the temperature may appear to be increasing due to the presence of fever when in fact this increase is totally normal. Therefore, for a slight increase in body temperature to be considered a fever, it must persist for more than a few hours, in order to take into account these "artifacts". *Image: Average variation of human body temperature during the day* However, some artifacts, such as misplacement of the sensor, bad contact of the sensor itself, dependence on thermal equilibrium and influence of ambient temperature (as for infrared sensors), as previously explained in [Kalman Filtering](../Kalman/Kalman.ipynb) and [IIR Filtering](../IIR/IIR.ipynb), can be easily corrected, with simple filtering steps. This is very important in continuous measurements since we need to have a way of ignoring these artifacts. Some infrared sensors exhibit a significant dependence on ambient temperature. This is the case with the sensor used for this project. When the sensor measures a drop in ambient temperature, _e.g._ the subject going outside or a gust of wind, there is a notable increase in the measured body temperature to values compatible with a fever, which must be corrected. *Image: Examples of artefacts when using an IR temperature sensor* Therefore, the key aspect in monitoring temperature in real time is **continuous measurement**, which requires data processing to remove artefacts and ensure a stable and clean reading. However, almost all thermometers that provide continuous measurement are unaffordable.The goal of this notebook is to teach you the basics of how to construct and program a simple, affordable and wireless thermometer that provides continuous real-time temperature measurement, using an IR temperature sensor and an Arduino-style Microcontroller.For this type of sensor, the most accurate and stable measurement site is in the ear, as the tympanic membrane shares its blood supply with the hypothalamus, the thermoregulatory center of the human body.
*Image: Example of a more advanced setup, for continuous and punctual measurement of the human body temperature, using IR (black) and IC (blue) temperature sensors simultaneously.* 2. Objectives* Understand the natural physiology of human body temperature and the need for filtration* Understand the practical applications of Kalman and IIR filters* Develop a simple, affordable and wireless thermometer 3. Materials * Joy-It Node MCU ESP32 (it is also possible to use other ESP32 boards, like for example the Sparkfun ESP32 Thing)* Jumper wires* Breadboard (optional)* DFRobot's IR I2C Thermometer Sensor Melexis MLX90614 (it is also possible to use other variants of the Melexis MLX90614 IR Sensor)* Computer with Arduino IDE installed II. Experimental 1. Requirements and Set-up Basic knowledge on how to use the Arduino IDE and on how to manage simple electronic operations is recommended. It is also recommended that you read the notebooks concerning [ESP32 Firebase Connection](../ESP32_Firebase_Connection/ESP32_Firebase_Connection.ipynb), [Kalman Filtering](../Kalman/Kalman.ipynb) and [IIR Filtering](../IIR/IIR.ipynb).In order to connect the Joy-It NodeMCU ESP32 board to the Arduino IDE, follow the instructions in the board's [user manual.](https://joy-it.net/files/files/Produkte/SBC-NodeMCU-ESP32/SBC-NodeMCU-ESP32-Manual-20200320.pdf) As mentioned, you will need to download the indicated USB drivers, to connect the board via micro USB to your PC. This will power the board and connect it to the IDE.For other ESP32 boards, look up their user manual in order to set them up properly.Once you've connected the ESP32 board, install the Adafruit MLX90614 Library in the Arduino IDE. This is the library for the IR Sensor. 2. Circuit In order to set up the circuit assembly for this project, connect the IR sensor to the ESP32 board as is shown in the figure. *Image: Circuit assembly with a breadboard, jumper wires, the Joy-It NodeMCU ESP32 board and the DFROBOT MLX90614 IR sensor.* The ground (GND) pin should be connected to the GND on the board. The VCC pin on the sensor should be connected to the 3V3 entry on the board. Make sure that, if you aren't using this specific variant of the IR sensor, its supply voltage is 3.3V, which is the output voltage of this pin on the ESP32 board. Connect the SDA pin on the sensor board to the D21 pin and the SCL pin to the D22 pin on the board. These are the SDA and SCL pins on this specific ESP32 board; if you have a different board, choose the appropriate SDA and SCL pins. These are the pins that will be used by the sensor to transmit the temperature data to the board. Warning! Make sure that the output of your board matches the supply voltage on your sensor! 3. Code Once you've assembled the circuit and connected the board to your computer with a micro USB cable, and finished setting up the board's connection with your PC, it's time to program the board to not only output the sensor value, but also process the data in real time in order to obtain continuous, second by second temperature data without noise or artifacts. Note To view and analyze the data, you can either use the Arduino IDE's serial monitor and serial plotter, or download the software "CoolTerm", which will store the outputted values in a text document. First, make sure to include the necessary libraries, the Adafruit MLX90614 library and the "Wire" library.
###Code
#include <Wire.h>
#include <Adafruit_MLX90614.h>
Adafruit_MLX90614 mlx = Adafruit_MLX90614();
###Output
_____no_output_____
###Markdown
In the Arduino IDE, programs have two functions: ***setup( )*** and ***loop( )***. *Setup( )* runs once, while, *loop( )* runs much like a typical ***while*** or ***for*** loop, except it doesn't stop, and runs continuously while the board is turned on.The *setup( )* function for this project is as follows:
###Code
void setup() {
Serial.begin(115200);
mlx.begin();
Serial.println("IR Sensor Test");
}
###Output
_____no_output_____
###Markdown
As for the data processing, we will be using two filters, a [Kalman Filter](../Kalman/Kalman.ipynb) and an [Adaptive IIR filter](../IIR/IIR.ipynb). As mentioned, there are two separate notebooks regarding the theoretical principles behind these filters, their applications and how to implement them. We will be using Kalman filtration to remove high frequency noise caused by variations in the sensor measurement, in a similar way to how you would use a moving average to smooth out the signal. We will build three Kalman filters: one for an initial pass of the object temperature data, to smooth it and remove the variations, one for the second pass of the object temperature after the application of the adaptive IIR filter to smooth out the final result, and one for the ambient temperature, in order to remove noise and better use this signal to determine the strength of the IIR filter.In order to implement this filter in the Arduino IDE, we will need to first initialize the necessary variables.
###Code
//variables for kalman 1st pass
double Q=0.004;
double R=1.000;
double xhat=36.500;
double P=2.000;
double xhatminus=0.000;
double Pminus=0.000;
double K=0.000;
double M=36.500;
double vm[]={xhat,P,xhatminus,Pminus,K,M,Q,R};
//variables for ambient kalman
double xhatA=36.000;
double PA=1.000;
double xhatminusA=0.000;
double PminusA=0.000;
double KA=0.000;
double MA=36.000;
double vmA[]={xhatA,PA,xhatminusA,PminusA,KA,MA,Q,R};
//variables for kalman 2nd pass
double QPOST=0.0001;
double RPOST=1.000;
double xhatPOST=36.000;
double PPOST=1.000;
double xhatminusPOST=0.000;
double PminusPOST=0.000;
double KPOST=0.000;
double MPOST=36.000;
double vmPOST[]={xhatPOST,PPOST,xhatminusPOST,PminusPOST,KPOST,MPOST,QPOST,RPOST};
###Output
_____no_output_____
###Markdown
***Q*** and ***R*** are the tuning parameters, which were adjusted to obtain the best and most appropriate filtration strength. The variables ***xhat*** and ***xhatminus***, will be the initial values for the current and previous state, *i.e.* the initial values for the object temperature. ***P*** and ***Pminus*** are the error covariance of the current and previous states, ***K*** is the Kalman gain and ***M*** is the current measured data. To learn more about how Kalman filtration works, it is recommended that you read the notebook on [Kalman Filtration](../Kalman/Kalman.ipynb).These variables are placed inside a vector that the function ***kalman(double v[ ])*** will receive as an argument. The function will then compute the new values for each variable in this vector according to the equations of the algorithm:
###Code
static void kalman(double v[]) {
  // v = {xhat, P, xhatminus, Pminus, K, M (measurement), Q, R}
  v[2]=v[0];                  // time update: predicted state = previous estimate
  v[3]=v[1]+v[6];             // predicted error covariance: Pminus = P + Q
  v[4]=(v[3])/(v[3]+v[7]);    // Kalman gain: K = Pminus / (Pminus + R)
  v[0]=v[2]+v[4]*(v[5]-v[0]); // measurement update: xhat = xhatminus + K*(M - xhatminus)
  v[1]=(1-v[4])*v[3];         // updated error covariance: P = (1 - K)*Pminus
}
###Output
_____no_output_____
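For readers who prefer Python, the scalar Kalman update implemented above can be sketched as follows. This is only an illustration: the variable names mirror the entries of the Arduino vector `v[]`, and the example measurements are made up.

```python
def kalman_update(xhat, P, measurement, Q=0.004, R=1.0):
    """One scalar Kalman step: predict, then correct with the new measurement."""
    xhat_minus = xhat                                    # v[2] = v[0]
    P_minus = P + Q                                      # v[3] = v[1] + v[6]
    K = P_minus / (P_minus + R)                          # v[4]
    xhat = xhat_minus + K * (measurement - xhat_minus)   # v[0]
    P = (1 - K) * P_minus                                # v[1]
    return xhat, P

# Smooth a few noisy readings around 36.5 degC (arbitrary values)
xhat, P = 36.5, 2.0
for m in [36.7, 36.4, 36.9, 36.5]:
    xhat, P = kalman_update(xhat, P, m)
print(round(xhat, 3))
```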
###Markdown
Subsequently, we will attempt to remove punctual artifacts using adaptive filtration. As is detailed in the [separate notebook](../IIR/IIR.ipynb) concerning this type of filtration, it uses a correlated signal to selectively or adaptively filter a signal, varying the strength of the filter based on this correlated signal. The IR Sensor being used for this project has the particularity of outputting both the temperature of the object to which it is pointed (the object temperature, or body temperature), and the ambient temperature. Additionally, most artifacts observed in the data from the object temperature correspond to a drop in ambient temperature. That is, when there is a drop in ambient temperature, like a gust of wind hitting the sensor, a sharp rise in the object temperature can be noted, as can be observed in the following figure: *Image: Raw (unfiltered) data obtained from the IR sensor during a test period of around 25 minutes. We can note the artefact caused by blowing air into the ear where the sensor is placed, causing the ambient temperature to decrease and the object temperature measurement to increase. A substantial amount of high frequency noise can also be observed, justifying the use of Kalman filtration to smooth the data.* In order to correct these artifacts, we can apply an adaptive IIR filter to the object temperature data that is activated when the derivative of the ambient temperature reaches a certain threshold. As mentioned in the notebook regarding [adaptive IIR filters](../IIR/IIR.ipynb), the output of this filter depends on a variable alpha, which varies according to a parameter related to the correlated signal. In this case, the value of alpha will depend on the derivative of the ambient temperature. First, we must initialize the variables for the IIR filter, and like we did with the Kalman filter, we store them in a vector:
###Code
//variables for IIR
double y=36.000;
double alpha=0.000;
double a=0.00004;
double tamb=1.000;
double vmf[]={tamb,alpha,y,a};
###Output
_____no_output_____
###Markdown
***y*** is the object temperature measurement, ***alpha*** is the variable alpha, ***a*** is a tuning parameter, and ***tamb*** is the value for the ambient temperature measurement.The function for this filter receives the vectors containing the Kalman variables of the object and ambient temperatures. This is because the data will initially be filtered with a Kalman filter, and therefore the filtered data for the current measurement is contained within these vectors. Alpha is calculated using a sigmoid function, in order to vary it between 0 and 1, depending on the derivative of the ambient temperature (which is the difference between the previous and current values of the ambient temperature).
###Code
static void filtertemp(double vmIIR[], double v[], double vA[]) {
  // vmIIR = {previous ambient temp, alpha, filtered object temp, tuning constant a}
  // v     = object-temperature Kalman state (v[0] = current filtered object temp)
  // vA    = ambient-temperature Kalman state (vA[0] = current filtered ambient temp)
  double deriv=fabs(vA[0]-vmIIR[0]);          // absolute change in ambient temperature
  vmIIR[1]=(2/(1+exp(-vmIIR[3]/deriv)))-1;    // alpha in [0,1]: small when ambient changes fast
  double aux=vmIIR[2];
  vmIIR[2]=vmIIR[1]*v[0]+(1-vmIIR[1])*aux;    // IIR update: y = alpha*x_new + (1-alpha)*y_old
  vmIIR[0]=vA[0];                             // store ambient temp for the next derivative
}
###Output
_____no_output_____
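The behaviour of the adaptive weight `alpha` computed above can be previewed with a short Python sketch. It is purely illustrative; `a` is the same tuning constant used in the Arduino code, and the derivative values are arbitrary.

```python
import math

a = 0.00004  # tuning constant from the Arduino sketch

def alpha(deriv, a=a):
    """Sigmoid-style weight in [0, 1]: close to 0 when the ambient temperature changes quickly."""
    return 2.0 / (1.0 + math.exp(-a / deriv)) - 1.0

for deriv in [1e-5, 1e-4, 1e-3, 1e-2]:
    print(deriv, round(alpha(deriv), 4))

# Large ambient-temperature derivatives give alpha close to 0, so the IIR update
# y = alpha * x_new + (1 - alpha) * y_old mostly keeps the previous estimate and
# the artifact in the object temperature is suppressed.
```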
###Markdown
Finally, in the ***loop( )*** function, we apply a correction at the start by causing a small delay and averaging out the first values, in order to obtain a signal that is stabilized right away, and then apply the filters (Kalman first, then adaptive IIR, then Kalman again to smooth out the signal).
###Code
void loop() {
double c=mlx.readObjectTempC();
double a=mlx.readAmbientTempC();  // ambient temperature, used to drive the adaptive IIR filter
//checking for very low or very high values
if(c>42.0 || c<30.0) {
c=d;
}
d=c;
if(a>40.0 || a<20.0) {
a=b;
}
b=a;
//code for initial stabilization
if(counter1<10) {
counter1+=1;
}
else {
if (counter2<10){
rr+=c/10;
counter2+=1;
}
else {
if(counter3<1) {
vmf[2]=rr;
vm[0]=rr;
vm[5]=rr;
vmPOST[0]=rr;
vmPOST[5]=rr;
counter3+=1;
}
else {
//ambient temperature Kalman
vmA[5]=a;
kalman(vmA);
//object temperature Kalman 1st pass
vm[5]=c;
kalman(vm);
//object temperature IIR filter
filtertemp(vmf,vm,vmA);
//object temperature Kalman 2nd pass
vmPOST[5]=vmf[2];
kalman(vmPOST);
//fever alert message
if(vmPOST[0]>37.5) {
Serial.println("ALERT: POSSIBLE FEVER DETECTED");
}
else {
if(vmPOST[0]>37.2) {
Serial.println("ALERT: POSSIBLE PRE-FEBRILE STATE DETECTED");
}
}
}
//raw temp
//Serial.print(mlx.readObjectTempC(), 3);
//Serial.print(",");
//result:
Serial.println(vmPOST[0], 3);
}
}
delay(1000);  // sample roughly once per second
}
###Output
_____no_output_____
###Markdown
Note Because of the way the initial stabilization works with the IIR filter, this device will work best if it is placed in the ear before being turned on to begin the measurement. It will take 20 seconds to stabilize before outputting any value! The full code is as follows:
###Code
/***************************************************
IAS - Instituto Superior Técnico
MSc in Biomedical Engineering
This code uses MLX90614 IR non-contact temperature sensor and
NodeMCU ESP32
****************************************************/
#include <Wire.h>
#include <Adafruit_MLX90614.h>
Adafruit_MLX90614 mlx = Adafruit_MLX90614();
//variables for kalman 1st pass
double Q=0.004;
double R=1.000;
double xhat=36.500;
double P=2.000;
double xhatminus=0.000;
double Pminus=0.000;
double K=0.000;
double M=36.500;
double vm[]={xhat,P,xhatminus,Pminus,K,M,Q,R};
//variables for ambient kalman
double xhatA=36.000;
double PA=1.000;
double xhatminusA=0.000;
double PminusA=0.000;
double KA=0.000;
double MA=36.000;
double vmA[]={xhatA,PA,xhatminusA,PminusA,KA,MA,Q,R};
//variables for IIR
double y=36.000;
double alpha=0.000;
double a=0.00004;
double tamb=1.000;
double vmf[]={tamb,alpha,y,a};
//variables for kalman 2nd pass
double QPOST=0.0001;
double RPOST=1.000;
double xhatPOST=36.000;
double PPOST=1.000;
double xhatminusPOST=0.000;
double PminusPOST=0.000;
double KPOST=0.000;
double MPOST=36.000;
double vmPOST[]={xhatPOST,PPOST,xhatminusPOST,PminusPOST,KPOST,MPOST,QPOST,RPOST};
//variables for initial stabilization
int counter1=0;
int counter2=0;
int counter3=0;
double rr=0.0;
double d=30.0;
double b=20.0;
void setup() {
Serial.begin(115200);
mlx.begin();
Serial.println("IR Sensor Test");
}
static void kalman(double v[]) {
v[2]=v[0];
v[3]=v[1]+v[6];
v[4]=(v[3])/(v[3]+v[7]);
v[0]=v[2]+v[4]*(v[5]-v[0]);
v[1]=(1-v[4])*v[3];
}
static void filtertemp(double vmIIR[], double v[], double vA[]) {
double deriv=fabs(vA[0]-vmIIR[0]);
vmIIR[1]=(2/(1+exp(-vmIIR[3]/deriv)))-1;
double aux=vmIIR[2];
vmIIR[2]=vmIIR[1]*v[0]+(1-vmIIR[1])*aux;
vmIIR[0]=vA[0];
}
void loop() {
double c=mlx.readObjectTempC();
double a=mlx.readAmbientTempC();  // ambient temperature, used to drive the adaptive IIR filter
//checking for very low or very high values
if(c>42.0 || c<30.0) {
c=d;
}
d=c;
if(a>40.0 || a<20.0) {
a=b;
}
b=a;
//code for initial stabilization
if(counter1<10) {
counter1+=1;
}
else {
if (counter2<10){
rr+=c/10;
counter2+=1;
}
else {
if(counter3<1) {
vmf[2]=rr;
vm[0]=rr;
vm[5]=rr;
vmPOST[0]=rr;
vmPOST[5]=rr;
counter3+=1;
}
else {
//ambient temperature Kalman
vmA[5]=a;
kalman(vmA);
//object temperature Kalman 1st pass
vm[5]=c;
kalman(vm);
//object temperature IIR filter
filtertemp(vmf,vm,vmA);
//object temperature Kalman 2nd pass
vmPOST[5]=vmf[2];
kalman(vmPOST);
//fever alert message
if(vmPOST[0]>37.5) {
Serial.println("ALERT: POSSIBLE FEVER DETECTED");
}
else {
if(vmPOST[0]>37.2) {
Serial.println("ALERT: POSSIBLE PRE-FEBRILE STATE DETECTED");
}
}
}
//raw temp
//Serial.print(mlx.readObjectTempC(), 3);
//Serial.print(",");
//result:
Serial.println(vmPOST[0], 3);
}
}
delay(1000);
}
###Output
_____no_output_____ |
cn/.ipynb_checkpoints/sicp-3-22-checkpoint.ipynb | ###Markdown
SICP Exercise (3.22) — solution notes: a procedural representation of queues. SICP Exercise 3.22 asks us to implement a procedure that builds a queue. As the exercise points out, we can either store the queue in a pair as in Exercise 3.21, or represent the queue as a procedure that contains local state variables. A call to (make-queue) returns a procedure that holds the queue's data in its local environment and invokes operations in message-passing style. This implementation approach has already been used in many earlier exercises, so here we go straight to the code:
###Code
(define (make-queue)
(let ((front-ptr '())
(rear-ptr '()))
(define (set-front-ptr! item)
(set! front-ptr item))
(define (set-rear-ptr! item)
(set! rear-ptr item))
(define (empty-queue?)
(null? front-ptr))
(define (front-queue)
(if (empty-queue?)
(error "FRONT called with an empty queue" front-ptr)
(car front-ptr)))
(define (insert-queue! item)
(let ((new-pair (cons item '())))
(cond ((empty-queue?)
(set-front-ptr! new-pair)
(set-rear-ptr! new-pair)
front-ptr)
(else
(set-cdr! rear-ptr new-pair)
(set-rear-ptr! new-pair)
front-ptr))))
(define (delete-queue!)
(cond ((empty-queue?)
(error "DELETE! called with an empty queue" front-ptr))
(else
(set-front-ptr! (cdr front-ptr))
front-ptr)))
(define (dispatch m)
(cond ((eq? m 'delete-queue! ) delete-queue!)
((eq? m 'insert-queue! ) insert-queue!)
((eq? m 'front-queue ) front-queue)
((eq? m 'empty-queue? ) empty-queue?)
(else (error "Unknown action!" m))))
dispatch))
###Output
_____no_output_____
###Markdown
Then we wrap everything in a test function:
###Code
(define (start-test-3-22)
(define testing-queue (make-queue))
(display "it is empty now?") (newline)
(display ((testing-queue 'empty-queue?)))
(newline)
(display ((testing-queue 'insert-queue!) 'a))
(newline)
(display ((testing-queue 'insert-queue!) 'b))
(newline)
(display ((testing-queue 'insert-queue!) 'a))
(newline)
(display ((testing-queue 'front-queue)))
(newline)
(display ((testing-queue 'delete-queue!)))
(newline)
(display ((testing-queue 'front-queue)))
(newline)
(display ((testing-queue 'delete-queue!)))
(newline)
(display ((testing-queue 'front-queue)))
(newline))
(start-test-3-22)
###Output
it is empty now?
True
(a)
(a b)
(a b a)
a
(b a)
b
(a)
a
|
doc/ipynb/FaST-LMM.ipynb | ###Markdown
FaST-LMM Manual
=====
Factored Spectrally Transformed Linear Mixed Models
========
Version 0.6.0
FaST-LMM Team, July 20, 2021 IntroductionFaST-LMM, which stands for Factored Spectrally Transformed Linear Mixed Models, is a program for performing genome-wide association studies (GWAS) on datasets of all sizes, up to one millions samples.See [FaST-LMM's README.md](https://github.com/fastlmm/FaST-LMM/blob/master/README.md) for installation instructions, documentation, code, and a bibliography. Contacts* Email the developers at [email protected].* [Join](mailto:[email protected]?subject=Subscribe) the user discussion and announcement list (or use [web sign up](https://mail.python.org/mailman3/lists/fastlmm-user.python.org)).* [Open an issue](https://github.com/fastlmm/FaST-LMM/issues) on GitHub. Citing FaST-LMMIf you use FaST-LMM in any published work, please cite [the relevant manuscript(s)](https://github.com/fastlmm/FaST-LMM/blob/master/README.md) describing it. Data preparationThis version of FaST-LMM is designed for use with randomly ascertained data with Gaussian residuals. If you have case-control data with substantial ascertainment bias, you should first transform your phenotype(s) using [LEAP](https://github.com/omerwe/LEAP) (Weissbrod _et al._, _arXiv_ 2014). If you are analyzing continuous phenotypes with non-Gaussian residuals, you should first transform your phenotype(s) using [Warped-LMM](https://github.com/PMBio/warpedLMM) (Fusi et al., _Nature Commun_ 2014).FaST-LMM uses four input files containing (1) the SNP data to be tested, (2) the SNP data used to determine the genetic similarity matrix (GSM) between individuals (which can be different from 1), (3) the phenotype data, and (4, optionally) a set of covariates.SNP files can be in PLINK format (ped/map, tped/tfam, bed/bim/fam, or fam/dat/map). For the most speed, use the binary format in SNP major order. See the PLINK manual http://pngu.mgh.harvard.edu/~purcell/plink/ (Purcell et al., _Am J Hum Genet_ 2007) for further details. FaST-LMM also supports Hdf5 file format http://www.hdfgroup.org/HDF5/whatishdf5.html. See https://github.com/fastlmm/PySnpTools for more details. Note that each SNP will be standardized to have mean zero and standard deviation one across all individuals before processing. Missing values are mean imputed.The required file containing the phenotype uses the PLINK alternate phenotype format with no header. The covariate file also uses this format (with additional columns for multiple covariates). Notebook preparation and general useTo prepare this notebook to run analyses, please run the following script.
###Code
# set some ipython notebook properties
%matplotlib inline
# set degree of verbosity (adapt to INFO for more verbose output)
import logging
logging.basicConfig(level=logging.WARNING)
# set figure sizes
import pylab
pylab.rcParams['figure.figsize'] = (10.0, 8.0)
#pylab.plot([1,2,3],[4,5,6])
# set display width for pandas data frames
import pandas as pd
pd.set_option('display.width', 1000)
###Output
_____no_output_____
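###Markdown
As an aside on the input-file formats described under Data preparation above, the next cell is a small hedged sketch (ours, not part of the original manual) that writes a toy phenotype file and a toy covariate file in the PLINK alternate phenotype format; all IDs, values, and file names are made up for illustration.
###Code
# Hedged sketch: toy phenotype and covariate files in PLINK alternate
# phenotype format (FID, IID, value columns; whitespace-separated, no header).
# Every ID, value, and file name below is hypothetical.
import pandas as pd
pheno = pd.DataFrame({"FID": ["fam0", "fam0", "fam1"],
                      "IID": ["ind0", "ind1", "ind0"],
                      "pheno": [0.27, -1.13, 0.55]})
pheno.to_csv("toy_pheno.txt", sep=" ", header=False, index=False)
covar = pd.DataFrame({"FID": ["fam0", "fam0", "fam1"],
                      "IID": ["ind0", "ind1", "ind0"],
                      "cov1": [1.0, 0.0, 1.0],   # extra columns mean extra covariates
                      "cov2": [23.0, 31.0, 45.0]})
covar.to_csv("toy_covar.txt", sep=" ", header=False, index=False)
###Output
_____no_output_____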
###Markdown
If you would like to run any of the code below from the command line, first copy it into a file (_e.g._, `test.py`), and then run it by typing `python test.py` at the command line. If you would like to see all the options for a function, just type `?` followed by the function name at an ipython prompt. Single-SNP association testing Traditional analysis: LMM(all) First, let's run a standard LMM analysis in which the GSM uses (almost) all available SNPs. The model for this analysis is called LMM(all) in [Widmer et al., Scientific Reports 2014](http://www.nature.com/articles/srep06874). We'll apply this model to the synthetic data in `tests\datasets\synth`. The data has 500 samples with 5000 SNPs, and was generated from a Balding-Nichols model with FST=0.05. When using a linear mixed model for association analysis, it is important to avoid proximal contamination ([Lippert _et al._, _Nat Meth_ 2011](http://www.nature.com/nmeth/journal/v8/n10/abs/nmeth.1681.html)). To understand proximal contamination, first note that a LMM with no fixed effects, using a realized relationship matrix for the GSM (as FaST-LMM does), is mathematically equivalent to linear regression of the SNPs on the phenotype, with weights integrated over independent Normal distributions having the same variance (_e.g._, Hayes _et al._, _Genet Res_ 2009). That is, a LMM using a given set of SNPs for the GSM is equivalent to a form of linear regression using those SNPs as covariates to correct for confounding. This equivalence implies that, when testing a given SNP, that SNP (and SNPs physically close to it) should be excluded from the computation of the GSM. If not, when testing a particular SNP, we would also be using that same SNP as a covariate, making the log likelihood of the null model higher than it should be, thus leading to deflation of the test statistic and loss of power. Excluding the SNP you are testing and those SNPs in close proximity to it from the GSM in a naïve way is extremely computationally expensive. A computationally efficient approach for performing the exclusion is to use a GSM computed from all but chromosome $i$ when testing SNPs on chromosome $i$ ([Lippert _et al._, _Nat Meth_ 2011](http://www.nature.com/nmeth/journal/v8/n10/abs/nmeth.1681.html)). We call this approach leave out one chromosome (LOOC). The analysis here does this.
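Before the full run below, here is a small hedged sketch (ours, not from the manual) of how the LOOC partition can be expressed with the same pysnptools-style indexing used throughout this notebook; `looc_partition` is a hypothetical helper, and `snp_reader` is assumed to be a `Bed` reader like the ones used later.
###Code
# Hedged sketch of the LOOC partition: when testing SNPs on chromosome c,
# the GSM is built only from SNPs on the other chromosomes.
# `looc_partition` is our own illustrative helper, not a FaST-LMM function;
# it assumes a pysnptools Bed reader whose pos[:, 0] holds the chromosome.
import numpy as np
def looc_partition(snp_reader, c):
    on_c = snp_reader.pos[:, 0] == c
    test_snps = snp_reader[:, on_c]    # SNPs to be tested on chromosome c
    gsm_snps = snp_reader[:, ~on_c]    # SNPs used to build the GSM for this test
    return test_snps, gsm_snps
# e.g. loop over every chromosome present in the data:
# for c in np.unique(snp_reader.pos[:, 0]):
#     test_snps, gsm_snps = looc_partition(snp_reader, c)
###Output
_____no_output_____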
###Code
# import the algorithm
import numpy as np
from fastlmm.association import single_snp
from fastlmm.util import example_file # Download and return local file name
# set up data
##############################
from fastlmm.util import example_file # Download and return local file name
bed_fn = example_file('tests/datasets/synth/all.*','*.bed')
pheno_fn = example_file("tests/datasets/synth/pheno_10_causals.txt")
cov_fn = example_file("tests/datasets/synth/cov.txt")
# run gwas
###################################################################
results_df = single_snp(bed_fn, pheno_fn, covar=cov_fn, count_A1=False)
# manhattan plot
import pylab
import fastlmm.util.util as flutil
pylab.rcParams['figure.figsize'] = (10.0, 8.0)#For some reason, need again (appears above too) to get big figures
flutil.manhattan_plot(results_df[["Chr", "ChrPos", "PValue"]].values,pvalue_line=1e-5,xaxis_unit_bp=False)
pylab.show()
# qq plot
from fastlmm.util.stats import plotp
plotp.qqplot(results_df["PValue"].values, xlim=[0,5], ylim=[0,5])
# print head of results data frame
import pandas as pd
pd.set_option('display.width', 1000)
results_df.head(n=10)
###Output
_____no_output_____
###Markdown
For more on advanced features of single_snp see this [supplemental notebook](https://nbviewer.jupyter.org/github/fastlmm/FaST-LMM/blob/master/doc/ipynb/fastlmm2021.ipynb). Topics covered include multiple phenotypes, filtering large output files, caching intermediate results, and controlling multi-processor runs. Improving power: LMM(all+select) In [Widmer et al., Scientific Reports 2014](http://www.nature.com/articles/srep06874), we have shown that power can be increased while still maintaining control of type I error by using two GSMs: one based on all SNPs (`G0`) and one based on selected SNPs that are highly correlated with the phenotype (`G1`). The model is called LMM(select + all). This approach has greater computational demands (we recommend using a cluster computer when analyzing large data sets). Here is an example of how to apply this model to the synthetic data.
###Code
# example for two kernel feature selection
# this takes a couple of minutes to run on a 20-proc machine.
from pysnptools.snpreader import Bed
from fastlmm.association import single_snp_all_plus_select
from fastlmm.util import example_file # Download and return local file name
from pysnptools.util.mapreduce1.runner import LocalMultiProc
import multiprocessing
runner = LocalMultiProc(multiprocessing.cpu_count(),mkl_num_threads=4)
# define file names
bed_fn = example_file('tests/datasets/synth/all.*','*.bed')
snp_reader = Bed(bed_fn, count_A1=True)
pheno_fn = example_file("tests/datasets/synth/pheno_10_causals.txt")
cov_fn = example_file("tests/datasets/synth/cov.txt")
# find the chr5 SNPs
test_snps = snp_reader[:,snp_reader.pos[:,0] == 5]
#select the 2nd kernel and run GWAS
results_df = single_snp_all_plus_select(test_snps=test_snps,G=snp_reader,pheno=pheno_fn,GB_goal=2,do_plot=True,runner=runner)
import fastlmm.util.util as flutil
flutil.manhattan_plot(results_df[["Chr", "ChrPos", "PValue"]].values,pvalue_line=1e-5,xaxis_unit_bp=False)
pylab.show()
results_df.head()
###Output
_____no_output_____
###Markdown
Aside: In some applications, you may want to use two kernels constructed from two pre-specified sets of SNPs (i.e., with no feature selection). Here we show how to do that and how to simultaneously find h2 and the mixing weight between the kernels.
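As a rough sketch of the model being fit here (our notation; the internal parameterization used by `single_snp` may differ), the phenotype covariance mixes the two GSMs with a weight that is estimated together with the heritability: $$\mathbf{y} \sim \mathcal{N}\big(\mathbf{X}\boldsymbol{\beta},\ \sigma_g^2[(1-a_2)\mathbf{K}_0 + a_2\mathbf{K}_1] + \sigma_e^2\mathbf{I}\big), \qquad h^2 = \frac{\sigma_g^2}{\sigma_g^2 + \sigma_e^2},$$ where $\mathbf{K}_0$ and $\mathbf{K}_1$ are the GSMs built from the two SNP sets and $a_2 \in [0,1]$ is the mixing weight.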
###Code
# example script for two kernel without feature selection
import numpy as np
import pysnptools.util
from pysnptools.snpreader import Bed
from fastlmm.association import single_snp
from fastlmm.util import example_file # Download and return local file name
# define file names
bed_fn = example_file('tests/datasets/synth/all.*','*.bed')
snp_reader = Bed(bed_fn, count_A1=True)
pheno_fn = example_file("tests/datasets/synth/pheno_10_causals.txt")
cov_fn = example_file("tests/datasets/synth/cov.txt")
# select data
###################################################################
snp_reader = Bed(bed_fn,count_A1=True)
# partition snps on chr6 vs rest
G1_chr = 6
G0 = snp_reader[:,snp_reader.pos[:,0] != G1_chr]
G1 = snp_reader[:,snp_reader.pos[:,0] == G1_chr]
# test on chr5
test_snps = snp_reader[:,snp_reader.pos[:,0] == 5]
results_df = single_snp(test_snps, pheno_fn, K0=G0, K1=G1, covar=cov_fn, GB_goal=2, count_A1=True)
import fastlmm.util.util as flutil
flutil.manhattan_plot(results_df[["Chr", "ChrPos", "PValue"]].values,pvalue_line=1e-5,xaxis_unit_bp=False)
pylab.show()
results_df.head()
###Output
_____no_output_____
###Markdown
Improving speed and memory use when there is little family structure: LMM(select)+PCs In the same publication, we have shown that a simpler, more computationally efficient model can be used when the data is confounded only by population structure and not by family structure or cryptic relatedness. Under these circumstances, we have found that a model with a single GSM based on selected SNPs in combination with principal components as fixed-effect covariates yields good control of type I error and power. This model, called LMM(select)+PCs, should be used with caution. Even if you explicitly remove closely related individuals from your data, cryptic relatedness may remain. To use this model, first identify the principal components to be used with the PCgeno algorithm as described in [Widmer et al., Scientific Reports 2014](http://www.nature.com/articles/srep06874). Then you can call single_snp_select with these PCs as covariates.
###Code
from pysnptools.snpreader import Bed
from fastlmm.util import compute_auto_pcs
from fastlmm.association import single_snp_select
from fastlmm.util import example_file # Download and return local file name
# define file names
bed_fn = example_file('tests/datasets/synth/all.*','*.bed')
snp_reader = Bed(bed_fn, count_A1=True)
pheno_fn = example_file("tests/datasets/synth/pheno_10_causals.txt")
cov_fn = example_file("tests/datasets/synth/cov.txt")
# find number of PCs
pcs = compute_auto_pcs(bed_fn,count_A1=True)
print("selected number of PCs:", pcs["vals"].shape[1])
# test on chr5
test_snps = snp_reader[:,snp_reader.pos[:,0] == 5]
results_df = single_snp_select(test_snps=test_snps, G=snp_reader, pheno=pheno_fn, covar=pcs, GB_goal=2)
import fastlmm.util.util as flutil
flutil.manhattan_plot(results_df[["Chr", "ChrPos", "PValue"]].values,pvalue_line=1e-5,xaxis_unit_bp=False)
pylab.show()
results_df.head()
###Output
selected number of PCs: 1
###Markdown
Epistasis You can test for epistatic interactions between pairs of SNPs as well. Here is an example analysis applied to the same synthetic data. Note that this version of the code uses a likelihood ratio test based on maximum-likelihood estimates. A REML-based version is in the works.
###Code
# import the algorithm and reader
import numpy as np
from fastlmm.association import epistasis
from pysnptools.snpreader import Bed
from fastlmm.util import example_file # Download and return local file name
# define file names
bed_fn = example_file('tests/datasets/synth/all.*','*.bed')
bed_reader = Bed(bed_fn, count_A1=True)
pheno_fn = example_file("tests/datasets/synth/pheno_10_causals.txt")
cov_fn = example_file("tests/datasets/synth/cov.txt")
# partition data into the first 50 SNPs on chr1 and all but chr1
G0 = bed_reader[:,bed_reader.pos[:,0] != 1]
test_snps = bed_reader[:,bed_reader.pos[:,0] == 1][:,0:50]
# run epistasis analysis
results_df = epistasis(test_snps, pheno_fn, G0=G0, covar=cov_fn)
# qq plot
from fastlmm.util.stats import plotp
plotp.qqplot(results_df["PValue"].values, xlim=[0,5], ylim=[0,5])
# print head of results data frame
pd.set_option('display.width', 1000)
results_df.head(n=10)
###Output
lambda=0.9234
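###Markdown
The `lambda=0.9234` printed above appears to be a genomic-control-style inflation factor (values near 1 indicate good calibration). As a hedged aside (not from the original manual), the next cell sketches one common way to compute such a value from a vector of p-values; `genomic_control_lambda` is a hypothetical helper of ours, not necessarily how `epistasis` computes its lambda internally.
###Code
# Hedged sketch: genomic-control lambda from a vector of p-values.
# Values near 1 suggest good calibration; >1 inflation, <1 deflation.
# Expects a 1-D array of p-values, e.g. results_df["PValue"].values.
import numpy as np
from scipy import stats
def genomic_control_lambda(pvals):
    chi2_obs = stats.chi2.isf(pvals, df=1)        # observed 1-d.o.f. chi-square statistics
    chi2_null_median = stats.chi2.isf(0.5, df=1)  # theoretical null median, ~0.4549
    return np.median(chi2_obs) / chi2_null_median
###Output
_____no_output_____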
###Markdown
SNP-set association testing SNP-set association testing is performed similarly to single-SNP testing, except we test sets of SNPs by putting them together into one GSM, separate from any (optional) background GSM that corrects for confounding. Both LRT ("lrt") and score ("sc_davies") tests are supported. The LRT is computed by default, but you can switch to the score test by setting test=“sc_davies”. The score test can be more conservative in some settings (and hence can have less power), but it is closed form and often can be computed much faster. The algorithms in their current form are described in [Lippert et al., Bioinformatics 2014](http://bioinformatics.oxfordjournals.org/content/30/22/3206).Here is an example that uses the LRT and no background GSM--note the inflation in the results.
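As a brief reminder (a general statement, not a claim about how this implementation calibrates its null distribution), the LRT reported here is twice the log-likelihood gap between the alternative and null fits, $$\mathrm{LRT} = 2\big[\log L_{\text{alt}} - \log L_{\text{null}}\big],$$ and because the set's variance component is tested on the boundary of its parameter space, its null distribution is not a plain $\chi^2_1$.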
###Code
# this will take a few minutes to run
# import the algorithm and reader
import datetime
import numpy as np
from fastlmm.association import snp_set
from pysnptools.snpreader import Bed
from fastlmm.util import example_file # Download and return local file name
# define file names
test_snps_fn = example_file('tests/datasets/synth/chr1.*','*.bed').replace('.bed','')
pheno_fn = example_file("tests/datasets/synth/pheno_10_causals.txt")
cov_fn = example_file("tests/datasets/synth/cov.txt")
set_list_fn = example_file("tests/datasets/synth/chr1.sets.txt")
G0_fn = None
# run SNP-set analysis
results_df = snp_set(test_snps=test_snps_fn, G0=G0_fn, set_list=set_list_fn, pheno=pheno_fn, covar=cov_fn, test="lrt")
# qq plot
from fastlmm.util.stats import plotp
plotp.qqplot(results_df["P-value"].values, xlim=[0,5], ylim=[0,5], addlambda=False, legend="QQ")
# print head of results data frame
import pandas as pd
pd.set_option('display.width', 1000)
results_df.head(n=10)
###Output
_____no_output_____
###Markdown
As just mentioned, we see inflation of the test statistic because no background GSM was used. When you include a background GSM (using `G0`) type I error is better controlled. As in the single SNP case, proximal contamination is avoided using LOOC.
###Code
#Here we give a G0, the GSM, created via Bed file with all the chroms except chrom 1 (the test snps)
#WARNING: Even on a fast machine, this takes 24 minutes (but, running with test="sc_davies" takes seconds)
bed_reader = Bed(example_file('tests/datasets/synth/all.*','*.bed'),count_A1=True)
chrNot1 = bed_reader[:,bed_reader.pos[:,0] != 1]
G0_fn = 'tempChrNot1.bed'
Bed.write(G0_fn,chrNot1.read(),count_A1=True)
# run SNP-set analysis
results_df = snp_set(test_snps=test_snps_fn, G0=G0_fn, set_list=set_list_fn, pheno=pheno_fn, covar=cov_fn, test="lrt")
# qq plot
plotp.qqplot(results_df["P-value"].values, xlim=[0,5], ylim=[0,5], addlambda=False, legend="QQ")
# print head of results data frame
import pandas as pd
pd.set_option('display.width', 1000)
results_df.head(n=10)
###Output
constructing LMM - this should only happen once.
###Markdown
The `G1` option available for single-marker testing is not yet supported for set testing. With the same caveats as previously described, however, you can use PC covariates and a `G0` based on selected SNPs when your data is confounded, for example, by population structure. Prediction We can train on one set of examples and make predictions on another set.
###Code
from pysnptools.snpreader import Pheno, Bed
from fastlmm.inference import FastLMM
import numpy as np
from fastlmm.util import example_file # Download and return local file name
# define file names
bed_fn = example_file('tests/datasets/synth/all.*','*.bed')
snp_reader = Bed(bed_fn, count_A1=True)
pheno_fn = example_file("tests/datasets/synth/pheno_10_causals.txt")
cov_fn = example_file("tests/datasets/synth/cov.txt")
# Divide the data into train (all but the last 10 individuals) and test (the last 10 individuals)
# (the cov and pheno will automatically be divided to match)
train = snp_reader[:-10,:]
test = snp_reader[-10:,:]
# In the style of scikit-learn, create a predictor and then train it.
fastlmm = FastLMM(GB_goal=2)
fastlmm.fit(K0_train=train,X=cov_fn,y=pheno_fn)
# Now predict with it
mean, covariance = fastlmm.predict(K0_whole_test=test,X=cov_fn)
print("Predicted means and stdevs")
print(mean.val[:,0])
print(np.sqrt(np.diag(covariance.val)))
#Plot actual phenotype and predicted phenotype
whole_pheno = Pheno(pheno_fn)
actual_pheno = whole_pheno[whole_pheno.iid_to_index(mean.iid),:].read()
pylab.plot(actual_pheno.val,"r.")
pylab.plot(mean.val,"b.")
pylab.errorbar(np.arange(mean.iid_count),mean.val,yerr=np.sqrt(np.diag(covariance.val)),fmt='.')
pylab.xlabel('testing examples')
pylab.ylabel('phenotype, actual (red) and predicted (blue with stdev)')
pylab.show()
###Output
Predicted means and stdevs
[-0.01441867 -0.28643403 -0.25428547 0.08895203 -0.2951101 -0.39238038
0.06844377 -0.43282082 -0.21734717 -0.35522391]
[0.95795614 0.96181176 0.95260425 0.9499153 0.9604778 0.95411447
0.96020393 0.9817148 0.95198988 0.96271045]
###Markdown
FaST-LMM Manual=====Factored Spectrally Transformed Linear Mixed Models======== Version 0.4.0FaST-LMM Team, January 9, 2020 IntroductionFaST-LMM, which stands for Factored Spectrally Transformed Linear Mixed Models, is a program for performing genome-wide association studies (GWAS) on datasets of all sizes, up to one millions samples.See [FaST-LMM's README.md](https://github.com/fastlmm/FaST-LMM/blob/master/README.md) for installation instructions, documentation, code, and a bibliography. Contacts* Email the developers at [email protected].* [Join](mailto:[email protected]?subject=Subscribe) the user discussion and announcement list (or use [web sign up](https://mail.python.org/mailman3/lists/fastlmm-user.python.org)).* [Open an issue](https://github.com/fastlmm/FaST-LMM/issues) on GitHub. Citing FaST-LMMIf you use FaST-LMM in any published work, please cite [the relevant manuscript(s)](https://github.com/fastlmm/FaST-LMM/blob/master/README.md) describing it. Data preparationThis version of FaST-LMM is designed for use with randomly ascertained data with Gaussian residuals. If you have case-control data with substantial ascertainment bias, you should first transform your phenotype(s) using [LEAP](https://github.com/omerwe/LEAP) (Weissbrod _et al._, _arXiv_ 2014). If you are analyzing continuous phenotypes with non-Gaussian residuals, you should first transform your phenotype(s) using [Warped-LMM](https://github.com/PMBio/warpedLMM) (Fusi et al., _Nature Commun_ 2014).FaST-LMM uses four input files containing (1) the SNP data to be tested, (2) the SNP data used to determine the genetic similarity matrix (GSM) between individuals (which can be different from 1), (3) the phenotype data, and (4, optionally) a set of covariates.SNP files can be in PLINK format (ped/map, tped/tfam, bed/bim/fam, or fam/dat/map). For the most speed, use the binary format in SNP major order. See the PLINK manual http://pngu.mgh.harvard.edu/~purcell/plink/ (Purcell et al., _Am J Hum Genet_ 2007) for further details. FaST-LMM also supports Hdf5 file format http://www.hdfgroup.org/HDF5/whatishdf5.html. See https://github.com/fastlmm/PySnpTools for more details. Note that each SNP will be standardized to have mean zero and standard deviation one across all individuals before processing. Missing values are mean imputed.The required file containing the phenotype uses the PLINK alternate phenotype format with no header. The covariate file also uses this format (with additional columns for multiple covariates). Notebook preparation and general use To prepare this notebook to run analyses, please run the following script.
###Code
#Import these for Python2 & Python 3 support
from __future__ import absolute_import
from __future__ import print_function
# set some ipython notebook properties
%matplotlib inline
# set degree of verbosity (adapt to INFO for more verbose output)
import logging
logging.basicConfig(level=logging.WARNING)
# set figure sizes
import pylab
pylab.rcParams['figure.figsize'] = (10.0, 8.0)
#pylab.plot([1,2,3],[4,5,6])
# set display width for pandas data frames
import pandas as pd
pd.set_option('display.width', 1000)
###Output
_____no_output_____
###Markdown
If you would like to run any of the code below from the command line, first copy it into a file (_e.g._, `test.py`), and then run it by typing `python text.py` at the command line.If you would like to see all the options for a function just type `? ` to an ipython prompt. Single-SNP association testing Traditional analysis: LMM(all) First, let's run a standard LMM analysis in which the GSM uses (almost) all available SNPs. The model for this analysis is called LMM(all) in [Widmer et al., Scientific Reports 2014](http://www.nature.com/articles/srep06874). We'll apply this model to the synthetic data in `tests\datasets\synth`. The data has 500 samples with 5000 SNPs, and was generated from a Balding-Nichols model with FST=0.05.When using a linear mixed model for association analysis, it is important to avoid proximal contamination ([Lippert _et al._, _Nat Meth_ 2011](http://www.nature.com/nmeth/journal/v8/n10/abs/nmeth.1681.html)). To understand proximal contamination, first note that a LMM with no fixed effects, using a realized relationship matrix for the GSM (as FaST-LMM does), is mathematically equivalent to linear regression of the SNPs on the phenotype, with weights integrated over independent Normal distributions having the same variance (_e.g._, Hayes _et al._, _Genet Res_ 2009). That is, a LMM using a given set of SNPs for the GSM is equivalent to a form of linear regression using those SNPs as covariates to correct for confounding. This equivalence implies that, when testing a given SNP, that SNP (and SNPs physically close to it) should be excluded from the computation of the GSM. If not, when testing a particular SNP, we would also be using that same SNP as a covariate, making the log likelihood of the null model higher than it should be, thus leading to deflation of the test statistic and loss of power.Excluding the SNP you are testing and those SNPs in close proximity to it from the GSM in a naïve way is extremely computationally expensive. A computationally efficient approach for performing the exclusion is to use a GSM computed from all but chromosome $i$ when testing SNPs on chromosome $i$ ([Lippert _et al._, _Nat Meth_ 2011](http://www.nature.com/nmeth/journal/v8/n10/abs/nmeth.1681.html)). We call this approach leave out one chromosome (LOOC). The analysis here does this.
###Code
# import the algorithm
from fastlmm.association import single_snp
# set up data
##############################
bed_fn = "../../tests/datasets/synth/all"
pheno_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
cov_fn = "../../tests/datasets/synth/cov.txt"
# run gwas
###################################################################
results_df = single_snp(bed_fn, pheno_fn, covar=cov_fn, count_A1=False)
# manhattan plot
import pylab
import fastlmm.util.util as flutil
pylab.rcParams['figure.figsize'] = (10.0, 8.0)#For some reason, need again (appears above too) to get big figures
flutil.manhattan_plot(results_df[["Chr", "ChrPos", "PValue"]].values,pvalue_line=1e-5,xaxis_unit_bp=False)
pylab.show()
# qq plot
from fastlmm.util.stats import plotp
plotp.qqplot(results_df["PValue"].values, xlim=[0,5], ylim=[0,5])
# print head of results data frame
import pandas as pd
pd.set_option('display.width', 1000)
results_df.head(n=10)
###Output
_____no_output_____
###Markdown
Improving power: LMM(all+select) In [Widmer et al., Scientific Reports 2014](http://www.nature.com/articles/srep06874), we have shown that power can be increased while still maintaining control of type I error by using two GSMs: one based on all SNPs (`G0`) and one based on selected SNPs that are highly correlated with the phenotype (`G1`). The model is called LMM(select + all). This approach has greater computational demands (we recommend using a cluster computer when analyzing large data sets). Here is an example of how to apply this model to the synthetic data.
###Code
# example for two kernel feature selection
# this takes a couple of minutes to run on a 20-proc machine.
from pysnptools.snpreader import Bed
from fastlmm.association import single_snp_all_plus_select
from pysnptools.util.mapreduce1.runner import LocalMultiProc
runner = LocalMultiProc(20,mkl_num_threads=5)
# define file names
snp_reader = Bed("../../tests/datasets/synth/all", count_A1=True)
pheno_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
cov_fn = "../../tests/datasets/synth/cov.txt"
# find the chr5 SNPs
test_snps = snp_reader[:,snp_reader.pos[:,0] == 5]
#select the 2nd kernel and run GWAS
results_df = single_snp_all_plus_select(test_snps=test_snps,G=snp_reader,pheno=pheno_fn,GB_goal=2,do_plot=True,runner=runner)
import fastlmm.util.util as flutil
flutil.manhattan_plot(results_df[["Chr", "ChrPos", "PValue"]].values,pvalue_line=1e-5,xaxis_unit_bp=False)
pylab.show()
results_df.head()
###Output
_____no_output_____
###Markdown
Aside: In some applications, you may want to use two kernels constructed from two pre-specified sets of SNPs (i.e., with no feature selection). Here we show how to do that and how to simultaneously find h2 and the mixing weight between the kernels.
###Code
# example script for two kernel without feature selection
import numpy as np
import pysnptools.util
from pysnptools.snpreader import Bed
from fastlmm.association import single_snp
# define file names
bed_fn = "../../tests/datasets/synth/all"
pheno_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
cov_fn = "../../tests/datasets/synth/cov.txt"
# select data
###################################################################
snp_reader = Bed(bed_fn,count_A1=True)
# partition snps on chr6 vs rest
G1_chr = 6
G0 = snp_reader[:,snp_reader.pos[:,0] != G1_chr]
G1 = snp_reader[:,snp_reader.pos[:,0] == G1_chr]
# test on chr5
test_snps = snp_reader[:,snp_reader.pos[:,0] == 5]
results_df = single_snp(test_snps, pheno_fn, K0=G0, K1=G1, covar=cov_fn, GB_goal=2, count_A1=True)
import fastlmm.util.util as flutil
flutil.manhattan_plot(results_df[["Chr", "ChrPos", "PValue"]].values,pvalue_line=1e-5,xaxis_unit_bp=False)
pylab.show()
results_df.head()
###Output
_____no_output_____
###Markdown
Improving speed and memory use when there is little family structure: LMM(select)+PCs In the same publication, we have shown that a simpler, more computationally efficient model can be used when the data is confounded only by population structure and not by family structure or cryptic relatedness. Under these circumstances, we have found that a model with a single GSM based on selected SNPs in combination with principal components as fixed-effect covariates yields good control of type I error and power. This model, called LMM(select)+PCs, should be used with caution. Even if you explicitly remove closely related individuals from your data, cryptic relatedness may remain. To use this model, first identify the principal components to be used with the PCgeno algorithm as described in [Widmer et al., Scientific Reports 2014](http://www.nature.com/articles/srep06874). Then you can call single_snp_select with these PCs as covariates.
###Code
from pysnptools.snpreader import Bed
from fastlmm.util import compute_auto_pcs
from fastlmm.association import single_snp_select
# define file names
bed_fn = "../../tests/datasets/synth/all"
snp_reader = Bed(bed_fn,count_A1=True)
phen_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
# find number of PCs
pcs = compute_auto_pcs(bed_fn,count_A1=True)
print("selected number of PCs:", pcs["vals"].shape[1])
# test on chr5
test_snps = snp_reader[:,snp_reader.pos[:,0] == 5]
results_df = single_snp_select(test_snps=test_snps, G=snp_reader, pheno=phen_fn, covar=pcs, GB_goal=2)
import fastlmm.util.util as flutil
flutil.manhattan_plot(results_df[["Chr", "ChrPos", "PValue"]].values,pvalue_line=1e-5,xaxis_unit_bp=False)
pylab.show()
results_df.head()
###Output
selected number of PCs: 1
###Markdown
Epistasis You can test for epistatic interactions between pairs of SNPs as well. Here is an example analysis applied to the same synthetic data. Note that this version of the code uses a likelihood ratio test based on maximum-likelihood estimates. A REML-based version is in the works.
###Code
# import the algorithm and reader
from fastlmm.association import epistasis
from pysnptools.snpreader import Bed
# define file names
bed_reader = Bed("../../tests/datasets/synth/all", count_A1=True)
pheno_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
cov_fn = "../../tests/datasets/synth/cov.txt"
# partition data into the first 50 SNPs on chr1 and all but chr1
G0 = bed_reader[:,bed_reader.pos[:,0] != 1]
test_snps = bed_reader[:,bed_reader.pos[:,0] == 1][:,0:50]
# run epistasis analysis
results_df = epistasis(test_snps, pheno_fn, G0=G0, covar=cov_fn)
# qq plot
from fastlmm.util.stats import plotp
plotp.qqplot(results_df["PValue"].values, xlim=[0,5], ylim=[0,5])
# print head of results data frame
pd.set_option('display.width', 1000)
results_df.head(n=10)
###Output
lambda=0.9234
###Markdown
SNP-set association testing SNP-set association testing is performed similarly to single-SNP testing, except we test sets of SNPs by putting them together into one GSM, separate from any (optional) background GSM that corrects for confounding. Both LRT ("lrt") and score ("sc_davies") tests are supported. The LRT is computed by default, but you can switch to the score test by setting test=“sc_davies”. The score test can be more conservative in some settings (and hence can have less power), but it is closed form and often can be computed much faster. The algorithms in their current form are described in [Lippert et al., Bioinformatics 2014](http://bioinformatics.oxfordjournals.org/content/30/22/3206).Here is an example that uses the LRT and no background GSM--note the inflation in the results.
###Code
# this will take a few minutes to run
# import the algorithm and reader
from fastlmm.association import snp_set
from pysnptools.snpreader import Bed
import datetime
# define file names
test_snps_fn = "../../tests/datasets/synth/chr1"
pheno_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
cov_fn = "../../tests/datasets/synth/cov.txt"
set_list_fn = "../../tests/datasets/synth/chr1.sets.txt"
G0_fn = None
# run SNP-set analysis
results_df = snp_set(test_snps=test_snps_fn, G0=G0_fn, set_list=set_list_fn, pheno=pheno_fn, covar=cov_fn, test="lrt")
# qq plot
from fastlmm.util.stats import plotp
plotp.qqplot(results_df["P-value"].values, xlim=[0,5], ylim=[0,5], addlambda=False, legend="QQ")
# print head of results data frame
import pandas as pd
pd.set_option('display.width', 1000)
results_df.head(n=10)
###Output
_____no_output_____
###Markdown
As just mentioned, we see inflation of the test statistic because no background GSM was used. When you include a background GSM (using `G0`) type I error is better controlled. As in the single SNP case, proximal contamination is avoided using LOOC.
###Code
#Here we give a G0, the GSM, created via Bed file with all the chroms except chrom 1 (the test snps)
#WARNING: Even on a fast machine, this takes 24 minutes (but, running with test="sc_davies" takes seconds)
bed_reader = Bed("../../tests/datasets/synth/all.bed",count_A1=True)
chrNot1 = bed_reader[:,bed_reader.pos[:,0] != 1]
G0_fn = "../../tests/datasets/synth/chrNot1.bed"
Bed.write(G0_fn,chrNot1.read(),count_A1=True)
# run SNP-set analysis
results_df = snp_set(test_snps=test_snps_fn, G0=G0_fn, set_list=set_list_fn, pheno=pheno_fn, covar=cov_fn, test="lrt")
# qq plot
from fastlmm.util.stats import plotp
plotp.qqplot(results_df["P-value"].values, xlim=[0,5], ylim=[0,5], addlambda=False, legend="QQ")
# print head of results data frame
import pandas as pd
pd.set_option('display.width', 1000)
results_df.head(n=10)
###Output
constructing LMM - this should only happen once.
###Markdown
The `G1` option available for single-marker testing is not yet supported for set testing. With the same caveats as previously described, however, you can use PC covariates and a `G0` based on selected SNPs when your data is confounded, for example, by population structure. Prediction We can train on one set of examples and make predictions on another set.
###Code
from pysnptools.snpreader import Pheno, Bed
from fastlmm.inference import FastLMM
import numpy as np
# define file names
snp_reader = Bed("../../tests/datasets/synth/all",count_A1=True)
cov_fn = "../../tests/datasets/synth/cov.txt"
pheno_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
# Divide the data into train (all but the last 10 individuals) and test (the last 10 individuals)
# (the cov and pheno will automatically be divided to match)
train = snp_reader[:-10,:]
test = snp_reader[-10:,:]
# In the style of scikit-learn, create a predictor and then train it.
fastlmm = FastLMM(GB_goal=2)
fastlmm.fit(K0_train=train,X=cov_fn,y=pheno_fn)
# Now predict with it
mean, covariance = fastlmm.predict(K0_whole_test=test,X=cov_fn)
print("Predicted means and stdevs")
print(mean.val[:,0])
print(np.sqrt(np.diag(covariance.val)))
#Plot actual phenotype and predicted phenotype
whole_pheno = Pheno(pheno_fn)
actual_pheno = whole_pheno[whole_pheno.iid_to_index(mean.iid),:].read()
pylab.plot(actual_pheno.val,"r.")
pylab.plot(mean.val,"b.")
pylab.errorbar(np.arange(mean.iid_count),mean.val,yerr=np.sqrt(np.diag(covariance.val)),fmt='.')
pylab.xlabel('testing examples')
pylab.ylabel('phenotype, actual (red) and predicted (blue with stdev)')
pylab.show()
###Output
Predicted means and stdevs
[-0.01441867 -0.28643404 -0.25428548 0.08895204 -0.29511011 -0.39238039
0.06844378 -0.43282083 -0.21734717 -0.35522392]
[0.95795614 0.96181175 0.95260425 0.9499153 0.96047779 0.95411447
0.96020393 0.9817148 0.95198988 0.96271044]
###Markdown
FaST-LMM Manual=====Factored Spectrally Transformed Linear Mixed Models======== Version 0.5.0FaST-LMM Team, February 20, 2021 IntroductionFaST-LMM, which stands for Factored Spectrally Transformed Linear Mixed Models, is a program for performing genome-wide association studies (GWAS) on datasets of all sizes, up to one millions samples.See [FaST-LMM's README.md](https://github.com/fastlmm/FaST-LMM/blob/master/README.md) for installation instructions, documentation, code, and a bibliography. Contacts* Email the developers at [email protected].* [Join](mailto:[email protected]?subject=Subscribe) the user discussion and announcement list (or use [web sign up](https://mail.python.org/mailman3/lists/fastlmm-user.python.org)).* [Open an issue](https://github.com/fastlmm/FaST-LMM/issues) on GitHub. Citing FaST-LMMIf you use FaST-LMM in any published work, please cite [the relevant manuscript(s)](https://github.com/fastlmm/FaST-LMM/blob/master/README.md) describing it. Data preparationThis version of FaST-LMM is designed for use with randomly ascertained data with Gaussian residuals. If you have case-control data with substantial ascertainment bias, you should first transform your phenotype(s) using [LEAP](https://github.com/omerwe/LEAP) (Weissbrod _et al._, _arXiv_ 2014). If you are analyzing continuous phenotypes with non-Gaussian residuals, you should first transform your phenotype(s) using [Warped-LMM](https://github.com/PMBio/warpedLMM) (Fusi et al., _Nature Commun_ 2014).FaST-LMM uses four input files containing (1) the SNP data to be tested, (2) the SNP data used to determine the genetic similarity matrix (GSM) between individuals (which can be different from 1), (3) the phenotype data, and (4, optionally) a set of covariates.SNP files can be in PLINK format (ped/map, tped/tfam, bed/bim/fam, or fam/dat/map). For the most speed, use the binary format in SNP major order. See the PLINK manual http://pngu.mgh.harvard.edu/~purcell/plink/ (Purcell et al., _Am J Hum Genet_ 2007) for further details. FaST-LMM also supports Hdf5 file format http://www.hdfgroup.org/HDF5/whatishdf5.html. See https://github.com/fastlmm/PySnpTools for more details. Note that each SNP will be standardized to have mean zero and standard deviation one across all individuals before processing. Missing values are mean imputed.The required file containing the phenotype uses the PLINK alternate phenotype format with no header. The covariate file also uses this format (with additional columns for multiple covariates). Notebook preparation and general use To prepare this notebook to run analyses, please run the following script.
###Code
# set some ipython notebook properties
%matplotlib inline
# set degree of verbosity (adapt to INFO for more verbose output)
import logging
logging.basicConfig(level=logging.WARNING)
# set figure sizes
import pylab
pylab.rcParams['figure.figsize'] = (10.0, 8.0)
#pylab.plot([1,2,3],[4,5,6])
# set display width for pandas data frames
import pandas as pd
pd.set_option('display.width', 1000)
###Output
_____no_output_____
###Markdown
If you would like to run any of the code below from the command line, first copy it into a file (_e.g._, `test.py`), and then run it by typing `python text.py` at the command line.If you would like to see all the options for a function just type `? ` to an ipython prompt. Single-SNP association testing Traditional analysis: LMM(all) First, let's run a standard LMM analysis in which the GSM uses (almost) all available SNPs. The model for this analysis is called LMM(all) in [Widmer et al., Scientific Reports 2014](http://www.nature.com/articles/srep06874). We'll apply this model to the synthetic data in `tests\datasets\synth`. The data has 500 samples with 5000 SNPs, and was generated from a Balding-Nichols model with FST=0.05.When using a linear mixed model for association analysis, it is important to avoid proximal contamination ([Lippert _et al._, _Nat Meth_ 2011](http://www.nature.com/nmeth/journal/v8/n10/abs/nmeth.1681.html)). To understand proximal contamination, first note that a LMM with no fixed effects, using a realized relationship matrix for the GSM (as FaST-LMM does), is mathematically equivalent to linear regression of the SNPs on the phenotype, with weights integrated over independent Normal distributions having the same variance (_e.g._, Hayes _et al._, _Genet Res_ 2009). That is, a LMM using a given set of SNPs for the GSM is equivalent to a form of linear regression using those SNPs as covariates to correct for confounding. This equivalence implies that, when testing a given SNP, that SNP (and SNPs physically close to it) should be excluded from the computation of the GSM. If not, when testing a particular SNP, we would also be using that same SNP as a covariate, making the log likelihood of the null model higher than it should be, thus leading to deflation of the test statistic and loss of power.Excluding the SNP you are testing and those SNPs in close proximity to it from the GSM in a naïve way is extremely computationally expensive. A computationally efficient approach for performing the exclusion is to use a GSM computed from all but chromosome $i$ when testing SNPs on chromosome $i$ ([Lippert _et al._, _Nat Meth_ 2011](http://www.nature.com/nmeth/journal/v8/n10/abs/nmeth.1681.html)). We call this approach leave out one chromosome (LOOC). The analysis here does this.
###Code
# import the algorithm
import numpy as np
from fastlmm.association import single_snp
from fastlmm.util import example_file # Download and return local file name
# set up data
##############################
from fastlmm.util import example_file # Download and return local file name
bed_fn = example_file('tests/datasets/synth/all.*','*.bed')
pheno_fn = example_file("tests/datasets/synth/pheno_10_causals.txt")
cov_fn = example_file("tests/datasets/synth/cov.txt")
# run gwas
###################################################################
results_df = single_snp(bed_fn, pheno_fn, covar=cov_fn, count_A1=False)
# manhattan plot
import pylab
import fastlmm.util.util as flutil
pylab.rcParams['figure.figsize'] = (10.0, 8.0)#For some reason, need again (appears above too) to get big figures
flutil.manhattan_plot(results_df[["Chr", "ChrPos", "PValue"]].values,pvalue_line=1e-5,xaxis_unit_bp=False)
pylab.show()
# qq plot
from fastlmm.util.stats import plotp
plotp.qqplot(results_df["PValue"].values, xlim=[0,5], ylim=[0,5])
# print head of results data frame
import pandas as pd
pd.set_option('display.width', 1000)
results_df.head(n=10)
###Output
D:\OneDrive\programs\fastlmm\doc\ipynb
###Markdown
Improving power: LMM(all+select) In [Widmer et al., Scientific Reports 2014](http://www.nature.com/articles/srep06874), we have shown that power can be increased while still maintaining control of type I error by using two GSMs: one based on all SNPs (`G0`) and one based on selected SNPs that are highly correlated with the phenotype (`G1`). The model is called LMM(select + all). This approach has greater computational demands (we recommend using a cluster computer when analyzing large data sets). Here is an example of how to apply this model to the synthetic data.
###Code
# example for two kernel feature selection
# this takes a couple of minutes to run on a 20-proc machine.
from pysnptools.snpreader import Bed
from fastlmm.association import single_snp_all_plus_select
from fastlmm.util import example_file # Download and return local file name
from pysnptools.util.mapreduce1.runner import LocalMultiProc
import multiprocessing
runner = LocalMultiProc(multiprocessing.cpu_count(),mkl_num_threads=4)
# define file names
bed_fn = example_file('tests/datasets/synth/all.*','*.bed')
snp_reader = Bed(bed_fn, count_A1=True)
pheno_fn = example_file("tests/datasets/synth/pheno_10_causals.txt")
cov_fn = example_file("tests/datasets/synth/cov.txt")
# find the chr5 SNPs
test_snps = snp_reader[:,snp_reader.pos[:,0] == 5]
#select the 2nd kernel and run GWAS
results_df = single_snp_all_plus_select(test_snps=test_snps,G=snp_reader,pheno=pheno_fn,GB_goal=2,do_plot=True,runner=runner)
import fastlmm.util.util as flutil
flutil.manhattan_plot(results_df[["Chr", "ChrPos", "PValue"]].values,pvalue_line=1e-5,xaxis_unit_bp=False)
pylab.show()
results_df.head()
###Output
_____no_output_____
###Markdown
Aside: In some applications, you may want to use two kernels constructed from two pre-specified sets of SNPs (i.e., with no feature selection). Here we show how to do that and how to simultaneously find h2 and the mixing weight between the kernels.
###Code
# example script for two kernel without feature selection
import numpy as np
import pysnptools.util
from pysnptools.snpreader import Bed
from fastlmm.association import single_snp
from fastlmm.util import example_file # Download and return local file name
# define file names
bed_fn = example_file('tests/datasets/synth/all.*','*.bed')
snp_reader = Bed(bed_fn, count_A1=True)
pheno_fn = example_file("tests/datasets/synth/pheno_10_causals.txt")
cov_fn = example_file("tests/datasets/synth/cov.txt")
# select data
###################################################################
snp_reader = Bed(bed_fn,count_A1=True)
# partition snps on chr6 vs rest
G1_chr = 6
G0 = snp_reader[:,snp_reader.pos[:,0] != G1_chr]
G1 = snp_reader[:,snp_reader.pos[:,0] == G1_chr]
# test on chr5
test_snps = snp_reader[:,snp_reader.pos[:,0] == 5]
results_df = single_snp(test_snps, pheno_fn, K0=G0, K1=G1, covar=cov_fn, GB_goal=2, count_A1=True)
import fastlmm.util.util as flutil
flutil.manhattan_plot(results_df[["Chr", "ChrPos", "PValue"]].values,pvalue_line=1e-5,xaxis_unit_bp=False)
pylab.show()
results_df.head()
###Output
_____no_output_____
###Markdown
Improving speed and memory use when there is little family structure: LMM(select)+PCs In the same publication, we have shown that a simpler, more computationally efficient model can be used when the data is confounded only by population structure and not by family structure or cryptic relatedness. Under these circumstances, we have found that a model with a single GSM based on selected SNPs in combination with principal components as fixed-effect covariates yields good control of type I error and power. This model, called LMM(select)+PCs, should be used with caution. Even if you explicitly remove closely related individuals from your data, cryptic relatedness may remain. To use this model, first identify the principal components to be used with the PCgeno algorithm as described in [Widmer et al., Scientific Reports 2014](http://www.nature.com/articles/srep06874). Then you can call single_snp_select with these PCs as covariates.
###Code
from pysnptools.snpreader import Bed
from fastlmm.util import compute_auto_pcs
from fastlmm.association import single_snp_select
from fastlmm.util import example_file # Download and return local file name
# define file names
bed_fn = example_file('tests/datasets/synth/all.*','*.bed')
snp_reader = Bed(bed_fn, count_A1=True)
pheno_fn = example_file("tests/datasets/synth/pheno_10_causals.txt")
cov_fn = example_file("tests/datasets/synth/cov.txt")
# find number of PCs
pcs = compute_auto_pcs(bed_fn,count_A1=True)
print("selected number of PCs:", pcs["vals"].shape[1])
# test on chr5
test_snps = snp_reader[:,snp_reader.pos[:,0] == 5]
results_df = single_snp_select(test_snps=test_snps, G=snp_reader, pheno=pheno_fn, covar=pcs, GB_goal=2)
import fastlmm.util.util as flutil
flutil.manhattan_plot(results_df[["Chr", "ChrPos", "PValue"]].values,pvalue_line=1e-5,xaxis_unit_bp=False)
pylab.show()
results_df.head()
###Output
selected number of PCs: 1
###Markdown
Epistasis You can test for epistatic interactions between pairs of SNPs as well. Here is an example analysis applied to the same synthetic data. Note that this version of the code uses a likelihood ratio test based on maximum-likelihood estimates. A REML-based version is in the works.
###Code
# import the algorithm and reader
import numpy as np
from fastlmm.association import epistasis
from pysnptools.snpreader import Bed
from fastlmm.util import example_file # Download and return local file name
# define file names
bed_fn = example_file('tests/datasets/synth/all.*','*.bed')
bed_reader = Bed(bed_fn, count_A1=True)
pheno_fn = example_file("tests/datasets/synth/pheno_10_causals.txt")
cov_fn = example_file("tests/datasets/synth/cov.txt")
# partition data into the first 50 SNPs on chr1 and all but chr1
G0 = bed_reader[:,bed_reader.pos[:,0] != 1]
test_snps = bed_reader[:,bed_reader.pos[:,0] == 1][:,0:50]
# run epistasis analysis
results_df = epistasis(test_snps, pheno_fn, G0=G0, covar=cov_fn)
# qq plot
from fastlmm.util.stats import plotp
plotp.qqplot(results_df["PValue"].values, xlim=[0,5], ylim=[0,5])
# print head of results data frame
pd.set_option('display.width', 1000)
results_df.head(n=10)
###Output
lambda=0.9234
###Markdown
SNP-set association testing SNP-set association testing is performed similarly to single-SNP testing, except we test sets of SNPs by putting them together into one GSM, separate from any (optional) background GSM that corrects for confounding. Both LRT ("lrt") and score ("sc_davies") tests are supported. The LRT is computed by default, but you can switch to the score test by setting test=“sc_davies”. The score test can be more conservative in some settings (and hence can have less power), but it is closed form and often can be computed much faster. The algorithms in their current form are described in [Lippert et al., Bioinformatics 2014](http://bioinformatics.oxfordjournals.org/content/30/22/3206).Here is an example that uses the LRT and no background GSM--note the inflation in the results.
###Code
# this will take a few minutes to run
# import the algorithm and reader
import datetime
import numpy as np
from fastlmm.association import snp_set
from pysnptools.snpreader import Bed
from fastlmm.util import example_file # Download and return local file name
# define file names
test_snps_fn = example_file('tests/datasets/synth/chr1.*','*.bed').replace('.bed','')
pheno_fn = example_file("tests/datasets/synth/pheno_10_causals.txt")
cov_fn = example_file("tests/datasets/synth/cov.txt")
set_list_fn = example_file("tests/datasets/synth/chr1.sets.txt")
G0_fn = None
# run SNP-set analysis
results_df = snp_set(test_snps=test_snps_fn, G0=G0_fn, set_list=set_list_fn, pheno=pheno_fn, covar=cov_fn, test="lrt")
# qq plot
from fastlmm.util.stats import plotp
plotp.qqplot(results_df["P-value"].values, xlim=[0,5], ylim=[0,5], addlambda=False, legend="QQ")
# print head of results data frame
import pandas as pd
pd.set_option('display.width', 1000)
results_df.head(n=10)
###Output
_____no_output_____
###Markdown
As just mentioned, we see inflation of the test statistic because no background GSM was used. When you include a background GSM (using `G0`) type I error is better controlled. As in the single SNP case, proximal contamination is avoided using LOOC.
###Code
#Here we give a G0, the GSM, created via Bed file with all the chroms except chrom 1 (the test snps)
#WARNING: Even on a fast machine, this takes 24 minutes (but, running with test="sc_davies" takes seconds)
bed_reader = Bed(example_file('tests/datasets/synth/all.*','*.bed'),count_A1=True)
chrNot1 = bed_reader[:,bed_reader.pos[:,0] != 1]
G0_fn = 'tempChrNot1.bed'
Bed.write(G0_fn,chrNot1.read(),count_A1=True)
# run SNP-set analysis
results_df = snp_set(test_snps=test_snps_fn, G0=G0_fn, set_list=set_list_fn, pheno=pheno_fn, covar=cov_fn, test="lrt")
# qq plot
plotp.qqplot(results_df["P-value"].values, xlim=[0,5], ylim=[0,5], addlambda=False, legend="QQ")
# print head of results data frame
import pandas as pd
pd.set_option('display.width', 1000)
results_df.head(n=10)
###Output
constructing LMM - this should only happen once.
###Markdown
The `G1` option available for single-marker testing is not yet supported for set testing. With the same caveats as previously described, however, you can use PC covariates and a `G0` based on selected SNPs when your data is confounded, for example, by population structure. Prediction We can train on one set of examples and make predictions on another set.
###Code
from pysnptools.snpreader import Pheno, Bed
from fastlmm.inference import FastLMM
import numpy as np
from fastlmm.util import example_file # Download and return local file name
# define file names
bed_fn = example_file('tests/datasets/synth/all.*','*.bed')
snp_reader = Bed(bed_fn, count_A1=True)
pheno_fn = example_file("tests/datasets/synth/pheno_10_causals.txt")
cov_fn = example_file("tests/datasets/synth/cov.txt")
# Divide the data into train (all but the last 10 individuals) and test (the last 10 individuals)
# (the cov and pheno will automatically be divided to match)
train = snp_reader[:-10,:]
test = snp_reader[-10:,:]
# In the style of scikit-learn, create a predictor and then train it.
fastlmm = FastLMM(GB_goal=2)
fastlmm.fit(K0_train=train,X=cov_fn,y=pheno_fn)
# Now predict with it
mean, covariance = fastlmm.predict(K0_whole_test=test,X=cov_fn)
print("Predicted means and stdevs")
print(mean.val[:,0])
print(np.sqrt(np.diag(covariance.val)))
#Plot actual phenotype and predicted phenotype
whole_pheno = Pheno(pheno_fn)
actual_pheno = whole_pheno[whole_pheno.iid_to_index(mean.iid),:].read()
pylab.plot(actual_pheno.val,"r.")
pylab.plot(mean.val,"b.")
pylab.errorbar(np.arange(mean.iid_count),mean.val,yerr=np.sqrt(np.diag(covariance.val)),fmt='.')
pylab.xlabel('testing examples')
pylab.ylabel('phenotype, actual (red) and predicted (blue with stdev)')
pylab.show()
###Output
Predicted means and stdevs
[-0.01441867 -0.28643403 -0.25428547 0.08895203 -0.2951101 -0.39238038
0.06844377 -0.43282082 -0.21734717 -0.35522391]
[0.95795614 0.96181176 0.95260425 0.9499153 0.9604778 0.95411447
0.96020393 0.9817148 0.95198988 0.96271045]
###Markdown
FaST-LMM Manual=====Factored Spectrally Transformed Linear Mixed Models======== Version 0.3.8FaST-LMM Team, December 4, 2019 IntroductionFaST-LMM, which stands for Factored Spectrally Transformed Linear Mixed Models, is a program for performing genome-wide association studies (GWAS) on datasets of all sizes, up to one millions samples.See [FaST-LMM's README.md](https://github.com/fastlmm/FaST-LMM/blob/master/README.md) for installation instructions, documentation, code, and a bibliography. Contacts* Email the developers at [email protected].* [Join](mailto:[email protected]?subject=Subscribe) the user discussion and announcement list (or use [web sign up](https://mail.python.org/mailman3/lists/fastlmm-user.python.org)).* [Open an issue](https://github.com/fastlmm/FaST-LMM/issues) on GitHub. Citing FaST-LMMIf you use FaST-LMM in any published work, please cite [the relevant manuscript(s)](https://github.com/fastlmm/FaST-LMM/blob/master/README.md) describing it. Data preparationThis version of FaST-LMM is designed for use with randomly ascertained data with Gaussian residuals. If you have case-control data with substantial ascertainment bias, you should first transform your phenotype(s) using [LEAP](https://github.com/omerwe/LEAP) (Weissbrod _et al._, _arXiv_ 2014). If you are analyzing continuous phenotypes with non-Gaussian residuals, you should first transform your phenotype(s) using [Warped-LMM](https://github.com/PMBio/warpedLMM) (Fusi et al., _Nature Commun_ 2014).FaST-LMM uses four input files containing (1) the SNP data to be tested, (2) the SNP data used to determine the genetic similarity matrix (GSM) between individuals (which can be different from 1), (3) the phenotype data, and (4, optionally) a set of covariates.SNP files can be in PLINK format (ped/map, tped/tfam, bed/bim/fam, or fam/dat/map). For the most speed, use the binary format in SNP major order. See the PLINK manual http://pngu.mgh.harvard.edu/~purcell/plink/ (Purcell et al., _Am J Hum Genet_ 2007) for further details. FaST-LMM also supports Hdf5 file format http://www.hdfgroup.org/HDF5/whatishdf5.html. See https://github.com/fastlmm/PySnpTools for more details. Note that each SNP will be standardized to have mean zero and standard deviation one across all individuals before processing. Missing values are mean imputed.The required file containing the phenotype uses the PLINK alternate phenotype format with no header. The covariate file also uses this format (with additional columns for multiple covariates). Notebook preparation and general use To prepare this notebook to run analyses, please run the following script.
###Code
# set some ipython notebook properties
%matplotlib inline
# set degree of verbosity (adapt to INFO for more verbose output)
import logging
logging.basicConfig(level=logging.WARNING)
# set figure sizes
import pylab
pylab.rcParams['figure.figsize'] = (10.0, 8.0)
# set display width for pandas data frames
import pandas as pd
pd.set_option('display.width', 1000)
###Output
_____no_output_____
###Markdown
If you would like to run any of the code below from the command line, first copy it into a file (_e.g._, `test.py`), and then run it by typing `python text.py` at the command line.If you would like to see all the options for a function just type `? ` to an ipython prompt. Single-SNP association testing Traditional analysis: LMM(all) First, let's run a standard LMM analysis in which the GSM uses (almost) all available SNPs. The model for this analysis is called LMM(all) in [Widmer et al., Scientific Reports 2014](http://www.nature.com/articles/srep06874). We'll apply this model to the synthetic data in `tests\datasets\synth`. The data has 500 samples with 5000 SNPs, and was generated from a Balding-Nichols model with FST=0.05.When using a linear mixed model for association analysis, it is important to avoid proximal contamination ([Lippert _et al._, _Nat Meth_ 2011](http://www.nature.com/nmeth/journal/v8/n10/abs/nmeth.1681.html)). To understand proximal contamination, first note that a LMM with no fixed effects, using a realized relationship matrix for the GSM (as FaST-LMM does), is mathematically equivalent to linear regression of the SNPs on the phenotype, with weights integrated over independent Normal distributions having the same variance (_e.g._, Hayes _et al._, _Genet Res_ 2009). That is, a LMM using a given set of SNPs for the GSM is equivalent to a form of linear regression using those SNPs as covariates to correct for confounding. This equivalence implies that, when testing a given SNP, that SNP (and SNPs physically close to it) should be excluded from the computation of the GSM. If not, when testing a particular SNP, we would also be using that same SNP as a covariate, making the log likelihood of the null model higher than it should be, thus leading to deflation of the test statistic and loss of power.Excluding the SNP you are testing and those SNPs in close proximity to it from the GSM in a naïve way is extremely computationally expensive. A computationally efficient approach for performing the exclusion is to use a GSM computed from all but chromosome $i$ when testing SNPs on chromosome $i$ ([Lippert _et al._, _Nat Meth_ 2011](http://www.nature.com/nmeth/journal/v8/n10/abs/nmeth.1681.html)). We call this approach leave out one chromosome (LOOC). The analysis here does this.
###Code
# import the algorithm
from fastlmm.association import single_snp
# set up data
##############################
bed_fn = "../../tests/datasets/synth/all"
pheno_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
cov_fn = "../../tests/datasets/synth/cov.txt"
# run gwas
###################################################################
results_df = single_snp(bed_fn, pheno_fn, covar=cov_fn, count_A1=False)
# manhattan plot
import pylab
import fastlmm.util.util as flutil
flutil.manhattan_plot(results_df[["Chr", "ChrPos", "PValue"]].values,pvalue_line=1e-5,xaxis_unit_bp=False)
pylab.show()
# qq plot
from fastlmm.util.stats import plotp
plotp.qqplot(results_df["PValue"].values, xlim=[0,5], ylim=[0,5])
# print head of results data frame
import pandas as pd
pd.set_option('display.width', 1000)
results_df.head(n=10)
###Output
_____no_output_____
###Markdown
Improving power: LMM(all+select) In [Widmer et al., Scientific Reports 2014](http://www.nature.com/articles/srep06874), we have shown that power can be increased while still maintaining control of type I error by using two GSMs: one based on all SNPs (`G0`) and one based on selected SNPs that are highly correlated with the phenotype (`G1`). The model is called LMM(select + all). This approach has greater computational demands (we recommend using a cluster computer when analyzing large data sets). Here is an example of how to apply this model to the synthetic data.
###Code
# example for two kernel feature selection
# this takes a couple of minutes to run on a 20-proc machine.
from pysnptools.snpreader import Bed
from fastlmm.association import single_snp_all_plus_select
from pysnptools.util.mapreduce1.runner import LocalMultiProc
runner = LocalMultiProc(20,mkl_num_threads=5)
# define file names
snp_reader = Bed("../../tests/datasets/synth/all", count_A1=True)
pheno_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
cov_fn = "../../tests/datasets/synth/cov.txt"
# find the chr5 SNPs
test_snps = snp_reader[:,snp_reader.pos[:,0] == 5]
#select the 2nd kernel and run GWAS
results_df = single_snp_all_plus_select(test_snps=test_snps,G=snp_reader,pheno=pheno_fn,GB_goal=2,do_plot=True,runner=runner)
import fastlmm.util.util as flutil
flutil.manhattan_plot(results_df[["Chr", "ChrPos", "PValue"]].values,pvalue_line=1e-5,xaxis_unit_bp=False)
pylab.show()
results_df.head()
###Output
_____no_output_____
###Markdown
Aside: In some applications, you may want to use two kernels constructed from two pre-specified sets of SNPs (i.e., with no feature selection). Here we show how to do that and how to simultaneously find h2 and the mixing weight between the kernels.
###Code
# example script for two kernel without feature selection
import numpy as np
import pysnptools.util
from pysnptools.snpreader import Bed
from fastlmm.association import single_snp
# define file names
bed_fn = "../../tests/datasets/synth/all"
pheno_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
cov_fn = "../../tests/datasets/synth/cov.txt"
# select data
###################################################################
snp_reader = Bed(bed_fn,count_A1=True)
# partition snps on chr6 vs rest
G1_chr = 6
G0 = snp_reader[:,snp_reader.pos[:,0] != G1_chr]
G1 = snp_reader[:,snp_reader.pos[:,0] == G1_chr]
# test on chr5
test_snps = snp_reader[:,snp_reader.pos[:,0] == 5]
results_df = single_snp(test_snps, pheno_fn, K0=G0, K1=G1, covar=cov_fn, GB_goal=2, count_A1=True)
import fastlmm.util.util as flutil
flutil.manhattan_plot(results_df[["Chr", "ChrPos", "PValue"]].values,pvalue_line=1e-5,xaxis_unit_bp=False)
pylab.show()
results_df.head()
###Output
_____no_output_____
###Markdown
Improving speed and memory use when there is little family structure: LMM(select)+PCs In the same publication, we have shown that a simpler, more computationally efficient model can be used when the data is confounded only by population structure and not by family structure or cryptic relatedness. Under these circumstances, we have found that a model with a single GSM based on selected SNPs in combination with principal components as fixed-effect covariates yields good control of type I error and power. This model, called LMM(select)+PCs, should be used with caution. Even if you explicitly remove closely related individuals from your data, cryptic relatedness may remain. To use this model, first identify the principal components to be used with the PCgeno algorithm as described in [Widmer et al., Scientific Reports 2014](http://www.nature.com/articles/srep06874). Then you can call `single_snp_select` with these PCs as covariates.
###Code
from pysnptools.snpreader import Bed
from fastlmm.util import compute_auto_pcs
from fastlmm.association import single_snp_select
# define file names
bed_fn = "../../tests/datasets/synth/all"
snp_reader = Bed(bed_fn,count_A1=True)
phen_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
# find number of PCs
pcs = compute_auto_pcs(bed_fn,count_A1=True)
print("selected number of PCs:", pcs["vals"].shape[1])
# test on chr5
test_snps = snp_reader[:,snp_reader.pos[:,0] == 5]
results_df = single_snp_select(test_snps=test_snps, G=snp_reader, pheno=phen_fn, covar=pcs, GB_goal=2)
import fastlmm.util.util as flutil
flutil.manhattan_plot(results_df[["Chr", "ChrPos", "PValue"]].values,pvalue_line=1e-5,xaxis_unit_bp=False)
pylab.show()
results_df.head()
###Output
selected number of PCs: 1
###Markdown
Epistasis You can test for epistatic interactions between pairs of SNPs as well. Here is an example analysis applied to the same synthetic data. Note that this version of the code uses a likelihood ratio test based on maximum-likelihood estimates. A REML-based version is in the works.
###Code
# import the algorithm and reader
from fastlmm.association import epistasis
from pysnptools.snpreader import Bed
# define file names
bed_reader = Bed("../../tests/datasets/synth/all", count_A1=True)
pheno_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
cov_fn = "../../tests/datasets/synth/cov.txt"
# partition data into the first 50 SNPs on chr1 and all but chr1
G0 = bed_reader[:,bed_reader.pos[:,0] != 1]
test_snps = bed_reader[:,bed_reader.pos[:,0] == 1][:,0:50]
# run epistasis analysis
results_df = epistasis(test_snps, pheno_fn, G0=G0, covar=cov_fn)
# qq plot
from fastlmm.util.stats import plotp
plotp.qqplot(results_df["PValue"].values, xlim=[0,5], ylim=[0,5])
# print head of results data frame
pd.set_option('display.width', 1000)
results_df.head(n=10)
###Output
lambda=0.9234
###Markdown
SNP-set association testing SNP-set association testing is performed similarly to single-SNP testing, except we test sets of SNPs by putting them together into one GSM, separate from any (optional) background GSM that corrects for confounding. Both LRT ("lrt") and score ("sc_davies") tests are supported. The LRT is computed by default, but you can switch to the score test by setting test=“sc_davies”. The score test can be more conservative in some settings (and hence can have less power), but it is closed form and often can be computed much faster. The algorithms in their current form are described in [Lippert et al., Bioinformatics 2014](http://bioinformatics.oxfordjournals.org/content/30/22/3206).Here is an example that uses the LRT and no background GSM--note the inflation in the results.
###Code
# this will take a few minutes to run
# import the algorithm and reader
from fastlmm.association import snp_set
from pysnptools.snpreader import Bed
import datetime
# define file names
test_snps_fn = "../../tests/datasets/synth/chr1"
pheno_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
cov_fn = "../../tests/datasets/synth/cov.txt"
set_list_fn = "../../tests/datasets/synth/chr1.sets.txt"
G0_fn = None
# run SNP-set analysis
results_df = snp_set(test_snps=test_snps_fn, G0=G0_fn, set_list=set_list_fn, pheno=pheno_fn, covar=cov_fn, test="lrt")
# qq plot
from fastlmm.util.stats import plotp
plotp.qqplot(results_df["P-value"].values, xlim=[0,5], ylim=[0,5], addlambda=False, legend="QQ")
# print head of results data frame
import pandas as pd
pd.set_option('display.width', 1000)
results_df.head(n=10)
###Output
_____no_output_____
###Markdown
As just mentioned, we see inflation of the test statistic because no background GSM was used. When you include a background GSM (using `G0`) type I error is better controlled. As in the single SNP case, proximal contamination is avoided using LOOC.
###Code
#Here we give a G0, the GSM, created via Bed file with all the chroms except chrom 1 (the test snps)
#WARNING: Even on a fast machine, this takes 24 minutes (but, running with test="sc_davies" takes seconds)
bed_reader = Bed("../../tests/datasets/synth/all.bed",count_A1=True)
chrNot1 = bed_reader[:,bed_reader.pos[:,0] != 1]
G0_fn = "../../tests/datasets/synth/chrNot1.bed"
Bed.write(G0_fn,chrNot1.read(),count_A1=True)
# run SNP-set analysis
results_df = snp_set(test_snps=test_snps_fn, G0=G0_fn, set_list=set_list_fn, pheno=pheno_fn, covar=cov_fn, test="lrt")
# qq plot
from fastlmm.util.stats import plotp
plotp.qqplot(results_df["P-value"].values, xlim=[0,5], ylim=[0,5], addlambda=False, legend="QQ")
# print head of results data frame
import pandas as pd
pd.set_option('display.width', 1000)
results_df.head(n=10)
###Output
constructing LMM - this should only happen once.
###Markdown
The `G1` option available for single-marker testing is not yet supported for set testing. With the same caveats as previously described, however, you can use PC covariates and a `G0` based on selected SNPs when your data is confounded, for example, by population structure. Prediction We can train on one set of examples and make predictions on another set.
###Code
from pysnptools.snpreader import Pheno, Bed
from fastlmm.inference import FastLMM
import numpy as np
# define file names
snp_reader = Bed("../../tests/datasets/synth/all",count_A1=True)
cov_fn = "../../tests/datasets/synth/cov.txt"
pheno_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
# Divide the data into train (all but the last 10 individuals) and test (the last 10 individuals)
# (the cov and pheno will automatially be divided to match)
train = snp_reader[:-10,:]
test = snp_reader[-10:,:]
# In the style of scikit-learn, create a predictor and then train it.
fastlmm = FastLMM(GB_goal=2)
fastlmm.fit(K0_train=train,X=cov_fn,y=pheno_fn)
# Now predict with it
mean, covariance = fastlmm.predict(K0_whole_test=test,X=cov_fn)
print("Predicted means and stdevs")
print(mean.val[:,0])
print(np.sqrt(np.diag(covariance.val)))
#Plot actual phenotype and predicted phenotype
whole_pheno = Pheno(pheno_fn)
actual_pheno = whole_pheno[whole_pheno.iid_to_index(mean.iid),:].read()
pylab.plot(actual_pheno.val,"r.")
pylab.plot(mean.val,"b.")
pylab.errorbar(np.arange(mean.iid_count),mean.val,yerr=np.sqrt(np.diag(covariance.val)),fmt='.')
pylab.xlabel('testing examples')
pylab.ylabel('phenotype, actual (red) and predicted (blue with stdev)')
pylab.show()
###Output
Predicted means and stdevs
[-0.01441867 -0.28643404 -0.25428548 0.08895204 -0.29511011 -0.39238039
0.06844378 -0.43282083 -0.21734717 -0.35522392]
[0.95795614 0.96181175 0.95260425 0.9499153 0.96047779 0.95411447
0.96020393 0.9817148 0.95198988 0.96271044]
###Markdown
FaST-LMM Manual=====Factored Spectrally Transformed Linear Mixed Models======== Version 0.4.0 FaST-LMM Team, January 9, 2020 Introduction FaST-LMM, which stands for Factored Spectrally Transformed Linear Mixed Models, is a program for performing genome-wide association studies (GWAS) on datasets of all sizes, up to one million samples. See [FaST-LMM's README.md](https://github.com/fastlmm/FaST-LMM/blob/master/README.md) for installation instructions, documentation, code, and a bibliography. Contacts* Email the developers at [email protected].* [Join](mailto:[email protected]?subject=Subscribe) the user discussion and announcement list (or use [web sign up](https://mail.python.org/mailman3/lists/fastlmm-user.python.org)).* [Open an issue](https://github.com/fastlmm/FaST-LMM/issues) on GitHub. Citing FaST-LMM If you use FaST-LMM in any published work, please cite [the relevant manuscript(s)](https://github.com/fastlmm/FaST-LMM/blob/master/README.md) describing it. Data preparation This version of FaST-LMM is designed for use with randomly ascertained data with Gaussian residuals. If you have case-control data with substantial ascertainment bias, you should first transform your phenotype(s) using [LEAP](https://github.com/omerwe/LEAP) (Weissbrod _et al._, _arXiv_ 2014). If you are analyzing continuous phenotypes with non-Gaussian residuals, you should first transform your phenotype(s) using [Warped-LMM](https://github.com/PMBio/warpedLMM) (Fusi et al., _Nature Commun_ 2014). FaST-LMM uses four input files containing (1) the SNP data to be tested, (2) the SNP data used to determine the genetic similarity matrix (GSM) between individuals (which can be different from (1)), (3) the phenotype data, and (4, optionally) a set of covariates. SNP files can be in PLINK format (ped/map, tped/tfam, bed/bim/fam, or fam/dat/map). For the most speed, use the binary format in SNP major order. See the PLINK manual http://pngu.mgh.harvard.edu/~purcell/plink/ (Purcell et al., _Am J Hum Genet_ 2007) for further details. FaST-LMM also supports the Hdf5 file format http://www.hdfgroup.org/HDF5/whatishdf5.html. See https://github.com/fastlmm/PySnpTools for more details. Note that each SNP will be standardized to have mean zero and standard deviation one across all individuals before processing. Missing values are mean imputed. The required file containing the phenotype uses the PLINK alternate phenotype format with no header. The covariate file also uses this format (with additional columns for multiple covariates). Notebook preparation and general use To prepare this notebook to run analyses, please run the following script.
###Code
#Import these for Python2 & Python 3 support
from __future__ import absolute_import
from __future__ import print_function
# set some ipython notebook properties
%matplotlib inline
# set degree of verbosity (adapt to INFO for more verbose output)
import logging
logging.basicConfig(level=logging.WARNING)
# set figure sizes
import pylab
pylab.rcParams['figure.figsize'] = (10.0, 8.0)
#pylab.plot([1,2,3],[4,5,6])
# set display width for pandas data frames
import pandas as pd
pd.set_option('display.width', 1000)
###Output
_____no_output_____
###Markdown
If you would like to run any of the code below from the command line, first copy it into a file (_e.g._, `test.py`), and then run it by typing `python test.py` at the command line. If you would like to see all the options for a function, just type `?` followed by the function name at an ipython prompt. Single-SNP association testing Traditional analysis: LMM(all) First, let's run a standard LMM analysis in which the GSM uses (almost) all available SNPs. The model for this analysis is called LMM(all) in [Widmer et al., Scientific Reports 2014](http://www.nature.com/articles/srep06874). We'll apply this model to the synthetic data in `tests\datasets\synth`. The data has 500 samples with 5000 SNPs, and was generated from a Balding-Nichols model with FST=0.05. When using a linear mixed model for association analysis, it is important to avoid proximal contamination ([Lippert _et al._, _Nat Meth_ 2011](http://www.nature.com/nmeth/journal/v8/n10/abs/nmeth.1681.html)). To understand proximal contamination, first note that a LMM with no fixed effects, using a realized relationship matrix for the GSM (as FaST-LMM does), is mathematically equivalent to linear regression of the SNPs on the phenotype, with weights integrated over independent Normal distributions having the same variance (_e.g._, Hayes _et al._, _Genet Res_ 2009). That is, a LMM using a given set of SNPs for the GSM is equivalent to a form of linear regression using those SNPs as covariates to correct for confounding. This equivalence implies that, when testing a given SNP, that SNP (and SNPs physically close to it) should be excluded from the computation of the GSM. If not, when testing a particular SNP, we would also be using that same SNP as a covariate, making the log likelihood of the null model higher than it should be, thus leading to deflation of the test statistic and loss of power. Excluding the SNP you are testing and those SNPs in close proximity to it from the GSM in a naïve way is extremely computationally expensive. A computationally efficient approach for performing the exclusion is to use a GSM computed from all but chromosome $i$ when testing SNPs on chromosome $i$ ([Lippert _et al._, _Nat Meth_ 2011](http://www.nature.com/nmeth/journal/v8/n10/abs/nmeth.1681.html)). We call this approach leave out one chromosome (LOOC). The analysis here does this.
###Code
# import the algorithm
from fastlmm.association import single_snp
# set up data
##############################
bed_fn = "../../tests/datasets/synth/all"
pheno_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
cov_fn = "../../tests/datasets/synth/cov.txt"
# run gwas
###################################################################
results_df = single_snp(bed_fn, pheno_fn, covar=cov_fn, count_A1=False)
# manhattan plot
import pylab
import fastlmm.util.util as flutil
pylab.rcParams['figure.figsize'] = (10.0, 8.0)#For some reason, need again (appears above too) to get big figures
flutil.manhattan_plot(results_df[["Chr", "ChrPos", "PValue"]].values,pvalue_line=1e-5,xaxis_unit_bp=False)
pylab.show()
# qq plot
from fastlmm.util.stats import plotp
plotp.qqplot(results_df["PValue"].values, xlim=[0,5], ylim=[0,5])
# print head of results data frame
import pandas as pd
pd.set_option('display.width', 1000)
results_df.head(n=10)
###Output
_____no_output_____
###Markdown
Improving power: LMM(all+select) In [Widmer et al., Scientific Reports 2014](http://www.nature.com/articles/srep06874), we have shown that power can be increased while still maintaining control of type I error by using two GSMs: one based on all SNPs (`G0`) and one based on selected SNPs that are highly correlated with the phenotype (`G1`). The model is called LMM(select + all). This approach has greater computational demands (we recommend using a cluster computer when analyzing large data sets). Here is an example of how to apply this model to the synthetic data.
###Code
# example for two kernel feature selection
# this takes a couple of minutes to run on a 20-proc machine.
from pysnptools.snpreader import Bed
from fastlmm.association import single_snp_all_plus_select
from pysnptools.util.mapreduce1.runner import LocalMultiProc
runner = LocalMultiProc(20,mkl_num_threads=5)
# define file names
snp_reader = Bed("../../tests/datasets/synth/all", count_A1=True)
pheno_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
cov_fn = "../../tests/datasets/synth/cov.txt"
# find the chr5 SNPs
test_snps = snp_reader[:,snp_reader.pos[:,0] == 5]
#select the 2nd kernel and run GWAS
results_df = single_snp_all_plus_select(test_snps=test_snps,G=snp_reader,pheno=pheno_fn,GB_goal=2,do_plot=True,runner=runner)
import fastlmm.util.util as flutil
flutil.manhattan_plot(results_df[["Chr", "ChrPos", "PValue"]].values,pvalue_line=1e-5,xaxis_unit_bp=False)
pylab.show()
results_df.head()
###Output
_____no_output_____
###Markdown
Aside: In some applications, you may want to use two kernels constructed from two pre-specified sets of SNPs (i.e., with no feature selection). Here we show how to do that and how to simultaneously find h2 and the mixing weight between the kernels.
###Code
# example script for two kernel without feature selection
import numpy as np
import pysnptools.util
from pysnptools.snpreader import Bed
from fastlmm.association import single_snp
# define file names
bed_fn = "../../tests/datasets/synth/all"
pheno_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
cov_fn = "../../tests/datasets/synth/cov.txt"
# select data
###################################################################
snp_reader = Bed(bed_fn,count_A1=True)
# partition snps on chr6 vs rest
G1_chr = 6
G0 = snp_reader[:,snp_reader.pos[:,0] != G1_chr]
G1 = snp_reader[:,snp_reader.pos[:,0] == G1_chr]
# test on chr5
test_snps = snp_reader[:,snp_reader.pos[:,0] == 5]
results_df = single_snp(test_snps, pheno_fn, K0=G0, K1=G1, covar=cov_fn, GB_goal=2, count_A1=True)
import fastlmm.util.util as flutil
flutil.manhattan_plot(results_df[["Chr", "ChrPos", "PValue"]].values,pvalue_line=1e-5,xaxis_unit_bp=False)
pylab.show()
results_df.head()
###Output
_____no_output_____
###Markdown
Improving speed and memory use when there is little family structure: LMM(select)+PCs In the same publication, we have shown that a simpler, more computationally efficient model can be used when the data is confounded only by population structure and not by family structure or cryptic relatedness. Under these circumstances, we have found that a model with a single GSM based on selected SNPs in combination with principal components as fixed-effect covariates yields good control of type I error and power. This model, called LMM(select)+PCs, should be used with caution. Even if you explicitly remove closely related individuals from your data, cryptic relatedness may remain. To use this model, first identify the principal components to be used with the PCgeno algorithm as described in [Widmer et al., Scientific Reports 2014](http://www.nature.com/articles/srep06874). Then you can call `single_snp_select` with these PCs as covariates.
###Code
from pysnptools.snpreader import Bed
from fastlmm.util import compute_auto_pcs
from fastlmm.association import single_snp_select
# define file names
bed_fn = "../../tests/datasets/synth/all"
snp_reader = Bed(bed_fn,count_A1=True)
phen_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
# find number of PCs
pcs = compute_auto_pcs(bed_fn,count_A1=True)
print("selected number of PCs:", pcs["vals"].shape[1])
# test on chr5
test_snps = snp_reader[:,snp_reader.pos[:,0] == 5]
results_df = single_snp_select(test_snps=test_snps, G=snp_reader, pheno=phen_fn, covar=pcs, GB_goal=2)
import fastlmm.util.util as flutil
flutil.manhattan_plot(results_df[["Chr", "ChrPos", "PValue"]].values,pvalue_line=1e-5,xaxis_unit_bp=False)
pylab.show()
results_df.head()
###Output
selected number of PCs: 1
###Markdown
Epistasis You can test for epistatic interactions between pairs of SNPs as well. Here is an example analysis applied to the same synthetic data. Note that this version of the code uses a likelihood ratio test based on maximum-likelihood estimates. A REML-based version is in the works.
###Code
# import the algorithm and reader
from fastlmm.association import epistasis
from pysnptools.snpreader import Bed
# define file names
bed_reader = Bed("../../tests/datasets/synth/all", count_A1=True)
pheno_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
cov_fn = "../../tests/datasets/synth/cov.txt"
# partition data into the first 50 SNPs on chr1 and all but chr1
G0 = bed_reader[:,bed_reader.pos[:,0] != 1]
test_snps = bed_reader[:,bed_reader.pos[:,0] == 1][:,0:50]
# run epistasis analysis
results_df = epistasis(test_snps, pheno_fn, G0=G0, covar=cov_fn)
# qq plot
from fastlmm.util.stats import plotp
plotp.qqplot(results_df["PValue"].values, xlim=[0,5], ylim=[0,5])
# print head of results data frame
pd.set_option('display.width', 1000)
results_df.head(n=10)
###Output
lambda=0.9234
###Markdown
SNP-set association testing SNP-set association testing is performed similarly to single-SNP testing, except we test sets of SNPs by putting them together into one GSM, separate from any (optional) background GSM that corrects for confounding. Both LRT ("lrt") and score ("sc_davies") tests are supported. The LRT is computed by default, but you can switch to the score test by setting test=“sc_davies”. The score test can be more conservative in some settings (and hence can have less power), but it is closed form and often can be computed much faster. The algorithms in their current form are described in [Lippert et al., Bioinformatics 2014](http://bioinformatics.oxfordjournals.org/content/30/22/3206).Here is an example that uses the LRT and no background GSM--note the inflation in the results.
###Code
# this will take a few minutes to run
# import the algorithm and reader
from fastlmm.association import snp_set
from pysnptools.snpreader import Bed
import datetime
# define file names
test_snps_fn = "../../tests/datasets/synth/chr1"
pheno_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
cov_fn = "../../tests/datasets/synth/cov.txt"
set_list_fn = "../../tests/datasets/synth/chr1.sets.txt"
G0_fn = None
# run SNP-set analysis
results_df = snp_set(test_snps=test_snps_fn, G0=G0_fn, set_list=set_list_fn, pheno=pheno_fn, covar=cov_fn, test="lrt")
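# As noted in the markdown above, the closed-form score test can be requested instead of the LRT
# and is often much faster; a sketch with the same inputs (not run here) would be:
# results_df = snp_set(test_snps=test_snps_fn, G0=G0_fn, set_list=set_list_fn, pheno=pheno_fn, covar=cov_fn, test="sc_davies")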
# qq plot
from fastlmm.util.stats import plotp
plotp.qqplot(results_df["P-value"].values, xlim=[0,5], ylim=[0,5], addlambda=False, legend="QQ")
# print head of results data frame
import pandas as pd
pd.set_option('display.width', 1000)
results_df.head(n=10)
###Output
_____no_output_____
###Markdown
As just mentioned, we see inflation of the test statistic because no background GSM was used. When you include a background GSM (using `G0`) type I error is better controlled. As in the single SNP case, proximal contamination is avoided using LOOC.
###Code
#Here we give a G0, the GSM, created via Bed file with all the chroms except chrom 1 (the test snps)
#WARNING: Even on a fast machine, this takes 24 minutes (but, running with test="sc_davies" takes seconds)
bed_reader = Bed("../../tests/datasets/synth/all.bed",count_A1=True)
chrNot1 = bed_reader[:,bed_reader.pos[:,0] != 1]
G0_fn = "../../tests/datasets/synth/chrNot1.bed"
Bed.write(G0_fn,chrNot1.read(),count_A1=True)
# run SNP-set analysis
results_df = snp_set(test_snps=test_snps_fn, G0=G0_fn, set_list=set_list_fn, pheno=pheno_fn, covar=cov_fn, test="lrt")
# qq plot
from fastlmm.util.stats import plotp
plotp.qqplot(results_df["P-value"].values, xlim=[0,5], ylim=[0,5], addlambda=False, legend="QQ")
# print head of results data frame
import pandas as pd
pd.set_option('display.width', 1000)
results_df.head(n=10)
###Output
constructing LMM - this should only happen once.
###Markdown
The `G1` option available for single-marker testing is not yet supported for set testing. With the same caveats as previously described, however, you can use PC covariates and a `G0` based on selected SNPs when your data is confounded, for example, by population structure. Prediction We can train on one set of examples and make predictions on another set.
###Code
from pysnptools.snpreader import Pheno, Bed
from fastlmm.inference import FastLMM
import numpy as np
# define file names
snp_reader = Bed("../../tests/datasets/synth/all",count_A1=True)
cov_fn = "../../tests/datasets/synth/cov.txt"
pheno_fn = "../../tests/datasets/synth/pheno_10_causals.txt"
# Divide the data into train (all but the last 10 individuals) and test (the last 10 individuals)
# (the cov and pheno will automatially be divided to match)
train = snp_reader[:-10,:]
test = snp_reader[-10:,:]
# In the style of scikit-learn, create a predictor and then train it.
fastlmm = FastLMM(GB_goal=2)
fastlmm.fit(K0_train=train,X=cov_fn,y=pheno_fn)
# Now predict with it
mean, covariance = fastlmm.predict(K0_whole_test=test,X=cov_fn)
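# 'mean' holds the predicted phenotype for each test individual; the diagonal of 'covariance'
# holds the predictive variances, so sqrt(diag) gives the stdevs printed and plotted below.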
print("Predicted means and stdevs")
print(mean.val[:,0])
print(np.sqrt(np.diag(covariance.val)))
#Plot actual phenotype and predicted phenotype
whole_pheno = Pheno(pheno_fn)
actual_pheno = whole_pheno[whole_pheno.iid_to_index(mean.iid),:].read()
pylab.plot(actual_pheno.val,"r.")
pylab.plot(mean.val,"b.")
pylab.errorbar(np.arange(mean.iid_count),mean.val,yerr=np.sqrt(np.diag(covariance.val)),fmt='.')
pylab.xlabel('testing examples')
pylab.ylabel('phenotype, actual (red) and predicted (blue with stdev)')
pylab.show()
###Output
Predicted means and stdevs
[-0.01441867 -0.28643404 -0.25428548 0.08895204 -0.29511011 -0.39238039
0.06844378 -0.43282083 -0.21734717 -0.35522392]
[0.95795614 0.96181175 0.95260425 0.9499153 0.96047779 0.95411447
0.96020393 0.9817148 0.95198988 0.96271044]
|
python/download_dataset.ipynb | ###Markdown
Images will be downloaded from **google images** using the [google-images-download](https://github.com/hardikvasa/google-images-download) project.
###Code
from google_images_download import google_images_download

response = google_images_download.googleimagesdownload()
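# As far as I understand the google-images-download API: "keywords" is a comma-separated list of
# search queries, one sub-folder per keyword is created under "output_directory", and limits above
# 100 images per keyword require a chromedriver path so selenium can scroll the results page.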
response.download(
{
"keywords":"croissant,pain au chocolat",
"output_directory":"dataset",
"limit":300,
"chromedriver": "/home/pierre/Development/Devoxx/venv/lib/python3.6/site-packages/chromedriver_binary/chromedriver"
})
###Output
_____no_output_____ |
models/nn model without non scored[old cv].ipynb | ###Markdown
This is an updated version of my previous public kernel. Updates - * Implementing Feature Engineering * Implementing Label Smoothing (with smoothing s, the hard 0/1 targets become s/2 and 1 - s/2) **If you find it helpful and consider forking, please do upvote** :)
###Code
import sys
sys.path.append('../input/iterativestratification')
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
import numpy as np
import random
import pandas as pd
import matplotlib.pyplot as plt
import os
import copy
import seaborn as sns
from sklearn import preprocessing
from sklearn.metrics import log_loss
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA,FactorAnalysis
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import warnings
warnings.filterwarnings('ignore')
from sklearn.preprocessing import QuantileTransformer
os.listdir('../input/lish-moa')
train_features = pd.read_csv('../input/lish-moa/train_features.csv')
train_targets_scored = pd.read_csv('../input/lish-moa/train_targets_scored.csv')
train_targets_nonscored = pd.read_csv('../input/lish-moa/train_targets_nonscored.csv')
test_features = pd.read_csv('../input/lish-moa/test_features.csv')
sample_submission = pd.read_csv('../input/lish-moa/sample_submission.csv')
GENES = [col for col in train_features.columns if col.startswith('g-')]
CELLS = [col for col in train_features.columns if col.startswith('c-')]
#RankGauss
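# The "RankGauss" trick: QuantileTransformer with output_distribution="normal" maps each feature
# through its empirical ranks onto a standard-normal shape, which tends to help neural nets cope
# with the heavy-tailed raw gene/cell measurements. Note the transformer is fit on the training
# values only and then applied to both train and test columns.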
for col in (GENES + CELLS):
transformer = QuantileTransformer(n_quantiles=100,random_state=0, output_distribution="normal")
vec_len = len(train_features[col].values)
vec_len_test = len(test_features[col].values)
raw_vec = train_features[col].values.reshape(vec_len, 1)
transformer.fit(raw_vec)
train_features[col] = transformer.transform(raw_vec).reshape(1, vec_len)[0]
test_features[col] = transformer.transform(test_features[col].values.reshape(vec_len_test, 1)).reshape(1, vec_len_test)[0]
def seed_everything(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_everything(seed=42)
# GENES
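# Despite the "pca_G-" column names, the reduction below uses FactorAnalysis (not PCA): the g-
# columns of train+test combined are summarized into n_comp latent factors, which are appended
# to (not substituted for) the original feature columns.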
n_comp = 90 #<--Update
data = pd.concat([pd.DataFrame(train_features[GENES]), pd.DataFrame(test_features[GENES])])
data2 = (FactorAnalysis(n_components=n_comp, random_state=42).fit_transform(data[GENES]))
train2 = data2[:train_features.shape[0]]; test2 = data2[-test_features.shape[0]:]
train2 = pd.DataFrame(train2, columns=[f'pca_G-{i}' for i in range(n_comp)])
test2 = pd.DataFrame(test2, columns=[f'pca_G-{i}' for i in range(n_comp)])
# drop_cols = [f'c-{i}' for i in range(n_comp,len(GENES))]
train_features = pd.concat((train_features, train2), axis=1)
test_features = pd.concat((test_features, test2), axis=1)
#CELLS
n_comp = 50 #<--Update
data = pd.concat([pd.DataFrame(train_features[CELLS]), pd.DataFrame(test_features[CELLS])])
data2 = (FactorAnalysis(n_components=n_comp, random_state=42).fit_transform(data[CELLS]))
train2 = data2[:train_features.shape[0]]; test2 = data2[-test_features.shape[0]:]
train2 = pd.DataFrame(train2, columns=[f'pca_C-{i}' for i in range(n_comp)])
test2 = pd.DataFrame(test2, columns=[f'pca_C-{i}' for i in range(n_comp)])
# drop_cols = [f'c-{i}' for i in range(n_comp,len(CELLS))]
train_features = pd.concat((train_features, train2), axis=1)
test_features = pd.concat((test_features, test2), axis=1)
train_features.shape
from sklearn.feature_selection import VarianceThreshold
#var_thresh = VarianceThreshold(0.8) #<-- Update
var_thresh = QuantileTransformer(n_quantiles=100,random_state=0, output_distribution="normal")
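# Note: the variable is still called var_thresh (a leftover from the commented-out VarianceThreshold
# above), but it now applies a second rank-Gauss transform to every column from index 4 onward,
# i.e. skipping sig_id, cp_type, cp_time and cp_dose.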
data = train_features.append(test_features)
data_transformed = var_thresh.fit_transform(data.iloc[:, 4:])
train_features_transformed = data_transformed[ : train_features.shape[0]]
test_features_transformed = data_transformed[-test_features.shape[0] : ]
train_features = pd.DataFrame(train_features[['sig_id','cp_type','cp_time','cp_dose']].values.reshape(-1, 4),\
columns=['sig_id','cp_type','cp_time','cp_dose'])
train_features = pd.concat([train_features, pd.DataFrame(train_features_transformed)], axis=1)
test_features = pd.DataFrame(test_features[['sig_id','cp_type','cp_time','cp_dose']].values.reshape(-1, 4),\
columns=['sig_id','cp_type','cp_time','cp_dose'])
test_features = pd.concat([test_features, pd.DataFrame(test_features_transformed)], axis=1)
train_features.shape
from sklearn.cluster import KMeans
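# fe_cluster adds categorical cluster-membership features: KMeans is fit on the combined train+test
# gene columns (and, separately, the cell columns), and the resulting cluster labels are one-hot
# encoded via pd.get_dummies.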
def fe_cluster(train, test, n_clusters_g = 45, n_clusters_c = 15, SEED = 123):
features_g = list(train.columns[4:776])
features_c = list(train.columns[776:876])
def create_cluster(train, test, features, kind = 'g', n_clusters = n_clusters_g):
train_ = train[features].copy()
test_ = test[features].copy()
data = pd.concat([train_, test_], axis = 0)
kmeans = KMeans(n_clusters = n_clusters, random_state = SEED).fit(data)
train[f'clusters_{kind}'] = kmeans.labels_[:train.shape[0]]
test[f'clusters_{kind}'] = kmeans.labels_[train.shape[0]:]
train = pd.get_dummies(train, columns = [f'clusters_{kind}'])
test = pd.get_dummies(test, columns = [f'clusters_{kind}'])
return train, test
train, test = create_cluster(train, test, features_g, kind = 'g', n_clusters = n_clusters_g)
train, test = create_cluster(train, test, features_c, kind = 'c', n_clusters = n_clusters_c)
return train, test
train_features ,test_features=fe_cluster(train_features,test_features)
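# fe_stats adds row-wise summary statistics (sum, mean, std, kurtosis, skew) computed over the gene
# columns, the cell columns, and both groups combined.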
def fe_stats(train, test):
features_g = list(train.columns[4:776])
features_c = list(train.columns[776:876])
for df in train, test:
df['g_sum'] = df[features_g].sum(axis = 1)
df['g_mean'] = df[features_g].mean(axis = 1)
df['g_std'] = df[features_g].std(axis = 1)
df['g_kurt'] = df[features_g].kurtosis(axis = 1)
df['g_skew'] = df[features_g].skew(axis = 1)
df['c_sum'] = df[features_c].sum(axis = 1)
df['c_mean'] = df[features_c].mean(axis = 1)
df['c_std'] = df[features_c].std(axis = 1)
df['c_kurt'] = df[features_c].kurtosis(axis = 1)
df['c_skew'] = df[features_c].skew(axis = 1)
df['gc_sum'] = df[features_g + features_c].sum(axis = 1)
df['gc_mean'] = df[features_g + features_c].mean(axis = 1)
df['gc_std'] = df[features_g + features_c].std(axis = 1)
df['gc_kurt'] = df[features_g + features_c].kurtosis(axis = 1)
df['gc_skew'] = df[features_g + features_c].skew(axis = 1)
return train, test
train_features,test_features=fe_stats(train_features,test_features)
train = train_features.merge(train_targets_scored, on='sig_id')
train = train[train['cp_type']!='ctl_vehicle'].reset_index(drop=True)
test = test_features[test_features['cp_type']!='ctl_vehicle'].reset_index(drop=True)
target = train[train_targets_scored.columns]
train = train.drop('cp_type', axis=1)
test = test.drop('cp_type', axis=1)
train
target_cols = target.drop('sig_id', axis=1).columns.values.tolist()
folds = train.copy()
mskf = MultilabelStratifiedKFold(n_splits=5)
for f, (t_idx, v_idx) in enumerate(mskf.split(X=train, y=target)):
folds.loc[v_idx, 'kfold'] = int(f)
folds['kfold'] = folds['kfold'].astype(int)
folds
print(train.shape)
print(folds.shape)
print(test.shape)
print(target.shape)
print(sample_submission.shape)
###Output
(21948, 1296)
(21948, 1297)
(3624, 1090)
(21948, 207)
(3982, 207)
###Markdown
Dataset Classes
###Code
class MoADataset:
def __init__(self, features, targets):
self.features = features
self.targets = targets
def __len__(self):
return (self.features.shape[0])
def __getitem__(self, idx):
dct = {
'x' : torch.tensor(self.features[idx, :], dtype=torch.float),
'y' : torch.tensor(self.targets[idx, :], dtype=torch.float)
}
return dct
class TestDataset:
def __init__(self, features):
self.features = features
def __len__(self):
return (self.features.shape[0])
def __getitem__(self, idx):
dct = {
'x' : torch.tensor(self.features[idx, :], dtype=torch.float)
}
return dct
def train_fn(model, optimizer, scheduler, loss_fn, dataloader, device):
model.train()
final_loss = 0
for data in dataloader:
optimizer.zero_grad()
inputs, targets = data['x'].to(device), data['y'].to(device)
# print(inputs.shape)
outputs = model(inputs)
loss = loss_fn(outputs, targets)
loss.backward()
optimizer.step()
scheduler.step()
final_loss += loss.item()
final_loss /= len(dataloader)
return final_loss
def valid_fn(model, loss_fn, dataloader, device):
model.eval()
final_loss = 0
valid_preds = []
for data in dataloader:
inputs, targets = data['x'].to(device), data['y'].to(device)
outputs = model(inputs)
loss = loss_fn(outputs, targets)
final_loss += loss.item()
valid_preds.append(outputs.sigmoid().detach().cpu().numpy())
final_loss /= len(dataloader)
valid_preds = np.concatenate(valid_preds)
return final_loss, valid_preds
def inference_fn(model, dataloader, device):
model.eval()
preds = []
for data in dataloader:
inputs = data['x'].to(device)
with torch.no_grad():
outputs = model(inputs)
preds.append(outputs.sigmoid().detach().cpu().numpy())
preds = np.concatenate(preds)
return preds
import torch
from torch.nn.modules.loss import _WeightedLoss
import torch.nn.functional as F
class SmoothBCEwLogits(_WeightedLoss):
def __init__(self, weight=None, reduction='mean', smoothing=0.0):
super().__init__(weight=weight, reduction=reduction)
self.smoothing = smoothing
self.weight = weight
self.reduction = reduction
@staticmethod
def _smooth(targets:torch.Tensor, n_labels:int, smoothing=0.0):
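# With smoothing s, hard targets {0, 1} become {s/2, 1 - s/2}, softening the labels slightly so the
# network is not pushed toward extreme logits.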
assert 0 <= smoothing < 1
with torch.no_grad():
targets = targets * (1.0 - smoothing) + 0.5 * smoothing
return targets
def forward(self, inputs, targets):
targets = SmoothBCEwLogits._smooth(targets, inputs.size(-1),
self.smoothing)
loss = F.binary_cross_entropy_with_logits(inputs, targets,self.weight)
if self.reduction == 'sum':
loss = loss.sum()
elif self.reduction == 'mean':
loss = loss.mean()
return loss
class Model(nn.Module): # <-- Update
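# A small 3-layer MLP built from BatchNorm -> Linear -> LeakyReLU -> Dropout blocks; the first and
# last Linear layers use weight normalization. The final layer emits raw logits per target, and the
# sigmoid is applied later inside the loss and the inference code.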
def __init__(self, num_features, num_targets, hidden_size):
super(Model, self).__init__()
self.batch_norm1 = nn.BatchNorm1d(num_features)
self.dense1 = nn.utils.weight_norm(nn.Linear(num_features, hidden_size))
self.batch_norm2 = nn.BatchNorm1d(hidden_size)
self.dropout2 = nn.Dropout(0.25)
self.dense2 = nn.Linear(hidden_size, hidden_size)
self.batch_norm3 = nn.BatchNorm1d(hidden_size)
self.dropout3 = nn.Dropout(0.25)
self.dense3 = nn.utils.weight_norm(nn.Linear(hidden_size, num_targets))
def forward(self, x):
x = self.batch_norm1(x)
x = F.leaky_relu(self.dense1(x))
x = self.batch_norm2(x)
x = self.dropout2(x)
x = F.leaky_relu(self.dense2(x))
x = self.batch_norm3(x)
x = self.dropout3(x)
x = self.dense3(x)
return x
def process_data(data):
data = pd.get_dummies(data, columns=['cp_time','cp_dose'])
return data
feature_cols = [c for c in process_data(folds).columns if c not in target_cols]
feature_cols = [c for c in feature_cols if c not in ['kfold','sig_id']]
len(feature_cols)
# HyperParameters
DEVICE = ('cuda' if torch.cuda.is_available() else 'cpu')
EPOCHS = 25
BATCH_SIZE = 128
LEARNING_RATE = 5e-3
WEIGHT_DECAY = 1e-5
NFOLDS = 5 #<-- Update
EARLY_STOPPING_STEPS = 10
EARLY_STOP = False
num_features=len(feature_cols)
num_targets=len(target_cols)
hidden_size=2048
def run_training(fold, seed):
seed_everything(seed)
train = process_data(folds)
test_ = process_data(test)
trn_idx = train[train['kfold'] != fold].index
val_idx = train[train['kfold'] == fold].index
train_df = train[train['kfold'] != fold].reset_index(drop=True)
valid_df = train[train['kfold'] == fold].reset_index(drop=True)
x_train, y_train = train_df[feature_cols].values, train_df[target_cols].values
x_valid, y_valid = valid_df[feature_cols].values, valid_df[target_cols].values
train_dataset = MoADataset(x_train, y_train)
valid_dataset = MoADataset(x_valid, y_valid)
trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
validloader = torch.utils.data.DataLoader(valid_dataset, batch_size=BATCH_SIZE, shuffle=False)
model = Model(
num_features=num_features,
num_targets=num_targets,
hidden_size=hidden_size,
)
model.to(DEVICE)
optimizer = torch.optim.Adam(model.parameters(), lr=5e-3, weight_decay=WEIGHT_DECAY)
scheduler = optim.lr_scheduler.OneCycleLR(optimizer=optimizer, pct_start=0.1, div_factor=1e3,
max_lr=1e-2, epochs=EPOCHS, steps_per_epoch=len(trainloader))
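# OneCycleLR warms the learning rate up from max_lr/div_factor to max_lr over the first 10% of the
# total steps (pct_start=0.1) and then anneals it back down; scheduler.step() is therefore called
# once per batch in train_fn, not once per epoch.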
loss_fn = nn.BCEWithLogitsLoss()
loss_tr = SmoothBCEwLogits(smoothing =0.001)
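# Two losses on purpose: the label-smoothed BCE (loss_tr) is used only for training, while the plain
# BCEWithLogitsLoss (loss_fn) is used for validation so the reported valid_loss stays comparable to
# the competition's log-loss metric.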
early_stopping_steps = EARLY_STOPPING_STEPS
early_step = 0
oof = np.zeros((len(train), target.iloc[:, 1:].shape[1]))
best_loss = np.inf
for epoch in range(EPOCHS):
train_loss = train_fn(model, optimizer,scheduler, loss_tr, trainloader, DEVICE)
print(f"SEED: {seed}, FOLD: {fold}, EPOCH: {epoch}, train_loss: {train_loss}")
valid_loss, valid_preds = valid_fn(model, loss_fn, validloader, DEVICE)
print(f"SEED: {seed} ,FOLD: {fold}, EPOCH: {epoch}, valid_loss: {valid_loss}")
if valid_loss < best_loss:
best_loss = valid_loss
oof[val_idx] = valid_preds
torch.save(model.state_dict(), f"FOLD{fold}_.pth")
elif(EARLY_STOP == True):
early_step += 1
if (early_step >= early_stopping_steps):
break
#--------------------- PREDICTION---------------------
x_test = test_[feature_cols].values
testdataset = TestDataset(x_test)
testloader = torch.utils.data.DataLoader(testdataset, batch_size=BATCH_SIZE, shuffle=False)
model = Model(
num_features=num_features,
num_targets=num_targets,
hidden_size=hidden_size,
)
model.load_state_dict(torch.load(f"FOLD{fold}_.pth"))
model.to(DEVICE)
predictions = np.zeros((len(test_), target.iloc[:, 1:].shape[1]))
predictions = inference_fn(model, testloader, DEVICE)
return oof, predictions
def run_k_fold(NFOLDS, seed):
oof = np.zeros((len(train), len(target_cols)))
predictions = np.zeros((len(test), len(target_cols)))
for fold in range(NFOLDS):
oof_, pred_ = run_training(fold, seed)
predictions += pred_ / NFOLDS
oof += oof_
return oof, predictions
# Averaging on multiple SEEDS
SEED = [940, 1513, 1269,1392,1119,1303] #<-- Update
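# Each seed re-runs the full NFOLDS cross-validation; the out-of-fold and test predictions are then
# averaged across seeds, which mainly reduces the variance coming from weight initialization and
# fold shuffling.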
oof = np.zeros((len(train), len(target_cols)))
predictions = np.zeros((len(test), len(target_cols)))
for seed in SEED:
oof_, predictions_ = run_k_fold(NFOLDS, seed)
oof += oof_ / len(SEED)
predictions += predictions_ / len(SEED)
train[target_cols] = oof
test[target_cols] = predictions
train_targets_scored
len(target_cols)
valid_results = train_targets_scored.drop(columns=target_cols).merge(train[['sig_id']+target_cols], on='sig_id', how='left').fillna(0)
y_true = train_targets_scored[target_cols].values
y_pred = valid_results[target_cols].values
score = 0
for i in range(len(target_cols)):
score_ = log_loss(y_true[:, i], y_pred[:, i])
score += score_ / target.shape[1]
print("CV log_loss: ", score)
sub = sample_submission.drop(columns=target_cols).merge(test[['sig_id']+target_cols], on='sig_id', how='left').fillna(0)
sub.to_csv('submission.csv', index=False)
sub.shape
###Output
_____no_output_____ |
dev-notebooks/is2_demo_var_subset.ipynb | ###Markdown
Choose a region for subsetting as well. Use the same region as in the core demo.
###Code
region_a = ipd.Icesat2Data('ATL09',[-55, 68, -48, 71],['2019-02-22','2019-02-28'], \
start_time='00:00:00', end_time='23:59:59', version='2')
region_07 = ipd.Icesat2Data('ATL07',[-55, 68, -48, 71],['2019-02-22','2019-02-28'], \
start_time='00:00:00', end_time='23:59:59', version='2')
session=region_a.earthdata_login('liuzheng','[email protected]')
session=region_a.earthdata_login('jessica.scheick','[email protected]')
###Output
Earthdata Login password: ········
###Markdown
Now, generate the variable dictionary. The variable dictionary is obtained by parsing the dataset xml information from NSIDC, by calling ```show_custom_options(session)```. The data variables are stored in ```region_a._cust_options['variables']```.
###Code
opts = region_a.show_custom_options(session,dictview=True)
###Output
_____no_output_____
###Markdown
TESTING SECTION: Set up the user-provided variable list to subset variables: Please TRY OUT the tests below. Options for inputting variables: 1. Use a default list for the dataset (not yet fully implemented across all datasets); 2. Provide a list of variable names, which will return all path-variable combinations (e.g. longitude will return longitude for both beams for all profiles); 3. Provide a list of variable names and/or specific profiles/beams (not yet implemented). An example of each type of input is below. Test 1: Add ```latitude``` for profiles 1 and 2
###Code
#default variables
var_dict = region_a.build_wanted_var_list(beam_list=['profile_1','profile_2'],var_list=['latitude'])
region_a.variables
#pprint(var_dict)
###Output
_____no_output_____
###Markdown
Test 2: Add ```latitude``` for profile 2 and overwrite
###Code
region_a.build_wanted_var_list(beam_list=['profile_2'],var_list=['latitude'], append=False)
region_a.variables
###Output
_____no_output_____
###Markdown
Test 2B: Add ```latitude``` for profile 3 and overwrite (so profile_2 should be removed)
###Code
region_a.build_wanted_var_list(beam_list=['profile_3'],var_list=['latitude'],append=False)
region_a.variables
###Output
_____no_output_____
###Markdown
Test 3: Add ```latitude``` for all profiles with the keyword ```low_rate```, and append
###Code
#region_a.build_wanted_var_list(beam_list=['profile_3'],var_list=['latitude'],keyword_list=['low_rate'],append=True)
region_a.build_wanted_var_list(var_list=['latitude'],keyword_list=['low_rate'],append=True)
region_a.variables
###Output
_____no_output_____
###Markdown
Before Test 4: Go back to Test 2. Overwrite ```latitude``` for profile 2 only.
###Code
region_a.build_wanted_var_list(beam_list=['profile_2'],var_list=['latitude'],append=False)
region_a.variables
###Output
_____no_output_____
###Markdown
Test 5: Append ```latitude``` for profile 3 and ```high_rate``` only
###Code
region_a.build_wanted_var_list(beam_list=['profile_3'],var_list=['latitude'],keyword_list=['low_rate'])
region_a.variables
###Output
_____no_output_____
###Markdown
Test 6: Add ```sc_orient_time``` under ```orbit_info```.
###Code
region_a.build_wanted_var_list(keyword_list=['orbit_info'],var_list=['sc_orient_time'])
region_a.variables
###Output
_____no_output_____
###Markdown
Test 7: Add all variables under ```orbit_info```, but the path to ```sc_orient_time``` should not be duplicated.
###Code
region_a.build_wanted_var_list(keyword_list=['orbit_info'],append=True)
region_a.variables
###Output
_____no_output_____
###Markdown
Test 8: Add all defaults for all beams and all keywords. After this, you have to reinitialize ```region_a``` and regenerate the variable dictionary to run the above tests again (unless you set ```append=False```).
###Code
region_a.build_wanted_var_list(defaults=True)
#variable names + beams/profiles
###STILL NEED TO MAKE THE BELOW POSSIBLE IN THE CODE
###Output
_____no_output_____
###Markdown
Setting params and download
###Code
region_a.build_CMR_params()
region_a.build_reqconfig_params('download')
region_a.build_subset_params(**{'Coverage':var_dict})
region_a.subsetparams
#Identical to above block, but enters the keywords with a different style
region_a.build_subset_params(Coverage=var_dict)
region_a.subsetparams
region_a.order_granules(session, verbose=True)
region_a.download_granules(session,'.')
###Output
_____no_output_____
###Markdown
Examine downloaded subset data file
###Code
fn = '166458094/processed_ATL09_20190222003738_08490201_002_01.h5'
###Output
_____no_output_____
###Markdown
Check the downloaded dataset. Take ```latitude``` for example,
###Code
varname = 'latitude'
#varname = 'sc_orient'
varlist = []
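# h5py visititems callback: record the full path of every Dataset node so we can look up the
# requested variable name anywhere in the file's group hierarchy.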
def IS2h5walk(vname, h5node):
if isinstance(h5node, h5py.Dataset):
varlist.append(vname)
return
with h5py.File(fn,'r') as h5pt:
h5pt.visititems(IS2h5walk)
for tvar in varlist:
vpath,vn = os.path.split(tvar)
if vn==varname: print(tvar)
###Output
_____no_output_____
###Markdown
Compare the variable ```latitude``` in the original data and the subsetted data
###Code
region_a.variables['latitude']
', '.join(x for x in ['gt1l','gt1r'])
###Output
_____no_output_____
###Markdown
Look at variables from various datasets to generalize code
###Code
region_06 = ipd.Icesat2Data('ATL06',[-55, 68, -48, 71],['2019-02-22','2019-02-28'], \
start_time='00:00:00', end_time='23:59:59', version='2')
region_07 = ipd.Icesat2Data('ATL07',[-55, 68, -48, 71],['2019-02-22','2019-02-28'], \
start_time='00:00:00', end_time='23:59:59', version='2')
region_08 = ipd.Icesat2Data('ATL08',[-55, 68, -48, 71],['2019-02-22','2019-02-28'], \
start_time='00:00:00', end_time='23:59:59', version='2')
region_09 = ipd.Icesat2Data('ATL09',[-55, 68, -48, 71],['2019-02-22','2019-02-28'], \
start_time='00:00:00', end_time='23:59:59', version='2')
region_10 = ipd.Icesat2Data('ATL10',[-55, 68, -48, 71],['2019-02-22','2019-02-28'], \
start_time='00:00:00', end_time='23:59:59', version='2')
region_12 = ipd.Icesat2Data('ATL12',[-55, 68, -48, 71],['2019-02-22','2019-02-28'], \
start_time='00:00:00', end_time='23:59:59', version='2')
session=region_a.earthdata_login('liuzheng','[email protected]')
session=region_06.earthdata_login('jessica.scheick','[email protected]')
dset = region_10
dset.show_custom_options(session,dictview=True)
## show the maximum depth for variables in dataset
for dset in [region_06, region_07, region_08, region_09, region_10, region_12]:
dset._get_custom_options(session)
max_dep = 0
for vn in dset._cust_options['variables']:
wrds = vn.split('/')
if len(wrds)-1> max_dep: max_dep = len(wrds)-1
print(dset.dataset,max_dep)
vgrp, paths = region_10._parse_var_list(region_10._cust_options['variables'])
import pprint
pprint.pprint(vgrp)
vgrp.keys()
for dset in [region_06, region_07, region_08, region_09, region_10, region_12]:
dset.show_custom_options(session, dictview=True)
d=6
for dset in [region_06, region_07, region_08, region_09, region_10, region_12]:
vgrp, paths = dset._parse_var_list(dset._cust_options['variables'])
print(d)
d=d+1
for p in paths:
print(np.unique(np.array(p)))
print(np.unique(np.array(vgrp.keys())))
###Output
_____no_output_____ |
tutorials/Variables.ipynb | ###Markdown
Variables
###Code
%load ../rapaio-bootstrap
###Output
_____no_output_____
###Markdown
A variable represents a unidimensional set of observations which come from the same random variable (hence the name). Because the values of a variable are supposed to be generated from the same process, they have the same type and semantics. Variables have names and are of some given types. `Var` implementations can be categorized as storage variables and view variables. Storage variables are those variables which directly contain and maintain data. View variables are higher constructs obtained by filtering and/or merging other variables. View variables do not contain data directly, but maintain references to the storage variables they wrap and allow reading and updating operations in the same way as storage variables. The storage variables implemented have different storage types and representations. We have `VarDouble`, `VarInt`, `VarNominal`, `VarBinary`, `VarLong`, `VarString` and `VarInstant`. VarDouble Numeric double variables are implemented by `VarDouble` and are used to handle discrete or continuous numerical values. Double variables offer value and label representations. All other representations can be used, but with caution since they can alter the content. For example, the int representation truncates floating point values to the biggest integer value below them; however, the int setter sets a correct value since an integer can be converted to a double with no information loss. Various builders Double variables can be built in various ways and there are handy shortcuts for various scenarios.
###Code
// builds a variable with no data
Var empty1 = VarDouble.empty();
// builds a variable of a given size which contains only missing data
Var empty2 = VarDouble.empty(100);
// a sequence of numbers, starting from 0, ending with 5 with step 1
Var seq1 = VarDouble.seq(5);
// a sequence of numbers, starting from 1, ending with 5 with step 1
Var seq2 = VarDouble.seq(1, 5);
// a sequence of numbers starting at 0, ending at 1 with step 0.1
Var seq3 = VarDouble.seq(0, 1, 0.1);
// build a variable of a given size which contains only zeros
Var fill1 = VarDouble.fill(5);
// builds a variable of a given size which contains only ones
Var fill2 = VarDouble.fill(5, 1);
// numeric variable which contains the values copied from another variable
Var copy1 = VarDouble.copy(seq1);
// numeric variable with values copied from a collection
Normal normal = Normal.std();
List<Double> list1 = DoubleStream.generate(normal::sampleNext).limit(10).boxed().collect(Collectors.toList());
Var copy2 = VarDouble.copy(list1);
// numeric variable with values copied from a double array
Var copy3 = VarDouble.copy(1, 3, 4.0, 7);
// numeric variable with values copied from an int array
Var copy4 = VarDouble.copy(1, 3, 4, 7);
// numeric variables with values generated as the sqrt of the row number
Var from1 = VarDouble.from(10, Math::sqrt);
// numeric variable with values generated using a function which receives a row value
// as parameter and outputs a double value; in this case we generate values as
// a sum of the values of other two variables
Var from2 = VarDouble.from(4, row -> copy3.getDouble(row) + copy4.getDouble(row));
// numeric variable with values generated from values of another variable using
// a transformation provided via a lambda function
Var from3 = VarDouble.from(from1, x -> x + 1);
###Output
_____no_output_____
###Markdown
**Wrapper around a double array** This builder creates a new numeric variable instance as a wrapper around a double array of values. Notice that it is not the same as the copy builder, since in the wrapper case any change in the new numerical variable is reflected also in the original array of numbers. In the case of the copy builder this is not true, since the copy builder \(as its name implies\) creates an internal copy of the array.
###Code
double[] array = DoubleArrays.newFrom(1, 5, x -> x*x);
Var wrap1 = VarDouble.wrap(array);
wrap1.printString();
array[2] = 17;
wrap1.printString();
###Output
VarDouble [name:"?", rowCount:4, values: 1.0, 4.0, 9.0, 16.0]
VarDouble [name:"?", rowCount:4, values: 1.0, 4.0, 17.0, 16.0]
###Markdown
Printing variables Most of the objects which contain information implement the `Printable` interface. This interface allows one to display a summary of the content of the given object. This is also the case with numerical variables. Additionally, the numerical variables implement two other methods, one which displays all the values and another which displays only the first values.
###Code
// build a numerical variable with values as the sqrt
// of the first 200 integer values
Var x = VarDouble.from(200, Math::sqrt).name("x");
// prints the text produced by toString
x.printString();
// print a reasonable part of value
x.printContent();
// print all values of the variable
x.printFullContent();
// print a summary of the content of the variable
x.printSummary();
WS.image(points(x, VarDouble.from(x.size(), Normal.std()::sampleNext), color(1), pch(3)), 600, 300);
###Output
_____no_output_____
###Markdown
VarOp interface There are various mathematical operations available under the `VarOp` interface. These operators can be accessed by calling the `op()` method on any variable. The following examples use some of those operators.
###Code
// computes the sum of all values in variable
x.op().nansum();
// apply a lambda function on a copy of the variable
x.copy().op().apply(v -> Math.sqrt(v + 3./8)).printContent();
// add a constant to all values of a copy
x.copy().op().plus(Math.E);
###Output
_____no_output_____
###Markdown
Nominal variables Nominal variables are defined by `VarNominal` and contain string-valued categories. Nominal variables offer integer and label representations. The label representation describes the categories as labels or texts, while the integer representation is an integer index over the categories. The index representation does not imply an order between categories/labels. Various builders Nominal variables can be built in various ways and there are handy shortcuts for various scenarios.
###Code
// creates an empty nominal variable with provided levels
var nom1 = VarNominal.empty(10, "a", "b");
// note the first label which is a placeholder for missing values
nom1.levels();
VarNominal.from(10, row -> row % 2 == 0 ? "even" : "odd").printString();
VarNominal.copy("a", "b", "c", "b").printContent()
Unique.of(VarNominal.copy("a","b","c","b"))
###Output
_____no_output_____
###Markdown
Overview of variable types All variables implement a common API which makes it easy to manipulate their content in a generic way. However, depending on the type, some variables might not implement some operations, or they fall back on specific implementations which make sense for that variable type. For example, for a numeric variable it makes sense to set the value at some specific index in order to change it. For nominal variables, the same operation would not make sense. Instead of meaning 'change the numerical value at some given position', it has the following semantics: 'change the string value to the category associated with the rounded integer value of the double parameter'. Let's see an example:
###Code
// we create a nominal value with label `a` of first position and label `b` on the second position
var nom = VarNominal.copy("a", "b");
nom.printString();
// set the value from the first position to the label which corresponds to indexed label 2, which is `b`
nom.setDouble(0, 2.1);
// let's see the result
nom.printString();
###Output
VarNominal [name:"?", rowCount:2, values: a, b]
VarNominal [name:"?", rowCount:2, values: b, b]
###Markdown
Missing values All variables offer an API for missing values. A missing value is a special value which is used as a placeholder for an unspecified value. We can have missing values for various reasons. There are cases when the data set does not contain some values because the experimenter did not collect them. Sometimes having a value does not make sense; for example, a male cannot be pregnant, so any metric related to pregnancy has missing values for male subjects. Also, missing values can appear as an effect of data manipulation operations, like joining two data frames which do not have a one-to-one relation. Missing values are different for each representation, which makes sense since a double value has a different type than a String.
###Code
WS.println(VarDouble.MISSING_VALUE);
WS.println(VarNominal.MISSING_VALUE);
WS.println(VarInt.MISSING_VALUE);
###Output
NaN
?
2.147483647E9
###Markdown
Most of the time we do not have to deal directly with the missing value placeholders, since the `Var` and `Frame` interfaces offer a way to handle missing value operations gracefully. Below is an illustrative example:
###Code
var x = VarDouble.seq(10).name("x");
x.printString();
// put missing on values from indexes 2,4 and 6
x.setMissing(2);
x.setMissing(4);
x.setMissing(6);
x.printString();
// count the number of non missing values
x.stream().complete().count();
// compute the sum of all non missing values
x.op().nansum();
###Output
_____no_output_____
###Markdown
Var Iterators Each `Var` allows easy data manipulation through iterators. There is a generic construct available for each variable implementation in the form of `VSpot`. In the terminology used in `rapaio`, a spot is a position in a variable which can contain a value. Since that value can have different representations, the `VSpot` interface is used to manipulate what happens at a given position. Additionally, there are various iterators which can be used for other data representations. The examples below are illustrative:
###Code
var d = VarDouble.seq(3).name("d");
// iterate through double values, we can do this because we have the specific type VarDouble
for(double value : d) WS.println(value)
// for each spot print the string representation
d.forEachSpot(s -> WS.println(s.getLabel()))
// compute the sum using spot iterator and streaming API
WS.println(d.stream().mapToDouble(VSpot::getDouble).sum());
WS.println(d.op().nansum());
// display the row indexes of all values which are missing
var y = VarDouble.copy(1, Double.NaN, 2, Double.NaN, 3);
// collect indexes to an int array
int[] indexes = y.stream().filter(s -> !s.isMissing()).mapToInt(s -> s.row()).toArray();
// create a var int wrapper to see the content
VarInt.wrap(indexes).printString();
###Output
VarInt [name:"?", rowCount:3, values: 0, 2, 4]
###Markdown
Variables
###Code
%load ../rapaio-bootstrap
###Output
_____no_output_____
###Markdown
A variable represents a unidimensional set of observations which come from the same random variable (hence the name). Because the values of a variable are supposed to be generated by the same process, they have the same type and semantics. Variables have names and are of given types. `Var` implementations can be categorized as storage variables and view variables. Storage variables are variables which directly contain and maintain data. View variables are higher-level constructs obtained by filtering and/or merging other variables. View variables do not contain data directly, but maintain references to the storage variables they wrap and allow reading and updating operations in the same way as storage variables. The implemented storage variables have different storage types and representations. We have `VarDouble`, `VarInt`, `VarNominal`, `VarBinary`, `VarLong`, `VarString` and `VarInstant`. VarDouble Numeric double variables are implemented by `VarDouble` and are used to handle discrete or continuous numerical values. Double variables offer value and label representations. All other representations can be used, but with caution, since they can alter the content. For example, the int representation truncates floating point values to integer values, while the int setter sets a correct value since an integer can be converted to a double with no information loss. Various buildersDouble variables can be built in various ways and there are handy shortcuts for various scenarios.
###Code
// builds a variable with no data
Var empty1 = VarDouble.empty();
// builds a variable of a given size which contains only missing data
Var empty2 = VarDouble.empty(100);
// a sequence of numbers, starting from 0, ending with 5 with step 1
Var seq1 = VarDouble.seq(5);
// a sequence of numbers, starting from 1, ending with 5 with step 1
Var seq2 = VarDouble.seq(1, 5);
// a sequence of numbers starting at 0, ending at 1 with step 0.1
Var seq3 = VarDouble.seq(0, 1, 0.1);
// build a variable of a given size which contains only zeros
Var fill1 = VarDouble.fill(5);
// builds a variable of a given size which contains only ones
Var fill2 = VarDouble.fill(5, 1);
// numeric variable which contains the values copied from another variable
Var copy1 = VarDouble.copy(seq1);
// numeric variable with values copied from a collection
Normal normal = Normal.std();
List<Double> list1 = DoubleStream.generate(normal::sampleNext).limit(10).boxed().collect(Collectors.toList());
Var copy2 = VarDouble.copy(list1);
// numeric variable with values copied from a double array
Var copy3 = VarDouble.copy(1, 3, 4.0, 7);
// numeric variable with values copied from an int array
Var copy4 = VarDouble.copy(1, 3, 4, 7);
// numeric variables with values generated as the sqrt of the row number
Var from1 = VarDouble.from(10, Math::sqrt);
// numeric variable with values generated using a function which receives a row value
// as parameter and outputs a double value; in this case we generate values as
// a sum of the values of other two variables
Var from2 = VarDouble.from(4, row -> copy3.getDouble(row) + copy4.getDouble(row));
// numeric variable with values generated from values of another variable using
// a transformation provided via a lambda function
Var from3 = VarDouble.from(from1, x -> x + 1);
###Output
_____no_output_____
###Markdown
**Wrapper around a double array**This builder creates a new numeric variable instance as a wrapper around a double array of values. Notice that it is not the same as the copy builder: in the wrapper case any change in the new numerical variable is also reflected in the original array of numbers. In the case of the copy builder this is not true, since the copy builder (as its name implies) creates an internal copy of the array.
###Code
double[] array = DoubleArrays.newFrom(1, 5, x -> x*x);
Var wrap1 = VarDouble.wrap(array);
wrap1.printString();
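// changing the underlying array below is reflected in the wrapping variable
// (unlike the copy builder, which keeps its own internal copy)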
array[2] = 17;
wrap1.printString();
###Output
VarDouble [name:"?", rowCount:4, values: 1.0, 4.0, 9.0, 16.0]
VarDouble [name:"?", rowCount:4, values: 1.0, 4.0, 17.0, 16.0]
###Markdown
Printing variables Most of the objects which contain information implement the `Printable` interface. This interface allows one to display a summary of the content of the given object. This is also the case with numerical variables. Additionally, numerical variables implement two other methods, one which displays all the values and another which displays only the first values.
###Code
// build a numerical variable with values as the sqrt
// of the first 200 integer values
Var x = VarDouble.from(200, Math::sqrt).name("x");
// prints the text produced by toString
x.printString();
// print a reasonable part of the values
x.printContent();
// print all values of the variable
x.printFullContent();
// print a summary of the content of the variable
x.printSummary();
WS.image(points(x, VarDouble.from(x.size(), Normal.std()::sampleNext), color(1), pch(3)), 600, 300);
###Output
_____no_output_____
###Markdown
VarOp interfaceThere are various mathematical operations available under the VarOp interface. Those operators can be accessed by calling the `op()` method on any variable. The following examples use some of those operators.
###Code
// computes the sum of all values in variable
x.op().nansum();
// apply a lambda function on a copy of the variable
x.copy().op().apply(v -> Math.sqrt(v + 3./8)).printContent();
// add a constant to all values of a copy
x.copy().op().plus(Math.E);
###Output
_____no_output_____
###Markdown
Nominal variables Nominal variables are defined by `VarNominal` and contain string-valued categories. Nominal variables offer integer and label representations. The label representation describes the categories as labels or texts, while the integer representation is an integer index over the categories. The index representation does not imply an order between categories/labels. Various builders Nominal variables can be built in various ways, and there are handy shortcuts for various scenarios.
###Code
// creates an empty nominal variable with provided levels
var nom1 = VarNominal.empty(10, "a", "b");
// note the first label which is a placeholder for missing values
nom1.levels();
VarNominal.from(10, row -> row % 2 == 0 ? "even" : "odd").printString();
VarNominal.copy("a", "b", "c", "b").printContent()
Unique.of(VarNominal.copy("a","b","c","b"))
###Output
_____no_output_____
###Markdown
Overview of variable types All variables implement a common API, which makes it easy to manipulate their content in a generic way. However, depending on the type, some variables might not implement some operations, or might fall back on specific implementations which make sense for that variable type. For example, for a numeric variable it makes sense to set the value at a specific index in order to change it. For nominal variables the same operation would not have that meaning. Instead of 'change the numerical value at a given position', it has the following semantics: 'change the string value to the category associated with the rounded integer value of the double parameter'. Let's see an example:
###Code
// we create a nominal variable with label `a` on the first position and label `b` on the second position
var nom = VarNominal.copy("a", "b");
nom.printString();
// set the value from the first position to the label which corresponds to indexed label 2, which is `b`
nom.setDouble(0, 2.1);
// let's see the result
nom.printString();
###Output
VarNominal [name:"?", rowCount:2, values: a, b]
VarNominal [name:"?", rowCount:2, values: b, b]
###Markdown
Missing values All variables offer an API for missing values. A missing value is a special value used as a placeholder for an unspecified value. Missing values can appear for various reasons. There are cases when the data set does not contain some values because the experimenter did not collect them. Sometimes having a value does not make sense; for example, a male cannot be pregnant, so any metric related to pregnancy has a missing value for male subjects. Missing values can also appear as an effect of data manipulation operations, like joining two data frames which do not have a one-to-one presence relation. Missing value placeholders are different for each representation, which makes sense since a double value has a different type than a String.
###Code
WS.println(VarDouble.MISSING_VALUE);
WS.println(VarNominal.MISSING_VALUE);
WS.println(VarInt.MISSING_VALUE);
###Output
NaN
?
2.147483647E9
###Markdown
Most of the time we do not have to deal directly with the missing value placeholders, since the `Var` and `Frame` interfaces offer a way to handle missing value operations gracefully. Below is an illustrative example:
###Code
var x = VarDouble.seq(10).name("x");
x.printString();
// put missing on values from indexes 2,4 and 6
x.setMissing(2);
x.setMissing(4);
x.setMissing(6);
x.printString();
// count the number of non missing values
x.stream().complete().count();
// compute the sum of all non missing values
x.op().nansum();
###Output
_____no_output_____
###Markdown
Var Iterators Each `Var` allows easy data manipulation through iterators. There is a generic construct available for each variable implementation in the form of `VSpot`. In the terminology used in `rapaio`, a spot is a position in a variable which can contain a value. Since that value can have different representations, the `VSpot` interface is used to manipulate what happens at a given position. Additionally, there are various iterators which can be used for other data representations. The examples below are illustrative:
###Code
var d = VarDouble.seq(3).name("d");
// iterate through double values, we can do this because we have the specific type VarDouble
for(double value : d) WS.println(value)
// for each spot print the string representation
d.forEachSpot(s -> WS.println(s.getLabel()))
// compute the sum using spot iterator and streaming API
WS.println(d.stream().mapToDouble(VSpot::getDouble).sum());
WS.println(d.op().nansum());
// display the row indexes of all values which are missing
var y = VarDouble.copy(1, Double.NaN, 2, Double.NaN, 3);
// collect indexes to an int array
int[] indexes = y.stream().filter(s -> !s.isMissing()).mapToInt(s -> s.row()).toArray();
// create a var int wrapper to see the content
VarInt.wrap(indexes).printString();
###Output
VarInt [name:"?", rowCount:3, values: 0, 2, 4]
|
Understand_data.ipynb | ###Markdown
Understanding the data:
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
df = pd.read_csv("sms-call-internet-mi-2013-11-01.txt", sep = "\t", header= None)
columns = ["grid_square", "time", "cc", "sms_in", "sms_out", "call_in", "call_out", "internet"]
df.columns = columns
grouped = df.groupby(["time", "grid_square"])
groups = grouped.groups.keys()
groups = list(groups)
len(groups)
g1 = grouped.get_group(groups[12])
g1.head()
###Output
_____no_output_____
###Markdown
Different country codes found in the dataset
###Code
print(np.sort(df.cc.unique()))
###Output
[ 0 1 7 20 27 30 31 32 33 34 36 39
40 41 43 44 45 46 47 48 49 51 52 53
54 55 56 57 58 60 61 62 63 64 65 66
81 82 84 86 90 91 92 93 94 95 98 211
212 213 216 218 220 221 222 223 224 225 226 227
228 229 230 231 232 233 234 235 236 237 238 239
240 241 242 243 244 245 247 248 249 250 251 254
255 256 257 258 260 261 262 263 264 265 266 267
290 291 297 298 350 351 352 353 354 355 356 357
358 359 370 371 372 373 374 375 376 377 378 379
380 381 382 385 386 387 389 420 421 423 500 502
503 504 505 506 507 509 590 591 593 594 595 596
598 599 675 676 677 690 852 853 855 856 870 880
886 960 961 962 963 964 965 966 967 968 970 971
972 973 974 975 976 977 992 993 994 995 996 998
1204 1214 1226 1246 1250 1284 1340 1403 1416 1438 1450 1514
1519 1579 1587 1604 1613 1647 1670 1671 1705 1709 1721 1758
1778 1780 1787 1807 1808 1809 1829 1849 1905 1907 1926 1938
1939 7701 7702 7705 7707 7711 7717 7727 7771 7775 7777 7778
8816 8817 14413 18092 18093 18094 18096 18097 18098 18099 18686 18762
18763 18768 68572 88235 88239 97259]
###Markdown
By searching we have found that these are different country codes. Our interest is in the CDRs generated inside the country of Italy, with country code (cc) 39.
###Code
grouped.count()["cc"].max()
grouped.count()["cc"].min()
###Output
_____no_output_____
###Markdown
That means there is at least one measurement per grid square in a single time interval, but possibly more than one, coming from different country codes.
###Code
groups = list(groups)
groups
ccs = df.groupby(["cc"])
codes = list(ccs.groups)
ccs.get_group(codes[0])
ccs.get_group(codes[1])
ccs.get_group(codes[3])
ccs.get_group(39)
###Output
_____no_output_____
###Markdown
From these investigations we found that the measurements for other country codes are not present in all time intervals. Italy Data:
###Code
italy_df = df[df["cc"] == 39]
italy_df.head()
italy_df.shape
###Output
_____no_output_____
###Markdown
If our assumption is true, then grouping the Italy data by time and grid squares must not change the shape of the data, because we have only one measurement per time interval inside a grid square for the same CC.
###Code
time_italy = italy_df.groupby(["time", "grid_square"])
time_italy.head()
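# Sketch of a direct check of the assumption: if every (time, grid_square) group
# holds exactly one row for cc == 39, the number of groups equals the number of
# rows, i.e. grouping does not change the shape.
print(italy_df.shape[0], time_italy.ngroups, italy_df.shape[0] == time_italy.ngroups)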
###Output
_____no_output_____
###Markdown
The same shape means our assumption is true. Now let's see the dominant CDRs.
###Code
means = time_italy[["sms_in", "sms_out", "call_in", "call_out", "internet"]].mean().mean()
cdrs = ["sms_in", "sms_out", "call_in", "call_out", "internet"]
cdrs_means = [means["sms_in"],means["sms_out"],means["call_in"],means["call_out"],means["internet"]]
figure, ax = plt.subplots(figsize = (10,4))
ax.set_title("Different CDR means")
# ax.figure(figsize = (10,5))
ax.bar(cdrs,cdrs_means );
###Output
_____no_output_____ |
notebook/2018-03-16_explore_cell_cutoffs.ipynb | ###Markdown
Explore Cell Cutoffs We are unsatisfied with our current cutoffs for calling a cell vs background. We think Cell Ranger's cutoff is too arbitrary, so we need to import the unfiltered data and figure out our own filtering criteria.**Note: Requires 64Gb of RAM to run this notebook.**
###Code
import os
import sys
from pathlib import Path
from IPython.display import display, HTML, Markdown
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import tables
# Project level imports
sys.path.insert(0, '../lib')
from larval_gonad.notebook import Nb
from larval_gonad.plotting import make_figs
from larval_gonad.config import memory
from larval_gonad.io import cellranger_counts, cellranger_umi
# Setup notebook
nbconfig = Nb.setup_notebook()
###Output
last updated: 2018-03-20
Git hash: c08af6a646e691e17b6633097ebb3f2fc1ba0ce4
###Markdown
Get UMI info
###Code
fname = '../output/testis1_force/outs/molecule_info.h5'
umi = cellranger_umi(fname)
dat = umi.groupby("cell_id").umi.size().sort_values(ascending=False).to_frame()
dat['X'] = list(range(1, len(dat) + 1))
fig, ax = plt.subplots(1, 1)
ax.plot(dat['X'], dat['umi'])
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('Cell Count (log10)')
ax.set_ylabel('UMI Count (log10)')
ax.set_title('Barcode Rank Plot')
xlim = ax.get_xlim()
ylim = ax.get_xlim()
###Output
_____no_output_____
###Markdown
Filter based on UMI Value
###Code
filtered = dat.query('umi > 1000')
cell_ids_enc = filtered.index.unique().tolist()
len(cell_ids_enc)
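# Hedged alternative cutoff (a sketch, not the threshold this notebook actually
# uses): take the knee of the log-log barcode rank curve, i.e. the rank where the
# UMI count drops fastest, as a data-driven threshold instead of the fixed umi > 1000.
log_umi = np.log10(dat['umi'].values + 1)
log_rank = np.log10(dat['X'].values)
knee_idx = np.argmin(np.gradient(log_umi, log_rank))
print('knee-based UMI cutoff:', dat['umi'].values[knee_idx])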
fig, ax = plt.subplots(1, 1)
ax.plot(filtered['X'], filtered['umi'])
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('Cell Count (log10)')
ax.set_ylabel('UMI Count (log10)')
ax.set_title('Barcode UMI Plot')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
###Output
_____no_output_____
###Markdown
Create filtered dataset
###Code
fname = '../output/testis1_force/outs/raw_gene_bc_matrices_h5.h5'
raw = cellranger_counts(fname, barcodes=cell_ids_enc)
raw.shape
sns.kdeplot(raw.sum())
sns.kdeplot(raw.sum(axis=1))
raw.to_csv('../output/testis1_force/raw_umi_filtered.tsv', sep='\t')
###Output
_____no_output_____ |
experiments/scenario_x5/notebooks/data_overview.ipynb | ###Markdown
Read
###Code
df_clients = pd.read_csv('../data/clients.csv')
df_products = pd.read_csv('../data/products.csv')
df_purchases = pd.read_csv('../data/purchases.csv')
df_train = pd.read_csv('../data/uplift_train.csv')
###Output
_____no_output_____
###Markdown
Transformations
###Code
df_purchases['transaction_datetime'] = pd.to_datetime(df_purchases['transaction_datetime'])
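# transaction_month is the first day of the transaction's month: floor the
# timestamp to the day, then subtract (day of month - 1) days.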
df_purchases['transaction_month'] = \
df_purchases['transaction_datetime'].dt.floor('D') - \
pd.to_timedelta(df_purchases['transaction_datetime'].dt.day - 1, 'D')
###Output
_____no_output_____
###Markdown
Train target
###Code
df_train.assign(cnt=1).pivot_table(index='treatment_flg', columns='target', aggfunc='sum')
###Output
_____no_output_____
###Markdown
Clients
###Code
df_clients.head()
df_clients['client_id'].nunique()
df_clients['age'].hist(bins=20, figsize=(12, 3))
df_clients['age'].clip(0, 100).hist(bins=20, figsize=(12, 3))
df_clients['age'].quantile([0.25, 0.50, 0.75])
df_clients['gender'].value_counts()
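# age_bin, as implied by the thresholds below: -1 = age outside 10..90,
# 0 = under 35, 1 = 35-44, 2 = 45-59, 3 = 60 and above.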
df_clients['age_bin'] = np.where(
(df_clients['age'] < 10) | (df_clients['age'] > 90), -1, np.where(
df_clients['age'] < 35, 0, np.where(
df_clients['age'] < 45, 1, np.where(
df_clients['age'] < 60, 2, 3))))
df_clients['age_bin'].value_counts().sort_index()
df_clients.groupby(['age_bin', 'gender'])['client_id'].count().unstack()
###Output
_____no_output_____
###Markdown
Products
###Code
df_products
df_products.count()
df_products['level_1'].value_counts()
df_products['level_2'].value_counts()
df_products['level_3'].value_counts()
df_products['level_4'].value_counts()
df_products['segment_id'].value_counts()
df_products['brand_id'].value_counts()
df_products['vendor_id'].value_counts()
_, (ax0, ax1) = plt.subplots(1, 2, figsize=(14, 4))
df_products['netto'].clip(None, 2).hist(ax=ax0)
np.log1p(df_products['netto']).clip(None, 1).hist(ax=ax1)
plt.show()
###Output
_____no_output_____
###Markdown
Purchases
###Code
df_purchases
df_purchases.count()
df_purchases.groupby(['transaction_id', 'client_id'])['transaction_datetime'].count().hist(bins=20, figsize=(12, 4))
df_purchases['client_id'].nunique()
df_purchases.groupby(['transaction_id', 'client_id'])['transaction_datetime'].count().quantile([0.15, 0.95])
for col in [
# 'regular_points_received', 'express_points_received',
# 'regular_points_spent', 'express_points_spent',
# 'purchase_sum',
'product_quantity', 'trn_sum_from_iss', 'trn_sum_from_red',
]:
_, (ax0, ax1) = plt.subplots(1, 2, figsize=(14, 4))
df_purchases[col].hist(ax=ax0)
(np.log1p(abs(df_purchases[col])) * np.sign(df_purchases[col])).hist(ax=ax1)
plt.suptitle(col)
plt.show()
###Output
_____no_output_____
###Markdown
Transactions
###Code
df_trx = df_purchases \
.drop(columns=['product_id', 'product_quantity', 'trn_sum_from_iss', 'trn_sum_from_red']) \
.drop_duplicates()
df_trx
(df_trx['transaction_id'] + '-' + df_trx['client_id']).value_counts()
df_trx.count()
df_trx.groupby('transaction_month')['transaction_id'].count().plot(figsize=(12, 4), kind='bar')
df_trx['store_id'].value_counts().reset_index(drop=True).plot(figsize=(12, 4), grid=True)
df_trx.groupby('client_id')['transaction_id'].count().clip(None, 150).hist(bins=20, figsize=(12, 4))
plt.title('Trx per client')
plt.show()
df_trx.groupby('client_id')['transaction_id'].count().quantile([0.15, 0.95])
df_purchases.groupby('client_id')['transaction_id'].count().clip(None, 800).hist(bins=20, figsize=(12, 4))
plt.title('Products per client')
plt.show()
df_purchases.groupby('client_id')['transaction_id'].count().quantile([0.15, 0.95])
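# Time between consecutive transactions per client: the timedelta is converted
# from nanoseconds to days (1e9 * 24 * 60 * 60) and clipped at 30 days for the histogram.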
df_trx.sort_values(['client_id', 'transaction_datetime']).set_index('client_id') \
.groupby(level='client_id')['transaction_datetime'].diff()
df_trx.sort_values(['client_id', 'transaction_datetime']).set_index('client_id') \
.groupby(level='client_id')['transaction_datetime'].diff().dropna() \
.astype(int).div(1e9 * 24 * 60 * 60).clip(None, 30).hist(bins=20, figsize=(12, 4))
for col in [
'regular_points_received', 'express_points_received',
'regular_points_spent', 'express_points_spent',
'purchase_sum',
# 'product_quantity', 'trn_sum_from_iss', 'trn_sum_from_red',
]:
_, (ax0, ax1) = plt.subplots(1, 2, figsize=(14, 4))
df_trx[col].hist(ax=ax0)
(np.log1p(abs(df_trx[col])) * np.sign(df_trx[col])).hist(ax=ax1)
plt.suptitle(col)
plt.show()
###Output
_____no_output_____
###Markdown
Time periods
###Code
# choose top N products and check it over time
top_n_products = df_purchases.groupby('product_id')['transaction_id'].count().sort_values().iloc[-15:].index.tolist()
top_n_products
df_purchases[lambda x: x['product_id'].isin(top_n_products)] \
.groupby(['transaction_month', 'product_id'])['transaction_id'].count().unstack() \
.plot(figsize=(12, 4), kind='bar', legend=False)
!du -sh ../data/*.parquet
###Output
84M ../data/test_trx.parquet
722M ../data/train_trx.parquet
|
docs/tutorials/4_drivers_tutorial.ipynb | ###Markdown
Copyright 2018 The TF-Agents Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Drivers View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook IntroductionA common pattern in reinforcement learning is to execute a policy in an environment for a specified number of steps or episodes. This happens, for example, during data collection, evaluation and generating a video of the agent.While this is relatively straightforward to write in python, it is much more complex to write and debug in TensorFlow because it involves `tf.while` loops, `tf.cond` and `tf.control_dependencies`. Therefore we abstract this notion of a run loop into a class called `driver`, and provide well tested implementations both in Python and TensorFlow.Additionally, the data encountered by the driver at each step is saved in a named tuple called Trajectory and broadcast to a set of observers such as replay buffers and metrics. This data includes the observation from the environment, the action recommended by the policy, the reward obtained, the type of the current and the next step, etc. Setup If you haven't installed tf-agents or gym yet, run:
###Code
!pip install --pre tf-agents[reverb]
!pip install gym
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.policies import random_py_policy
from tf_agents.policies import random_tf_policy
from tf_agents.metrics import py_metrics
from tf_agents.metrics import tf_metrics
from tf_agents.drivers import py_driver
from tf_agents.drivers import dynamic_episode_driver
tf.compat.v1.enable_v2_behavior()
###Output
_____no_output_____
###Markdown
Python DriversThe `PyDriver` class takes a python environment, a python policy and a list of observers to update at each step. The main method is `run()`, which steps the environment using actions from the policy until at least one of the following termination criteria is met: The number of steps reaches `max_steps` or the number of episodes reaches `max_episodes`.The implementation is roughly as follows:```pythonclass PyDriver(object): def __init__(self, env, policy, observers, max_steps=1, max_episodes=1): self._env = env self._policy = policy self._observers = observers or [] self._max_steps = max_steps or np.inf self._max_episodes = max_episodes or np.inf def run(self, time_step, policy_state=()): num_steps = 0 num_episodes = 0 while num_steps < self._max_steps and num_episodes < self._max_episodes: Compute an action using the policy for the given time_step action_step = self._policy.action(time_step, policy_state) Apply the action to the environment and get the next step next_time_step = self._env.step(action_step.action) Package information into a trajectory traj = trajectory.Trajectory( time_step.step_type, time_step.observation, action_step.action, action_step.info, next_time_step.step_type, next_time_step.reward, next_time_step.discount) for observer in self._observers: observer(traj) Update statistics to check termination num_episodes += np.sum(traj.is_last()) num_steps += np.sum(~traj.is_boundary()) time_step = next_time_step policy_state = action_step.state return time_step, policy_state```Now, let us run through the example of running a random policy on the CartPole environment, saving the results to a replay buffer and computing some metrics.
###Code
env = suite_gym.load('CartPole-v0')
policy = random_py_policy.RandomPyPolicy(time_step_spec=env.time_step_spec(),
action_spec=env.action_spec())
replay_buffer = []
metric = py_metrics.AverageReturnMetric()
observers = [replay_buffer.append, metric]
driver = py_driver.PyDriver(
env, policy, observers, max_steps=20, max_episodes=1)
initial_time_step = env.reset()
final_time_step, _ = driver.run(initial_time_step)
print('Replay Buffer:')
for traj in replay_buffer:
print(traj)
print('Average Return: ', metric.result())
###Output
_____no_output_____
###Markdown
TensorFlow DriversWe also have drivers in TensorFlow which are functionally similar to Python drivers, but use TF environments, TF policies, TF observers etc. We currently have 2 TensorFlow drivers: `DynamicStepDriver`, which terminates after a given number of (valid) environment steps and `DynamicEpisodeDriver`, which terminates after a given number of episodes. Let us look at an example of the DynamicEpisode in action.
###Code
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)
tf_policy = random_tf_policy.RandomTFPolicy(action_spec=tf_env.action_spec(),
time_step_spec=tf_env.time_step_spec())
num_episodes = tf_metrics.NumberOfEpisodes()
env_steps = tf_metrics.EnvironmentSteps()
observers = [num_episodes, env_steps]
driver = dynamic_episode_driver.DynamicEpisodeDriver(
tf_env, tf_policy, observers, num_episodes=2)
# Initial driver.run will reset the environment and initialize the policy.
final_time_step, policy_state = driver.run()
print('final_time_step', final_time_step)
print('Number of Steps: ', env_steps.result().numpy())
print('Number of Episodes: ', num_episodes.result().numpy())
# Continue running from previous state
final_time_step, _ = driver.run(final_time_step, policy_state)
print('final_time_step', final_time_step)
print('Number of Steps: ', env_steps.result().numpy())
print('Number of Episodes: ', num_episodes.result().numpy())
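# A minimal sketch of the step-based driver mentioned above (assuming the
# tf_agents.drivers.dynamic_step_driver module): it terminates after a given
# number of valid environment steps instead of a number of episodes.
from tf_agents.drivers import dynamic_step_driver
step_driver = dynamic_step_driver.DynamicStepDriver(
    tf_env, tf_policy, observers, num_steps=10)
final_time_step, policy_state = step_driver.run()
print('Number of Steps after DynamicStepDriver run: ', env_steps.result().numpy())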
###Output
_____no_output_____
###Markdown
Copyright 2021 The TF-Agents Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Drivers View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook IntroductionA common pattern in reinforcement learning is to execute a policy in an environment for a specified number of steps or episodes. This happens, for example, during data collection, evaluation and generating a video of the agent.While this is relatively straightforward to write in python, it is much more complex to write and debug in TensorFlow because it involves `tf.while` loops, `tf.cond` and `tf.control_dependencies`. Therefore we abstract this notion of a run loop into a class called `driver`, and provide well tested implementations both in Python and TensorFlow.Additionally, the data encountered by the driver at each step is saved in a named tuple called Trajectory and broadcast to a set of observers such as replay buffers and metrics. This data includes the observation from the environment, the action recommended by the policy, the reward obtained, the type of the current and the next step, etc. Setup If you haven't installed tf-agents or gym yet, run:
###Code
!pip install tf-agents
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.policies import random_py_policy
from tf_agents.policies import random_tf_policy
from tf_agents.metrics import py_metrics
from tf_agents.metrics import tf_metrics
from tf_agents.drivers import py_driver
from tf_agents.drivers import dynamic_episode_driver
###Output
_____no_output_____
###Markdown
Python DriversThe `PyDriver` class takes a python environment, a python policy and a list of observers to update at each step. The main method is `run()`, which steps the environment using actions from the policy until at least one of the following termination criteria is met: The number of steps reaches `max_steps` or the number of episodes reaches `max_episodes`.The implementation is roughly as follows:```pythonclass PyDriver(object): def __init__(self, env, policy, observers, max_steps=1, max_episodes=1): self._env = env self._policy = policy self._observers = observers or [] self._max_steps = max_steps or np.inf self._max_episodes = max_episodes or np.inf def run(self, time_step, policy_state=()): num_steps = 0 num_episodes = 0 while num_steps < self._max_steps and num_episodes < self._max_episodes: Compute an action using the policy for the given time_step action_step = self._policy.action(time_step, policy_state) Apply the action to the environment and get the next step next_time_step = self._env.step(action_step.action) Package information into a trajectory traj = trajectory.Trajectory( time_step.step_type, time_step.observation, action_step.action, action_step.info, next_time_step.step_type, next_time_step.reward, next_time_step.discount) for observer in self._observers: observer(traj) Update statistics to check termination num_episodes += np.sum(traj.is_last()) num_steps += np.sum(~traj.is_boundary()) time_step = next_time_step policy_state = action_step.state return time_step, policy_state```Now, let us run through the example of running a random policy on the CartPole environment, saving the results to a replay buffer and computing some metrics.
###Code
env = suite_gym.load('CartPole-v0')
policy = random_py_policy.RandomPyPolicy(time_step_spec=env.time_step_spec(),
action_spec=env.action_spec())
replay_buffer = []
metric = py_metrics.AverageReturnMetric()
observers = [replay_buffer.append, metric]
driver = py_driver.PyDriver(
env, policy, observers, max_steps=20, max_episodes=1)
initial_time_step = env.reset()
final_time_step, _ = driver.run(initial_time_step)
print('Replay Buffer:')
for traj in replay_buffer:
print(traj)
print('Average Return: ', metric.result())
###Output
_____no_output_____
###Markdown
TensorFlow DriversWe also have drivers in TensorFlow which are functionally similar to Python drivers, but use TF environments, TF policies, TF observers etc. We currently have 2 TensorFlow drivers: `DynamicStepDriver`, which terminates after a given number of (valid) environment steps and `DynamicEpisodeDriver`, which terminates after a given number of episodes. Let us look at an example of the DynamicEpisode in action.
###Code
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)
tf_policy = random_tf_policy.RandomTFPolicy(action_spec=tf_env.action_spec(),
time_step_spec=tf_env.time_step_spec())
num_episodes = tf_metrics.NumberOfEpisodes()
env_steps = tf_metrics.EnvironmentSteps()
observers = [num_episodes, env_steps]
driver = dynamic_episode_driver.DynamicEpisodeDriver(
tf_env, tf_policy, observers, num_episodes=2)
# Initial driver.run will reset the environment and initialize the policy.
final_time_step, policy_state = driver.run()
print('final_time_step', final_time_step)
print('Number of Steps: ', env_steps.result().numpy())
print('Number of Episodes: ', num_episodes.result().numpy())
# Continue running from previous state
final_time_step, _ = driver.run(final_time_step, policy_state)
print('final_time_step', final_time_step)
print('Number of Steps: ', env_steps.result().numpy())
print('Number of Episodes: ', num_episodes.result().numpy())
###Output
_____no_output_____
###Markdown
Copyright 2018 The TF-Agents Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Drivers View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook IntroductionA common pattern in reinforcement learning is to execute a policy in an environment for a specified number of steps or episodes. This happens, for example, during data collection, evaluation and generating a video of the agent.While this is relatively straightforward to write in python, it is much more complex to write and debug in TensorFlow because it involves `tf.while` loops, `tf.cond` and `tf.control_dependencies`. Therefore we abstract this notion of a run loop into a class called `driver`, and provide well tested implementations both in Python and TensorFlow.Additionally, the data encountered by the driver at each step is saved in a named tuple called Trajectory and broadcast to a set of observers such as replay buffers and metrics. This data includes the observation from the environment, the action recommended by the policy, the reward obtained, the type of the current and the next step, etc. Setup
###Code
try:
%tensorflow_version 2.x
except:
pass
###Output
_____no_output_____
###Markdown
If you haven't installed tf-agents or gym yet, run:
###Code
!pip install --upgrade tensorflow-probability
!pip install tf-agents
!pip install gym
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.policies import random_py_policy
from tf_agents.policies import random_tf_policy
from tf_agents.metrics import py_metrics
from tf_agents.metrics import tf_metrics
from tf_agents.drivers import py_driver
from tf_agents.drivers import dynamic_episode_driver
tf.compat.v1.enable_v2_behavior()
###Output
_____no_output_____
###Markdown
Python DriversThe `PyDriver` class takes a python environment, a python policy and a list of observers to update at each step. The main method is `run()`, which steps the environment using actions from the policy until at least one of the following termination criteria is met: The number of steps reaches `max_steps` or the number of episodes reaches `max_episodes`.The implementation is roughly as follows:```pythonclass PyDriver(object): def __init__(self, env, policy, observers, max_steps=1, max_episodes=1): self._env = env self._policy = policy self._observers = observers or [] self._max_steps = max_steps or np.inf self._max_episodes = max_episodes or np.inf def run(self, time_step, policy_state=()): num_steps = 0 num_episodes = 0 while num_steps < self._max_steps and num_episodes < self._max_episodes: Compute an action using the policy for the given time_step action_step = self._policy.action(time_step, policy_state) Apply the action to the environment and get the next step next_time_step = self._env.step(action_step.action) Package information into a trajectory traj = trajectory.Trajectory( time_step.step_type, time_step.observation, action_step.action, action_step.info, next_time_step.step_type, next_time_step.reward, next_time_step.discount) for observer in self._observers: observer(traj) Update statistics to check termination num_episodes += np.sum(traj.is_last()) num_steps += np.sum(~traj.is_boundary()) time_step = next_time_step policy_state = action_step.state return time_step, policy_state```Now, let us run through the example of running a random policy on the CartPole environment, saving the results to a replay buffer and computing some metrics.
###Code
env = suite_gym.load('CartPole-v0')
policy = random_py_policy.RandomPyPolicy(time_step_spec=env.time_step_spec(),
action_spec=env.action_spec())
replay_buffer = []
metric = py_metrics.AverageReturnMetric()
observers = [replay_buffer.append, metric]
driver = py_driver.PyDriver(
env, policy, observers, max_steps=20, max_episodes=1)
initial_time_step = env.reset()
final_time_step, _ = driver.run(initial_time_step)
print('Replay Buffer:')
for traj in replay_buffer:
print(traj)
print('Average Return: ', metric.result())
###Output
_____no_output_____
###Markdown
TensorFlow DriversWe also have drivers in TensorFlow which are functionally similar to Python drivers, but use TF environments, TF policies, TF observers etc. We currently have 2 TensorFlow drivers: `DynamicStepDriver`, which terminates after a given number of (valid) environment steps and `DynamicEpisodeDriver`, which terminates after a given number of episodes. Let us look at an example of the DynamicEpisode in action.
###Code
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)
tf_policy = random_tf_policy.RandomTFPolicy(action_spec=tf_env.action_spec(),
time_step_spec=tf_env.time_step_spec())
num_episodes = tf_metrics.NumberOfEpisodes()
env_steps = tf_metrics.EnvironmentSteps()
observers = [num_episodes, env_steps]
driver = dynamic_episode_driver.DynamicEpisodeDriver(
tf_env, tf_policy, observers, num_episodes=2)
# Initial driver.run will reset the environment and initialize the policy.
final_time_step, policy_state = driver.run()
print('final_time_step', final_time_step)
print('Number of Steps: ', env_steps.result().numpy())
print('Number of Episodes: ', num_episodes.result().numpy())
# Continue running from previous state
final_time_step, _ = driver.run(final_time_step, policy_state)
print('final_time_step', final_time_step)
print('Number of Steps: ', env_steps.result().numpy())
print('Number of Episodes: ', num_episodes.result().numpy())
###Output
_____no_output_____
###Markdown
Copyright 2021 The TF-Agents Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Drivers View on TensorFlow.org Run in Google Colab View source on GitHub Download notebook IntroductionA common pattern in reinforcement learning is to execute a policy in an environment for a specified number of steps or episodes. This happens, for example, during data collection, evaluation and generating a video of the agent.While this is relatively straightforward to write in python, it is much more complex to write and debug in TensorFlow because it involves `tf.while` loops, `tf.cond` and `tf.control_dependencies`. Therefore we abstract this notion of a run loop into a class called `driver`, and provide well tested implementations both in Python and TensorFlow.Additionally, the data encountered by the driver at each step is saved in a named tuple called Trajectory and broadcast to a set of observers such as replay buffers and metrics. This data includes the observation from the environment, the action recommended by the policy, the reward obtained, the type of the current and the next step, etc. Setup If you haven't installed tf-agents or gym yet, run:
###Code
!pip install tf-agents
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.policies import random_py_policy
from tf_agents.policies import random_tf_policy
from tf_agents.metrics import py_metrics
from tf_agents.metrics import tf_metrics
from tf_agents.drivers import py_driver
from tf_agents.drivers import dynamic_episode_driver
tf.compat.v1.enable_v2_behavior()
###Output
_____no_output_____
###Markdown
Python DriversThe `PyDriver` class takes a python environment, a python policy and a list of observers to update at each step. The main method is `run()`, which steps the environment using actions from the policy until at least one of the following termination criteria is met: The number of steps reaches `max_steps` or the number of episodes reaches `max_episodes`.The implementation is roughly as follows:```pythonclass PyDriver(object): def __init__(self, env, policy, observers, max_steps=1, max_episodes=1): self._env = env self._policy = policy self._observers = observers or [] self._max_steps = max_steps or np.inf self._max_episodes = max_episodes or np.inf def run(self, time_step, policy_state=()): num_steps = 0 num_episodes = 0 while num_steps < self._max_steps and num_episodes < self._max_episodes: Compute an action using the policy for the given time_step action_step = self._policy.action(time_step, policy_state) Apply the action to the environment and get the next step next_time_step = self._env.step(action_step.action) Package information into a trajectory traj = trajectory.Trajectory( time_step.step_type, time_step.observation, action_step.action, action_step.info, next_time_step.step_type, next_time_step.reward, next_time_step.discount) for observer in self._observers: observer(traj) Update statistics to check termination num_episodes += np.sum(traj.is_last()) num_steps += np.sum(~traj.is_boundary()) time_step = next_time_step policy_state = action_step.state return time_step, policy_state```Now, let us run through the example of running a random policy on the CartPole environment, saving the results to a replay buffer and computing some metrics.
###Code
env = suite_gym.load('CartPole-v0')
policy = random_py_policy.RandomPyPolicy(time_step_spec=env.time_step_spec(),
action_spec=env.action_spec())
replay_buffer = []
metric = py_metrics.AverageReturnMetric()
observers = [replay_buffer.append, metric]
driver = py_driver.PyDriver(
env, policy, observers, max_steps=20, max_episodes=1)
initial_time_step = env.reset()
final_time_step, _ = driver.run(initial_time_step)
print('Replay Buffer:')
for traj in replay_buffer:
print(traj)
print('Average Return: ', metric.result())
###Output
_____no_output_____
###Markdown
TensorFlow DriversWe also have drivers in TensorFlow which are functionally similar to Python drivers, but use TF environments, TF policies, TF observers etc. We currently have 2 TensorFlow drivers: `DynamicStepDriver`, which terminates after a given number of (valid) environment steps and `DynamicEpisodeDriver`, which terminates after a given number of episodes. Let us look at an example of the DynamicEpisode in action.
###Code
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)
tf_policy = random_tf_policy.RandomTFPolicy(action_spec=tf_env.action_spec(),
time_step_spec=tf_env.time_step_spec())
num_episodes = tf_metrics.NumberOfEpisodes()
env_steps = tf_metrics.EnvironmentSteps()
observers = [num_episodes, env_steps]
driver = dynamic_episode_driver.DynamicEpisodeDriver(
tf_env, tf_policy, observers, num_episodes=2)
# Initial driver.run will reset the environment and initialize the policy.
final_time_step, policy_state = driver.run()
print('final_time_step', final_time_step)
print('Number of Steps: ', env_steps.result().numpy())
print('Number of Episodes: ', num_episodes.result().numpy())
# Continue running from previous state
final_time_step, _ = driver.run(final_time_step, policy_state)
print('final_time_step', final_time_step)
print('Number of Steps: ', env_steps.result().numpy())
print('Number of Episodes: ', num_episodes.result().numpy())
###Output
_____no_output_____
###Markdown
Copyright 2018 The TF-Agents Authors. Get Started Run in Google Colab View source on GitHub
###Code
# Note: If you haven't installed tf-agents or gym yet, run:
try:
%tensorflow_version 2.x
except:
pass
!pip install tf-agents
!pip install gym
###Output
_____no_output_____
###Markdown
Imports
###Code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.policies import random_py_policy
from tf_agents.policies import random_tf_policy
from tf_agents.metrics import py_metrics
from tf_agents.metrics import tf_metrics
from tf_agents.drivers import py_driver
from tf_agents.drivers import dynamic_episode_driver
tf.compat.v1.enable_v2_behavior()
###Output
_____no_output_____
###Markdown
IntroductionA common pattern in reinforcement learning is to execute a policy in an environment for a specified number of steps or episodes. This happens, for example, during data collection, evaluation and generating a video of the agent.While this is relatively straightforward to write in python, it is much more complex to write and debug in TensorFlow because it involves `tf.while` loops, `tf.cond` and `tf.control_dependencies`. Therefore we abstract this notion of a run loop into a class called `driver`, and provide well tested implementations both in Python and TensorFlow.Additionally, the data encountered by the driver at each step is saved in a named tuple called Trajectory and broadcast to a set of observers such as replay buffers and metrics. This data includes the observation from the environment, the action recommended by the policy, the reward obtained, the type of the current and the next step, etc. Python DriversThe `PyDriver` class takes a python environment, a python policy and a list of observers to update at each step. The main method is `run()`, which steps the environment using actions from the policy until at least one of the following termination criteria is met: The number of steps reaches `max_steps` or the number of episodes reaches `max_episodes`.The implementation is roughly as follows:```pythonclass PyDriver(object): def __init__(self, env, policy, observers, max_steps=1, max_episodes=1): self._env = env self._policy = policy self._observers = observers or [] self._max_steps = max_steps or np.inf self._max_episodes = max_episodes or np.inf def run(self, time_step, policy_state=()): num_steps = 0 num_episodes = 0 while num_steps < self._max_steps and num_episodes < self._max_episodes: Compute an action using the policy for the given time_step action_step = self._policy.action(time_step, policy_state) Apply the action to the environment and get the next step next_time_step = self._env.step(action_step.action) Package information into a trajectory traj = trajectory.Trajectory( time_step.step_type, time_step.observation, action_step.action, action_step.info, next_time_step.step_type, next_time_step.reward, next_time_step.discount) for observer in self._observers: observer(traj) Update statistics to check termination num_episodes += np.sum(traj.is_last()) num_steps += np.sum(~traj.is_boundary()) time_step = next_time_step policy_state = action_step.state return time_step, policy_state```Now, let us run through the example of running a random policy on the CartPole environment, saving the results to a replay buffer and computing some metrics.
###Code
env = suite_gym.load('CartPole-v0')
policy = random_py_policy.RandomPyPolicy(time_step_spec=env.time_step_spec(),
action_spec=env.action_spec())
replay_buffer = []
metric = py_metrics.AverageReturnMetric()
observers = [replay_buffer.append, metric]
driver = py_driver.PyDriver(
env, policy, observers, max_steps=20, max_episodes=1)
initial_time_step = env.reset()
final_time_step, _ = driver.run(initial_time_step)
print('Replay Buffer:')
for traj in replay_buffer:
print(traj)
print('Average Return: ', metric.result())
###Output
_____no_output_____
###Markdown
TensorFlow DriversWe also have drivers in TensorFlow which are functionally similar to Python drivers, but use TF environments, TF policies, TF observers etc. We currently have 2 TensorFlow drivers: `DynamicStepDriver`, which terminates after a given number of (valid) environment steps and `DynamicEpisodeDriver`, which terminates after a given number of episodes. Let us look at an example of the DynamicEpisode in action.
###Code
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)
tf_policy = random_tf_policy.RandomTFPolicy(action_spec=tf_env.action_spec(),
time_step_spec=tf_env.time_step_spec())
num_episodes = tf_metrics.NumberOfEpisodes()
env_steps = tf_metrics.EnvironmentSteps()
observers = [num_episodes, env_steps]
driver = dynamic_episode_driver.DynamicEpisodeDriver(
tf_env, tf_policy, observers, num_episodes=2)
# Initial driver.run will reset the environment and initialize the policy.
final_time_step, policy_state = driver.run()
print('final_time_step', final_time_step)
print('Number of Steps: ', env_steps.result().numpy())
print('Number of Episodes: ', num_episodes.result().numpy())
# Continue running from previous state
final_time_step, _ = driver.run(final_time_step, policy_state)
print('final_time_step', final_time_step)
print('Number of Steps: ', env_steps.result().numpy())
print('Number of Episodes: ', num_episodes.result().numpy())
###Output
_____no_output_____
###Markdown
Copyright 2018 The TF-Agents Authors. Get Started Run in Google Colab View source on GitHub
###Code
# Note: If you haven't installed tf-agents or gym yet, run:
try:
%tensorflow_version 2.x
except:
pass
!pip install --upgrade tensorflow-probability
!pip install tf-agents
!pip install gym
###Output
_____no_output_____
###Markdown
Imports
###Code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.policies import random_py_policy
from tf_agents.policies import random_tf_policy
from tf_agents.metrics import py_metrics
from tf_agents.metrics import tf_metrics
from tf_agents.drivers import py_driver
from tf_agents.drivers import dynamic_episode_driver
tf.compat.v1.enable_v2_behavior()
###Output
_____no_output_____
###Markdown
IntroductionA common pattern in reinforcement learning is to execute a policy in an environment for a specified number of steps or episodes. This happens, for example, during data collection, evaluation and generating a video of the agent.While this is relatively straightforward to write in python, it is much more complex to write and debug in TensorFlow because it involves `tf.while` loops, `tf.cond` and `tf.control_dependencies`. Therefore we abstract this notion of a run loop into a class called `driver`, and provide well tested implementations both in Python and TensorFlow.Additionally, the data encountered by the driver at each step is saved in a named tuple called Trajectory and broadcast to a set of observers such as replay buffers and metrics. This data includes the observation from the environment, the action recommended by the policy, the reward obtained, the type of the current and the next step, etc. Python DriversThe `PyDriver` class takes a python environment, a python policy and a list of observers to update at each step. The main method is `run()`, which steps the environment using actions from the policy until at least one of the following termination criteria is met: The number of steps reaches `max_steps` or the number of episodes reaches `max_episodes`.The implementation is roughly as follows:```pythonclass PyDriver(object): def __init__(self, env, policy, observers, max_steps=1, max_episodes=1): self._env = env self._policy = policy self._observers = observers or [] self._max_steps = max_steps or np.inf self._max_episodes = max_episodes or np.inf def run(self, time_step, policy_state=()): num_steps = 0 num_episodes = 0 while num_steps < self._max_steps and num_episodes < self._max_episodes: Compute an action using the policy for the given time_step action_step = self._policy.action(time_step, policy_state) Apply the action to the environment and get the next step next_time_step = self._env.step(action_step.action) Package information into a trajectory traj = trajectory.Trajectory( time_step.step_type, time_step.observation, action_step.action, action_step.info, next_time_step.step_type, next_time_step.reward, next_time_step.discount) for observer in self._observers: observer(traj) Update statistics to check termination num_episodes += np.sum(traj.is_last()) num_steps += np.sum(~traj.is_boundary()) time_step = next_time_step policy_state = action_step.state return time_step, policy_state```Now, let us run through the example of running a random policy on the CartPole environment, saving the results to a replay buffer and computing some metrics.
###Code
env = suite_gym.load('CartPole-v0')
policy = random_py_policy.RandomPyPolicy(time_step_spec=env.time_step_spec(),
action_spec=env.action_spec())
replay_buffer = []
metric = py_metrics.AverageReturnMetric()
observers = [replay_buffer.append, metric]
driver = py_driver.PyDriver(
env, policy, observers, max_steps=20, max_episodes=1)
initial_time_step = env.reset()
final_time_step, _ = driver.run(initial_time_step)
print('Replay Buffer:')
for traj in replay_buffer:
print(traj)
print('Average Return: ', metric.result())
###Output
_____no_output_____
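###Markdown
Any callable that accepts a `Trajectory` can act as an observer. As a minimal, illustrative sketch (the `EpisodeCounter` class and the driver settings below are assumptions for this example, not part of the original tutorial), a small custom observer can count episode ends alongside the replay buffer and the metric:
###Code
import numpy as np

# A hypothetical custom observer: any callable taking a Trajectory works.
class EpisodeCounter(object):

  def __init__(self):
    self.count = 0

  def __call__(self, traj):
    # traj.is_last() marks the final step of an episode.
    self.count += int(np.sum(traj.is_last()))

counter = EpisodeCounter()
counting_driver = py_driver.PyDriver(
    env, policy, observers=[counter], max_steps=500, max_episodes=2)
time_step = env.reset()
counting_driver.run(time_step)
print('Episodes seen by the observer:', counter.count)
###Output
_____no_output_____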
###Markdown
TensorFlow Drivers

We also have drivers in TensorFlow which are functionally similar to Python drivers, but use TF environments, TF policies, TF observers, etc. We currently have two TensorFlow drivers: `DynamicStepDriver`, which terminates after a given number of (valid) environment steps, and `DynamicEpisodeDriver`, which terminates after a given number of episodes. Let us look at an example of the `DynamicEpisodeDriver` in action.
###Code
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)
tf_policy = random_tf_policy.RandomTFPolicy(action_spec=tf_env.action_spec(),
time_step_spec=tf_env.time_step_spec())
num_episodes = tf_metrics.NumberOfEpisodes()
env_steps = tf_metrics.EnvironmentSteps()
observers = [num_episodes, env_steps]
driver = dynamic_episode_driver.DynamicEpisodeDriver(
tf_env, tf_policy, observers, num_episodes=2)
# Initial driver.run will reset the environment and initialize the policy.
final_time_step, policy_state = driver.run()
print('final_time_step', final_time_step)
print('Number of Steps: ', env_steps.result().numpy())
print('Number of Episodes: ', num_episodes.result().numpy())
# Continue running from previous state
final_time_step, _ = driver.run(final_time_step, policy_state)
print('final_time_step', final_time_step)
print('Number of Steps: ', env_steps.result().numpy())
print('Number of Episodes: ', num_episodes.result().numpy())
###Output
_____no_output_____
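###Markdown
For completeness, here is a minimal sketch of the step-based counterpart. It assumes `dynamic_step_driver.DynamicStepDriver` follows the same pattern as the episode driver above (environment, policy, observers, plus a `num_steps` termination argument); treat the exact import path and argument names as assumptions rather than a definitive reference.
###Code
# Hedged sketch: DynamicStepDriver terminates after a fixed number of
# (valid) environment steps rather than a number of episodes.
from tf_agents.drivers import dynamic_step_driver

steps_metric = tf_metrics.EnvironmentSteps()
step_driver = dynamic_step_driver.DynamicStepDriver(
    tf_env, tf_policy, observers=[steps_metric], num_steps=10)

final_time_step, policy_state = step_driver.run()
print('final_time_step', final_time_step)
print('Number of Steps: ', steps_metric.result().numpy())
###Output
_____no_output_____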
###Markdown
Copyright 2021 The TF-Agents Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Drivers

Introduction

A common pattern in reinforcement learning is to execute a policy in an environment for a specified number of steps or episodes. This happens, for example, during data collection, evaluation, and when generating a video of the agent.

While this is relatively straightforward to write in Python, it is much more complex to write and debug in TensorFlow because it involves `tf.while` loops, `tf.cond` and `tf.control_dependencies`. We therefore abstract this notion of a run loop into a class called `driver`, and provide well-tested implementations in both Python and TensorFlow.

Additionally, the data encountered by the driver at each step is saved in a named tuple called Trajectory and broadcast to a set of observers such as replay buffers and metrics. This data includes the observation from the environment, the action recommended by the policy, the reward obtained, the type of the current and the next step, etc.

Setup

If you haven't installed tf-agents or gym yet, run:
###Code
!pip install tf-agents
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.policies import random_py_policy
from tf_agents.policies import random_tf_policy
from tf_agents.metrics import py_metrics
from tf_agents.metrics import tf_metrics
from tf_agents.drivers import py_driver
from tf_agents.drivers import dynamic_episode_driver
tf.compat.v1.enable_resource_variables()
###Output
_____no_output_____
###Markdown
Python Drivers

The `PyDriver` class takes a python environment, a python policy and a list of observers to update at each step. The main method is `run()`, which steps the environment using actions from the policy until at least one of the following termination criteria is met: the number of steps reaches `max_steps` or the number of episodes reaches `max_episodes`.

The implementation is roughly as follows:

```python
class PyDriver(object):

  def __init__(self, env, policy, observers, max_steps=1, max_episodes=1):
    self._env = env
    self._policy = policy
    self._observers = observers or []
    self._max_steps = max_steps or np.inf
    self._max_episodes = max_episodes or np.inf

  def run(self, time_step, policy_state=()):
    num_steps = 0
    num_episodes = 0
    while num_steps < self._max_steps and num_episodes < self._max_episodes:
      # Compute an action using the policy for the given time_step.
      action_step = self._policy.action(time_step, policy_state)

      # Apply the action to the environment and get the next step.
      next_time_step = self._env.step(action_step.action)

      # Package information into a trajectory.
      traj = trajectory.Trajectory(
          time_step.step_type,
          time_step.observation,
          action_step.action,
          action_step.info,
          next_time_step.step_type,
          next_time_step.reward,
          next_time_step.discount)

      for observer in self._observers:
        observer(traj)

      # Update statistics to check termination.
      num_episodes += np.sum(traj.is_last())
      num_steps += np.sum(~traj.is_boundary())

      time_step = next_time_step
      policy_state = action_step.state

    return time_step, policy_state
```

Now, let us run through the example of running a random policy on the CartPole environment, saving the results to a replay buffer and computing some metrics.
###Code
env = suite_gym.load('CartPole-v0')
policy = random_py_policy.RandomPyPolicy(time_step_spec=env.time_step_spec(),
action_spec=env.action_spec())
replay_buffer = []
metric = py_metrics.AverageReturnMetric()
observers = [replay_buffer.append, metric]
driver = py_driver.PyDriver(
env, policy, observers, max_steps=20, max_episodes=1)
initial_time_step = env.reset()
final_time_step, _ = driver.run(initial_time_step)
print('Replay Buffer:')
for traj in replay_buffer:
print(traj)
print('Average Return: ', metric.result())
###Output
_____no_output_____
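###Markdown
The values returned by `run()` make it easy to resume collection later. As a small illustrative continuation (not part of the original tutorial), calling `run()` again with the returned time step keeps appending to the same replay buffer and metric; if the previous episode ended, the Python environment is expected to start a new one automatically on the next step.
###Code
# Resume collection from where the previous run() stopped.
final_time_step, _ = driver.run(final_time_step)

print('Trajectories collected so far:', len(replay_buffer))
print('Average Return: ', metric.result())
###Output
_____no_output_____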
###Markdown
TensorFlow Drivers

We also have drivers in TensorFlow which are functionally similar to Python drivers, but use TF environments, TF policies, TF observers, etc. We currently have two TensorFlow drivers: `DynamicStepDriver`, which terminates after a given number of (valid) environment steps, and `DynamicEpisodeDriver`, which terminates after a given number of episodes. Let us look at an example of the `DynamicEpisodeDriver` in action.
###Code
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)
tf_policy = random_tf_policy.RandomTFPolicy(action_spec=tf_env.action_spec(),
time_step_spec=tf_env.time_step_spec())
num_episodes = tf_metrics.NumberOfEpisodes()
env_steps = tf_metrics.EnvironmentSteps()
observers = [num_episodes, env_steps]
driver = dynamic_episode_driver.DynamicEpisodeDriver(
tf_env, tf_policy, observers, num_episodes=2)
# Initial driver.run will reset the environment and initialize the policy.
final_time_step, policy_state = driver.run()
print('final_time_step', final_time_step)
print('Number of Steps: ', env_steps.result().numpy())
print('Number of Episodes: ', num_episodes.result().numpy())
# Continue running from previous state
final_time_step, _ = driver.run(final_time_step, policy_state)
print('final_time_step', final_time_step)
print('Number of Steps: ', env_steps.result().numpy())
print('Number of Episodes: ', num_episodes.result().numpy())
###Output
_____no_output_____
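###Markdown
As a quick illustrative follow-up (not in the original tutorial), the two metrics above can be combined to estimate the average episode length over everything collected so far:
###Code
# Average number of environment steps per completed episode so far.
avg_episode_length = env_steps.result().numpy() / num_episodes.result().numpy()
print('Average episode length: ', avg_episode_length)
###Output
_____no_output_____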
###Markdown
Copyright 2018 The TF-Agents Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Drivers

Introduction

A common pattern in reinforcement learning is to execute a policy in an environment for a specified number of steps or episodes. This happens, for example, during data collection, evaluation, and when generating a video of the agent.

While this is relatively straightforward to write in Python, it is much more complex to write and debug in TensorFlow because it involves `tf.while` loops, `tf.cond` and `tf.control_dependencies`. We therefore abstract this notion of a run loop into a class called `driver`, and provide well-tested implementations in both Python and TensorFlow.

Additionally, the data encountered by the driver at each step is saved in a named tuple called Trajectory and broadcast to a set of observers such as replay buffers and metrics. This data includes the observation from the environment, the action recommended by the policy, the reward obtained, the type of the current and the next step, etc.

Setup

If you haven't installed tf-agents or gym yet, run:
###Code
!pip install --upgrade tensorflow-probability
!pip install tf-agents
!pip install gym
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.policies import random_py_policy
from tf_agents.policies import random_tf_policy
from tf_agents.metrics import py_metrics
from tf_agents.metrics import tf_metrics
from tf_agents.drivers import py_driver
from tf_agents.drivers import dynamic_episode_driver
tf.compat.v1.enable_v2_behavior()
###Output
_____no_output_____
###Markdown
Python Drivers

The `PyDriver` class takes a python environment, a python policy and a list of observers to update at each step. The main method is `run()`, which steps the environment using actions from the policy until at least one of the following termination criteria is met: the number of steps reaches `max_steps` or the number of episodes reaches `max_episodes`.

The implementation is roughly as follows:

```python
class PyDriver(object):

  def __init__(self, env, policy, observers, max_steps=1, max_episodes=1):
    self._env = env
    self._policy = policy
    self._observers = observers or []
    self._max_steps = max_steps or np.inf
    self._max_episodes = max_episodes or np.inf

  def run(self, time_step, policy_state=()):
    num_steps = 0
    num_episodes = 0
    while num_steps < self._max_steps and num_episodes < self._max_episodes:
      # Compute an action using the policy for the given time_step.
      action_step = self._policy.action(time_step, policy_state)

      # Apply the action to the environment and get the next step.
      next_time_step = self._env.step(action_step.action)

      # Package information into a trajectory.
      traj = trajectory.Trajectory(
          time_step.step_type,
          time_step.observation,
          action_step.action,
          action_step.info,
          next_time_step.step_type,
          next_time_step.reward,
          next_time_step.discount)

      for observer in self._observers:
        observer(traj)

      # Update statistics to check termination.
      num_episodes += np.sum(traj.is_last())
      num_steps += np.sum(~traj.is_boundary())

      time_step = next_time_step
      policy_state = action_step.state

    return time_step, policy_state
```

Now, let us run through the example of running a random policy on the CartPole environment, saving the results to a replay buffer and computing some metrics.
###Code
env = suite_gym.load('CartPole-v0')
policy = random_py_policy.RandomPyPolicy(time_step_spec=env.time_step_spec(),
action_spec=env.action_spec())
replay_buffer = []
metric = py_metrics.AverageReturnMetric()
observers = [replay_buffer.append, metric]
driver = py_driver.PyDriver(
env, policy, observers, max_steps=20, max_episodes=1)
initial_time_step = env.reset()
final_time_step, _ = driver.run(initial_time_step)
print('Replay Buffer:')
for traj in replay_buffer:
print(traj)
print('Average Return: ', metric.result())
###Output
_____no_output_____
###Markdown
TensorFlow Drivers

We also have drivers in TensorFlow which are functionally similar to Python drivers, but use TF environments, TF policies, TF observers, etc. We currently have two TensorFlow drivers: `DynamicStepDriver`, which terminates after a given number of (valid) environment steps, and `DynamicEpisodeDriver`, which terminates after a given number of episodes. Let us look at an example of the `DynamicEpisodeDriver` in action.
###Code
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)
tf_policy = random_tf_policy.RandomTFPolicy(action_spec=tf_env.action_spec(),
time_step_spec=tf_env.time_step_spec())
num_episodes = tf_metrics.NumberOfEpisodes()
env_steps = tf_metrics.EnvironmentSteps()
observers = [num_episodes, env_steps]
driver = dynamic_episode_driver.DynamicEpisodeDriver(
tf_env, tf_policy, observers, num_episodes=2)
# Initial driver.run will reset the environment and initialize the policy.
final_time_step, policy_state = driver.run()
print('final_time_step', final_time_step)
print('Number of Steps: ', env_steps.result().numpy())
print('Number of Episodes: ', num_episodes.result().numpy())
# Continue running from previous state
final_time_step, _ = driver.run(final_time_step, policy_state)
print('final_time_step', final_time_step)
print('Number of Steps: ', env_steps.result().numpy())
print('Number of Episodes: ', num_episodes.result().numpy())
###Output
_____no_output_____ |