# Control
In this notebook we want to control the chaos in the Henon map. The Henon map is defined by
$$
\begin{align}
x_{n+1}&=1-ax_n^2+y_n\\
y_{n+1}&=bx_n
\end{align}.
$$
```
import numpy as np
from plotly import offline as py
from plotly import graph_objs as go
py.init_notebook_mode(connected=True)
```
### Fixed points
First we need to find the fixed points of the Henon map. From $y_n=y_{n+1}=bx_n$ we can eliminate $y_n$ in the first equation. The quadratic equation obtained after elimination with $x_n=x_{n+1}$ yields,
$$
\begin{align}
x^*=\frac{b-1\pm\sqrt{4a+(b-1)^2}}{2a},
&&
y^*=bx^*,
\end{align}
$$
as the fixed points of the Henon map.
```
def henon_map(x0, y0, a, b, N):
x = [x0]
y = [y0]
for i in range(N):
xn = x[-1]
yn = y[-1]
x.append(1 - a * xn**2 + yn)
y.append(b * xn)
return x, y
def fixed_points(a, b):
u = (b - 1) / (2 * a)
v = np.sqrt(4 * a + (b - 1)**2) / (2 * a)
x1 = u - v
x2 = u + v
y1 = b * x1
y2 = b * x2
return [(x1, y1), (x2, y2)]
((xf1, yf1), (xf2, yf2)) = fixed_points(a=1.4, b=0.3)
radius = 0.1
layout = go.Layout(
title='Henon Attractor',
xaxis=dict(title='x'),
yaxis=dict(title='y', scaleanchor='x'),
showlegend=False,
shapes=[
{
'type': 'circle',
'xref': 'x',
'yref': 'y',
'x0': xf1 + radius,
'y0': yf1 + radius,
'x1': xf1 - radius,
'y1': yf1 - radius,
'line': { 'color': 'gray' },
},
{
'type': 'circle',
'xref': 'x',
'yref': 'y',
'x0': xf2 + radius,
'y0': yf2 + radius,
'x1': xf2 - radius,
'y1': yf2 - radius,
},
]
)
x = []
y = []
for i in range(50):
x0, y0 = np.random.uniform(0.2, 0.8, 2)
xx, yy = henon_map(x0, y0, a=1.4, b=0.3, N=100)
if np.abs(xx[-1]) < 10 and np.abs(yy[-1]) < 10:
x += xx
y += yy
figure = go.Figure([
go.Scatter(x=x, y=y, mode='markers', marker=dict(size=3))
], layout)
py.iplot(figure)
```
So the second fixed point (positive sign) sits on the attractor.
```
def fixed_point(a, b):
return fixed_points(a, b)[1]
fixed_point(a=1.4, b=0.3)
```
We assume that coordinates and parameters are sufficiently close such that the following Taylor expansion is valid,$$
\boldsymbol{x}_{n+1}
=
\boldsymbol{F}\left(\boldsymbol{x}^*,\boldsymbol{r}_0\right)
+
\frac{d\boldsymbol{F}}{d\boldsymbol{x}_n}\Bigr|_{\boldsymbol{x}^*,\boldsymbol{r}_0}\left(\boldsymbol{x}_n-\boldsymbol{x}^*\right)
+
\frac{d\boldsymbol{F}}{d\boldsymbol{r}_n}\Bigr|_{\boldsymbol{x}^*,\boldsymbol{r}_0}\left(\boldsymbol{r}_n-\boldsymbol{r}_0\right).$$
In the regime where these linear approximations are valid we can use, $$
\Delta\boldsymbol{r}_n
=
\gamma\left(\boldsymbol{x}_n-\boldsymbol{x}^*\right). $$
Further introducing $\Delta\boldsymbol{x}_n=\boldsymbol{x}_n-\boldsymbol{x}^*$ we can rewrite the map as, $$
\Delta\boldsymbol{x}_{n+1}
=
\underbrace{\left(
\frac{d\boldsymbol{F}}{d\boldsymbol{x}_n}\Bigr|_{\boldsymbol{x}^*,\boldsymbol{r}_0}
+
\gamma\,\frac{d\boldsymbol{F}}{d\boldsymbol{r}_n}\Bigr|_{\boldsymbol{x}^*,\boldsymbol{r}_0}
\right)}_{A}
\Delta\boldsymbol{x}_n.$$
The Jacobians are $$
\begin{align}
\frac{d\boldsymbol{F}}{d\boldsymbol{x}_n}\Bigr|_{\boldsymbol{x}^*,\boldsymbol{r}_0}
=
\begin{pmatrix}
-2 a_0 x^* & 1 \\
b_0 & 0
\end{pmatrix},
&&
\frac{d\boldsymbol{F}}{d\boldsymbol{r}_n}\Bigr|_{\boldsymbol{x}^*,\boldsymbol{r}_0}
=
\begin{pmatrix}
-{x^*}^2 & 0 \\
0 & x^*
\end{pmatrix}
\end{align}. $$
Thus the matrix $A$ reads, $$
A
=
\begin{pmatrix}
-2a_0x^*-\gamma{x^*}^2 & 1 \\
b_0 & \gamma x^*
\end{pmatrix}.
$$ The optimal value of $\gamma$ is found from the condition $A\Delta\boldsymbol{x}_n=0$, i.e. the parameter perturbation should push the orbit back onto the fixed point.
```
def eigenvalues(a, b, gamma=1.0):
    # Eigenvalues of the controlled matrix A at the fixed point (gamma = 1 reproduces the original numbers).
    xf, yf = fixed_point(a, b)
    A = np.array([
        [-2 * a * xf - gamma * xf**2, 1],
        [b, gamma * xf]
    ])
    return np.linalg.eigvals(A)
eigenvalues(a=1.4, b=0.3)
```
The Jacobian of the Henon map close to $(x^*,a_0,b_0)$ is given by, $$
\begin{pmatrix}
-2 a_0 x^* & 1 \\
b_0 & 0
\end{pmatrix},$$
and has eigenvalues $$\lambda=-a_0\left[x^*\pm\sqrt{{x^*}^2+b_0/a_0^2}\right]$$
```
fixed_point(a=1.4, b=0.3)
```
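As a quick sanity check (not part of the original notebook), the short sketch below compares the closed-form eigenvalues above with the eigenvalues computed numerically from the Jacobian; it assumes `numpy` is available as `np` and `fixed_point` is defined as above.
```
# Minimal sketch: analytic eigenvalue formula vs. numpy's numerical eigenvalues.
a0, b0 = 1.4, 0.3
xf, yf = fixed_point(a0, b0)

J = np.array([[-2 * a0 * xf, 1],
              [b0, 0]])
numeric = np.sort(np.linalg.eigvals(J))
analytic = np.sort(-a0 * (xf + np.array([1.0, -1.0]) * np.sqrt(xf**2 + b0 / a0**2)))
print(numeric, analytic)  # the two pairs should agree to numerical precision
```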
# Single model
```
from consav import runtools
runtools.write_numba_config(disable=0,threads=4)
%matplotlib inline
%load_ext autoreload
%autoreload 2
# Local modules
from Model import RetirementClass
import SimulatedMinimumDistance as SMD
import transitions  # used below in transitions.precompute_inc_single
import figs
import funs
# Global modules
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import time
```
### Solve and simulate model
```
tic1 = time.time()
Single = RetirementClass()
tic2 = time.time()
Single.recompute()
tic3 = time.time()
Single.solve()
tic4 = time.time()
Single.simulate(accuracy=True,tax=True)
tic5 = time.time()
print('Class :', round(tic2-tic1,2))
print('Precompute:', round(tic3-tic2,2))
print('Solve :', round(tic4-tic3,2))
print('Simulate :', round(tic5-tic4,2))
tic1 = time.time()
Single.solve()
tic2 = time.time()
Single.simulate(accuracy=True,tax=True)
tic3 = time.time()
print('Solve :', round(tic2-tic1,2))
print('Simulate :', round(tic3-tic2,2))
```
### Retirement probabilities from solution
Women
```
G = figs.choice_probs(Single,ma=0)
G['legendsize'] = 12
G['marker'] = 'o'
figs.MyPlot(G,linewidth=3).savefig('figs/Model/Single_ChoiceProb_Women.png')
```
Men
```
G = figs.choice_probs(Single,ma=1)
G['legendsize'] = 12
G['marker'] = 'o'
figs.MyPlot(G,linewidth=3).savefig('figs/Model/Single_ChoiceProb_Men.png')
```
### Simulation
```
def rename_gender(G_lst):
G_lst[0]['label'] = ['Women']
G_lst[1]['label'] = ['Men']
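# The bare expressions below appear to be regression checks (an assumption): they subtract the
# current sums from previously stored reference values, so an output near zero means the
# solution and simulation results have not changed.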
936092.2561647706 - np.nansum(Single.sol.c)
37833823.081779644 - np.nansum(Single.sol.v)
print(np.nansum(Single.par.labor))
print(np.nansum(Single.par.erp))
print(np.nansum(Single.par.oap))
Single.par.T_erp
68.51622393567519 - np.nansum(Single.par.erp)
Single.par.pension_male = np.array([10.8277686, 18.94859504])
Single.par.pension_female = np.array([ 6.6438835, 11.62679612])
transitions.precompute_inc_single(Single.par)
Single.solve()
Single.simulate()
Single.par.start_T = 53
Single.par.simT = Single.par.end_T - Single.par.start_T + 1
Single.par.var = np.array([0.202, 0.161])
Single.par.reg_labor_male = np.array((1.166, 0.360, 0.432, -0.406))
Single.par.reg_labor_female = np.array((4.261, 0.326, 0.303, -0.289))
Single.par.priv_pension_female = 728*1000/Single.par.denom
Single.par.priv_pension_male = 1236*1000/Single.par.denom
Single.solve(recompute=True)
Single.simulate()
np.nanmean(Single.sim.m[:,0])
Single.sim.m[:,0] = 20
Single.simulate()
Gw = figs.retirement_probs(Single,MA=[0])
Gm = figs.retirement_probs(Single,MA=[1])
rename_gender([Gw,Gm])
figs.MyPlot([Gw,Gm],linewidth=3).savefig('figs/Model/SimSingleProbs')
Gw = figs.lifecycle(Single,var='m',MA=[0],ages=[57,80])
Gm = figs.lifecycle(Single,var='m',MA=[1],ages=[57,80])
rename_gender([Gw,Gm])
figs.MyPlot([Gw,Gm],linewidth=3,save=False)
Gw = figs.lifecycle(Single,var='c',MA=[0],ages=[57,80])
Gm = figs.lifecycle(Single,var='c',MA=[1],ages=[57,80])
rename_gender([Gw,Gm])
figs.MyPlot([Gw,Gm],linewidth=3,save=False)
```
### Consumption functions
Retired
```
G = figs.policy(Single,var='c',T=list(range(77,87))[::2],MA=[0],ST=[3],RA=[0],D=[0],label=['t'])
G['legendsize'] = 12
figs.MyPlot(G,ylim=[0,12],save=False)
G = figs.policy(Single,var='c',T=list(range(97,111))[::2],MA=[0],ST=[3],RA=[0],D=[0],label=['t'])
G['legendsize'] = 12
figs.MyPlot(G,ylim=[0,16],save=False)
```
Working
```
G = figs.policy(Single,var='c',T=list(range(57,67))[::2],MA=[0],ST=[3],RA=[0],D=[1],label=['t'])
G['legendsize'] = 12
figs.MyPlot(G,ylim=[0,8],save=False)
G = figs.policy(Single,var='c',T=list(range(67,75))[::2],MA=[0],ST=[3],RA=[0],D=[1],label=['t'])
G['legendsize'] = 12
figs.MyPlot(G,ylim=[0,10],save=False)
```
### Simulation - Retirement
```
def rename(G_lst):
G_lst[0]['label'] = ['High skilled']
G_lst[1]['label'] = ['Base']
G_lst[2]['label'] = ['Low skilled']
```
Women
```
G_hs = figs.retirement_probs(Single,MA=[0],ST=[1,3])
G_base = figs.retirement_probs(Single,MA=[0])
G_ls = figs.retirement_probs(Single,MA=[0],ST=[0,2])
rename([G_hs,G_base,G_ls])
figs.MyPlot([G_hs,G_base,G_ls],linewidth=3,save=False)
```
Men
```
G_hs = figs.retirement_probs(Single,MA=[1],ST=[1,3])
G_base = figs.retirement_probs(Single,MA=[1])
G_ls = figs.retirement_probs(Single,MA=[1],ST=[0,2])
rename([G_hs,G_base,G_ls])
figs.MyPlot([G_hs,G_base,G_ls],linewidth=3,save=False)
```
### Simulation - Consumption
Women
```
G_hs = figs.lifecycle(Single,var='c',MA=[0],ST=[1,3],ages=[57,80])
G_base = figs.lifecycle(Single,var='c',MA=[0],ages=[57,80])
G_ls = figs.lifecycle(Single,var='c',MA=[0],ST=[0,2],ages=[57,80])
rename([G_hs,G_base,G_ls])
figs.MyPlot([G_hs,G_base,G_ls],linewidth=3,save=False)
```
Men
```
G_hs = figs.lifecycle(Single,var='c',MA=[1],ST=[1,3],ages=[57,80])
G_base = figs.lifecycle(Single,var='c',MA=[1],ages=[57,80])
G_ls = figs.lifecycle(Single,var='c',MA=[1],ST=[0,2],ages=[57,80])
rename([G_hs,G_base,G_ls])
figs.MyPlot([G_hs,G_base,G_ls],linewidth=3,save=False)
```
### Simulation - Wealth
Women
```
G_hs = figs.lifecycle(Single,var='m',MA=[0],ST=[1,3],ages=[57,68])
G_base = figs.lifecycle(Single,var='m',MA=[0],ages=[57,68])
G_ls = figs.lifecycle(Single,var='m',MA=[0],ST=[0,2],ages=[57,68])
rename([G_hs,G_base,G_ls])
figs.MyPlot([G_hs,G_base,G_ls],linewidth=3,save=False)
```
Men
```
G_hs = figs.lifecycle(Single,var='m',MA=[1],ST=[1,3],ages=[57,68])
G_base = figs.lifecycle(Single,var='m',MA=[1],ages=[57,68])
G_ls = figs.lifecycle(Single,var='m',MA=[1],ST=[0,2],ages=[57,68])
rename([G_hs,G_base,G_ls])
figs.MyPlot([G_hs,G_base,G_ls],linewidth=3,save=False)
```
### Euler errors
```
MA = [0,1]
ST = [0,1,2,3]
ages = [Single.par.start_T,Single.par.end_T-1]
for ma in MA:
for st in ST:
funs.log_euler(Single,MA=[ma],ST=[st],ages=ages,plot=True)
print('Total:',funs.log_euler(Single,ages=ages)[0])
Na = Single.par.Na
funs.resolve(Single,Na=np.linspace(50,1000))
Single.par.Na = Na
Single.recompute() # reset
a_phi = Single.par.a_phi
funs.resolve(Single, a_phi=np.linspace(1.0, 2.0, num=10))
Single.par.a_phi = a_phi
Single.solve(recompute=True) # reset
```
# CNTK 201A Part A: CIFAR-10 Data Loader
This tutorial will show how to prepare image data sets for use with deep learning algorithms in CNTK. The CIFAR-10 dataset (http://www.cs.toronto.edu/~kriz/cifar.html) is a popular dataset for image classification, collected by Alex Krizhevsky, Vinod Nair, and Geoffrey Hinton. It is a labeled subset of the [80 million tiny images](http://people.csail.mit.edu/torralba/tinyimages/) dataset.
The CIFAR-10 dataset is not included in the CNTK distribution, but it can be easily downloaded and converted to a CNTK-supported format.
The CNTK 201A tutorial is divided into two parts:
- Part A: Familiarizes you with the CIFAR-10 data and converts them into CNTK supported format. This data will be used later in the tutorial for image classification tasks.
- Part B: We will introduce image understanding tutorials.
If you are curious about how well computers can perform on CIFAR-10 today, Rodrigo Benenson maintains a [blog](http://rodrigob.github.io/are_we_there_yet/build/classification_datasets_results.html#43494641522d3130) on the state-of-the-art performance of various algorithms.
```
from __future__ import print_function
from PIL import Image
import getopt
import numpy as np
import pickle as cp
import os
import shutil
import struct
import sys
import tarfile
import xml.etree.cElementTree as et
import xml.dom.minidom
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
# Config matplotlib for inline plotting
%matplotlib inline
```
## Data download
The CIFAR-10 dataset consists of 60,000 32x32 color images in 10 classes, with 6,000 images per class.
There are 50,000 training images and 10,000 test images. The 10 classes are: airplane, automobile, bird,
cat, deer, dog, frog, horse, ship, and truck.
```
# CIFAR Image data
imgSize = 32
numFeature = imgSize * imgSize * 3
```
We first set up a few helper functions to download the CIFAR data. The archive contains the files data_batch_1, data_batch_2, ..., data_batch_5, as well as test_batch. Each of these files is a Python "pickled" object produced with cPickle. To prepare the input data for use in CNTK we use three operations:
> `readBatch`: Unpack the pickle files
> `loadData`: Compose the data into single train and test objects
> `saveTxt`: As the name suggests, saves the label and the features into text files for both training and testing.
```
def readBatch(src):
with open(src, 'rb') as f:
if sys.version_info[0] < 3:
d = cp.load(f)
else:
d = cp.load(f, encoding='latin1')
data = d['data']
feat = data
res = np.hstack((feat, np.reshape(d['labels'], (len(d['labels']), 1))))
return res.astype(np.int)
def loadData(src):
print ('Downloading ' + src)
fname, h = urlretrieve(src, './delete.me')
print ('Done.')
try:
print ('Extracting files...')
with tarfile.open(fname) as tar:
tar.extractall()
print ('Done.')
print ('Preparing train set...')
trn = np.empty((0, numFeature + 1), dtype=np.int)
for i in range(5):
batchName = './cifar-10-batches-py/data_batch_{0}'.format(i + 1)
trn = np.vstack((trn, readBatch(batchName)))
print ('Done.')
print ('Preparing test set...')
tst = readBatch('./cifar-10-batches-py/test_batch')
print ('Done.')
finally:
os.remove(fname)
return (trn, tst)
def saveTxt(filename, ndarray):
with open(filename, 'w') as f:
labels = list(map(' '.join, np.eye(10, dtype=np.uint).astype(str)))
for row in ndarray:
row_str = row.astype(str)
label_str = labels[row[-1]]
feature_str = ' '.join(row_str[:-1])
f.write('|labels {} |features {}\n'.format(label_str, feature_str))
```
In addition to saving the images in the text format, we also save them as PNG files and compute the mean image of the training set. `saveImage` and `saveMean` are the two functions used for this purpose.
```
def saveImage(fname, data, label, mapFile, regrFile, pad, **key_parms):
# data in CIFAR-10 dataset is in CHW format.
pixData = data.reshape((3, imgSize, imgSize))
if ('mean' in key_parms):
key_parms['mean'] += pixData
if pad > 0:
pixData = np.pad(pixData, ((0, 0), (pad, pad), (pad, pad)), mode='constant', constant_values=128)
img = Image.new('RGB', (imgSize + 2 * pad, imgSize + 2 * pad))
pixels = img.load()
for x in range(img.size[0]):
for y in range(img.size[1]):
pixels[x, y] = (pixData[0][y][x], pixData[1][y][x], pixData[2][y][x])
img.save(fname)
mapFile.write("%s\t%d\n" % (fname, label))
# compute per channel mean and store for regression example
channelMean = np.mean(pixData, axis=(1,2))
regrFile.write("|regrLabels\t%f\t%f\t%f\n" % (channelMean[0]/255.0, channelMean[1]/255.0, channelMean[2]/255.0))
def saveMean(fname, data):
root = et.Element('opencv_storage')
et.SubElement(root, 'Channel').text = '3'
et.SubElement(root, 'Row').text = str(imgSize)
et.SubElement(root, 'Col').text = str(imgSize)
meanImg = et.SubElement(root, 'MeanImg', type_id='opencv-matrix')
et.SubElement(meanImg, 'rows').text = '1'
et.SubElement(meanImg, 'cols').text = str(imgSize * imgSize * 3)
et.SubElement(meanImg, 'dt').text = 'f'
et.SubElement(meanImg, 'data').text = ' '.join(['%e' % n for n in np.reshape(data, (imgSize * imgSize * 3))])
tree = et.ElementTree(root)
tree.write(fname)
x = xml.dom.minidom.parse(fname)
with open(fname, 'w') as f:
f.write(x.toprettyxml(indent = ' '))
```
`saveTrainImages` and `saveTestImages` are simple wrapper functions to iterate through the data set.
```
def saveTrainImages(filename, foldername):
if not os.path.exists(foldername):
os.makedirs(foldername)
data = {}
dataMean = np.zeros((3, imgSize, imgSize)) # mean is in CHW format.
with open('train_map.txt', 'w') as mapFile:
with open('train_regrLabels.txt', 'w') as regrFile:
for ifile in range(1, 6):
with open(os.path.join('./cifar-10-batches-py', 'data_batch_' + str(ifile)), 'rb') as f:
if sys.version_info[0] < 3:
data = cp.load(f)
else:
data = cp.load(f, encoding='latin1')
for i in range(10000):
fname = os.path.join(os.path.abspath(foldername), ('%05d.png' % (i + (ifile - 1) * 10000)))
saveImage(fname, data['data'][i, :], data['labels'][i], mapFile, regrFile, 4, mean=dataMean)
dataMean = dataMean / (50 * 1000)
saveMean('CIFAR-10_mean.xml', dataMean)
def saveTestImages(filename, foldername):
if not os.path.exists(foldername):
os.makedirs(foldername)
with open('test_map.txt', 'w') as mapFile:
with open('test_regrLabels.txt', 'w') as regrFile:
with open(os.path.join('./cifar-10-batches-py', 'test_batch'), 'rb') as f:
if sys.version_info[0] < 3:
data = cp.load(f)
else:
data = cp.load(f, encoding='latin1')
for i in range(10000):
fname = os.path.join(os.path.abspath(foldername), ('%05d.png' % i))
saveImage(fname, data['data'][i, :], data['labels'][i], mapFile, regrFile, 0)
# URLs for the train image and labels data
url_cifar_data = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
# Paths for saving the text files
data_dir = './data/CIFAR-10/'
train_filename = data_dir + '/Train_cntk_text.txt'
test_filename = data_dir + '/Test_cntk_text.txt'
train_img_directory = data_dir + '/Train'
test_img_directory = data_dir + '/Test'
root_dir = os.getcwd()
if not os.path.exists(data_dir):
os.makedirs(data_dir)
try:
os.chdir(data_dir)
trn, tst = loadData(url_cifar_data)
print ('Writing train text file...')
saveTxt(r'./Train_cntk_text.txt', trn)
print ('Done.')
print ('Writing test text file...')
saveTxt(r'./Test_cntk_text.txt', tst)
print ('Done.')
print ('Converting train data to png images...')
saveTrainImages(r'./Train_cntk_text.txt', 'train')
print ('Done.')
print ('Converting test data to png images...')
saveTestImages(r'./Test_cntk_text.txt', 'test')
print ('Done.')
finally:
os.chdir("../..")
```
# MARATONA BEHIND THE CODE 2020
## CHALLENGE 2: PART 1
### Introduction
In data science projects aimed at building *machine learning* (statistical learning) models, it is very uncommon for the initial data to already be in the ideal format for model building. Several intermediate pre-processing steps are needed, such as encoding categorical variables, normalizing numerical variables, handling missing data, and so on. The **scikit-learn** library -- one of the most popular open-source machine learning libraries in the world -- provides many built-in functions for the most commonly used data transformations. However, in a typical machine-learning workflow these transformations need to be applied at least twice: first to "train" the model, and then again whenever new data is sent as input to be classified by that model.
To make this kind of workflow easier, scikit-learn also provides a tool called **Pipeline**, which is nothing more than an ordered list of transformations to be applied to the data. Besides using Pipelines, data science teams can rely on **Watson Machine Learning** to develop and manage the whole life cycle of these applications; it offers dozens of tools to train, manage, host and evaluate machine-learning-based models. In addition, Watson Machine Learning can encapsulate pipelines and models in an API ready for use and integration with other applications.
During challenge 2 you will learn how to build a **Pipeline** for a classification model and host it as an API with the help of Watson Machine Learning. Once hosted, you can integrate the created model with other applications, such as virtual assistants and much more. This notebook presents a working example of creating a model and a pipeline in scikit-learn (which you can use as a template for your solution!).
## ** ATTENTION **
This notebook serves an educational purpose only; you may change the code as you wish and nothing here will be evaluated/scored.
The recommendation is that you experiment and test different algorithms here before moving on to *part 2*, where your model will be deployed to **Watson Machine Learning** :)
### Working with scikit-learn Pipelines
```
# First, install scikit-learn version 0.20.3 and xgboost version 0.71 in this notebook's kernel
# ** BE CAREFUL WHEN CHANGING LIBRARY VERSIONS -- DIFFERENT VERSIONS MAY BE INCOMPATIBLE WITH WATSON STUDIO **
# NOTE: installing xgboost takes a considerable amount of time
!pip install scikit-learn==0.20.3 --upgrade
!pip install xgboost==0.71 --upgrade
# Next, we import the various libraries that will be used:
# Package for working with JSON
import json
# Package for making HTTP requests
import requests
# Package for data exploration and analysis
import pandas as pd
# Package with numerical methods and matrix representations
import numpy as np
# Package for building models based on the Gradient Boosting technique
import xgboost as xgb
# scikit-learn packages for data pre-processing
# "SimpleImputer" is a transform for filling in missing values in a dataset
from sklearn.impute import SimpleImputer
# scikit-learn packages for model training and pipeline construction
# Function for splitting a dataset into training and test samples
from sklearn.model_selection import train_test_split
# Class for creating decision-tree-based models
from sklearn.tree import DecisionTreeClassifier
# Class for creating a machine-learning pipeline
from sklearn.pipeline import Pipeline
# scikit-learn packages for model evaluation
# Functions for cross-validation of the created model
from sklearn.model_selection import KFold, cross_validate
```
### Importing a .csv from your IBM Cloud Pak for Data project into this notebook's kernel
First we will import the dataset provided for the challenge, which is already included in this project!
You can import the data from a .csv file directly into the notebook kernel as a DataFrame of the Pandas library, which is widely used for data manipulation in Python.
To perform the import, simply select the next cell and follow the instructions in the image below:

After selecting the **"Insert to code"** option, the cell below will be filled with the code needed to import and read the data in the .csv file as a Pandas DataFrame.
```
<< INSERT THE DATASET AS A PANDAS DATAFRAME IN THIS CELL! >>>
```
The provided dataset has 15 columns: fourteen of them are feature variables (input data) and one is the target variable (the one we want our model to be able to predict).
The feature variables are:
MATRICULA - the student's enrolment number
NOME - the student's full name
REPROVACOES_DE - number of failures in the ``Direito Empresarial`` (Business Law) course
REPROVACOES_EM - number of failures in the ``Empreendedorismo`` (Entrepreneurship) course
REPROVACOES_MF - number of failures in the ``Matemática Financeira`` (Financial Mathematics) course
REPROVACOES_GO - number of failures in the ``Gestão Operacional`` (Operational Management) course
NOTA_DE - the student's simple grade average in ``Direito Empresarial`` (0-10)
NOTA_EM - the student's simple grade average in ``Empreendedorismo`` (0-10)
NOTA_MF - the student's simple grade average in ``Matemática Financeira`` (0-10)
NOTA_GO - the student's simple grade average in ``Gestão Operacional`` (0-10)
INGLES - binary variable indicating whether the student knows English (0 -> yes or 1 -> no)
H_AULA_PRES - hours of in-person study completed by the student
TAREFAS_ONLINE - number of online assignments submitted by the student
FALTAS - the student's accumulated absences (across all courses)
The target variable is:
PERFIL - a *string* indicating one of five possibilities:
"EXCELENTE" - the student does not need mentoring
"MUITO BOM" - the student does not need mentoring
"HUMANAS" - the student needs mentoring only in courses with humanities content
"EXATAS" - the student needs mentoring only in courses with exact-sciences content
"DIFICULDADE" - the student needs mentoring in two or more courses
With a model able to classify a student into one of these categories, we can automate part of the student mentoring through virtual assistants that can recommend study practices and personalized content based on each student's needs.
### Exploring the provided data
We can continue exploring the provided data with the ``info()`` function:
```
df_data_1.info()
```
Note that there are variables of type ``float64`` ("decimal" numbers), ``int64`` (integers) and ``object`` (in this case *strings*, i.e. text).
Since most supervised statistical learning algorithms only accept numerical values as input, the "object" variables need to be pre-processed before this dataset can be used to train a model. Note also that several columns contain missing values; these must be handled as well before models are built on this base dataset.
The ``describe()`` function generates various statistics about the numerical variables that can also be useful:
```
df_data_1.describe()
```
### Visualizations
To visualize the provided dataset, we can use the ``matplotlib`` and ``seaborn`` libraries:
```
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(28, 4))
sns.countplot(ax=axes[0], x='REPROVACOES_DE', data=df_data_1)
sns.countplot(ax=axes[1], x='REPROVACOES_EM', data=df_data_1)
sns.countplot(ax=axes[2], x='REPROVACOES_MF', data=df_data_1)
sns.countplot(ax=axes[3], x='REPROVACOES_GO', data=df_data_1)
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(28, 4))
sns.distplot(df_data_1['NOTA_DE'], ax=axes[0])
sns.distplot(df_data_1['NOTA_EM'], ax=axes[1])
sns.distplot(df_data_1['NOTA_MF'], ax=axes[2])
sns.distplot(df_data_1['NOTA_GO'].dropna(), ax=axes[3])
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(28, 4))
sns.countplot(ax=axes[0], x='INGLES', data=df_data_1)
sns.countplot(ax=axes[1], x='FALTAS', data=df_data_1)
sns.countplot(ax=axes[2], x='H_AULA_PRES', data=df_data_1)
sns.countplot(ax=axes[3], x='TAREFAS_ONLINE', data=df_data_1)
fig = plt.plot()
sns.countplot(x='PERFIL', data=df_data_1)
```
## ** ATTENTION **
You may notice from the figure above that this dataset is imbalanced, i.e. the number of samples differs considerably between the classes we want to predict. Participants are free to add or remove **ROWS** of the provided dataset, including by using balancing libraries such as ``imblearn`` (a hedged sketch is given right below). However, be **very careful**!!! You must not change the data types nor remove or reorder the columns of the provided dataset. All operations of that kind must be done through scikit-learn Transforms :)
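For reference only, the cell below is a minimal, hedged sketch of row balancing with ``imblearn``; it assumes the ``imbalanced-learn`` package is installed and that the provided dataframe is named ``df_data_1``, as in the cells above.
```
# Minimal balancing sketch (assumptions: imbalanced-learn installed, dataframe named df_data_1).
from imblearn.over_sampling import RandomOverSampler

ros = RandomOverSampler(random_state=42)
X_bal, y_bal = ros.fit_resample(
    df_data_1.drop(columns=["PERFIL"]),  # all feature columns (rows are only duplicated, never altered)
    df_data_1["PERFIL"]                  # the target column
)
print(pd.Series(y_bal).value_counts())   # every class now has the same number of rows
```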
<hr>
### Pre-processing the data
For data pre-processing, two basic transformations are presented in this notebook, demonstrating the construction of a Pipeline with a working model. This working Pipeline should be improved by the participant so that the final model reaches the highest possible accuracy, guaranteeing a higher score in the challenge. The improvement can be made in the data pre-processing alone, in the choice of a different training algorithm, or even by switching the *framework* used (however, only a ready-made example of integrating Watson Machine Learning with *scikit-learn* is provided).
The first transformation (step in our Pipeline) will be dropping the "NOME" column from our dataset: besides not being a numerical variable, it is also unrelated to the students' performance in the courses. scikit-learn has ready-made functions for this kind of transformation, but our example shows how to create a custom transformation from scratch. If desired, the participant can use this example to create other transformations and add them to the final Pipeline :)
#### Transformation 1: dropping columns from the dataset
To create a custom data transformation in scikit-learn, you basically need to create a class with ``transform`` and ``fit`` methods. The logic of our transformation is executed in the transform method.
The next cell shows the complete code of a ``DropColumns`` transformation for removing columns from a pandas DataFrame.
```
from sklearn.base import BaseEstimator, TransformerMixin
# All sklearn Transforms must have the `transform` and `fit` methods
class DropColumns(BaseEstimator, TransformerMixin):
def __init__(self, columns):
self.columns = columns
def fit(self, X, y=None):
return self
def transform(self, X):
        # First, make a copy of the input dataframe 'X'
data = X.copy()
        # Return a new dataframe without the unwanted columns
return data.drop(labels=self.columns, axis='columns')
```
To apply this transformation to a pandas DataFrame, simply instantiate a *DropColumns* object and call the transform() method.
```
# Instantiate a DropColumns transformation
rm_columns = DropColumns(
    columns=["NOME"]  # this transformation takes a list with the names of the unwanted columns as a parameter
)
print(rm_columns)
# View the columns of the original dataset
print("Columns of the original dataset: \n")
print(df_data_1.columns)
# Apply the ``DropColumns`` transformation to the base dataset
rm_columns.fit(X=df_data_1)
# Rebuild a pandas DataFrame with the result of the transformation
df_data_2 = pd.DataFrame.from_records(
    data=rm_columns.transform(
        X=df_data_1
    ),
)
# View the columns of the transformed dataset
print("Columns of the dataset after the ``DropColumns`` transformation: \n")
print(df_data_2.columns)
```
Note that the "NOME" column has been removed, so our dataset now has 14 columns.
#### Transformation 2: handling missing data
To handle the missing data in our dataset, we will now use a ready-made transformation from the scikit-learn library called **SimpleImputer**.
This transformation supports several strategies for handling missing data. The official documentation can be found at: https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html
In this example we will simply turn every missing value into zero.
```
# Create a ``SimpleImputer`` object
si = SimpleImputer(
    missing_values=np.nan,  # the missing values are of type ``np.nan`` (pandas default)
    strategy='constant',  # the chosen strategy is to replace each missing value with a constant
    fill_value=0,  # the constant used to fill the missing values is an int64 = 0
    verbose=0,
    copy=True
)
# View the missing values of the dataset after the first transformation (df_data_2)
print("Null values before the SimpleImputer transformation: \n\n{}\n".format(df_data_2.isnull().sum(axis = 0)))
# Apply the SimpleImputer ``si`` to the dataset df_data_2 (the result of the first transformation)
si.fit(X=df_data_2)
# Rebuild a new pandas DataFrame with the imputed dataset (df_data_3)
df_data_3 = pd.DataFrame.from_records(
    data=si.transform(
        X=df_data_2
    ),  # the result of SimpleImputer.transform(<<pandas dataframe>>) is a list of lists
    columns=df_data_2.columns  # the original columns must be kept in this transformation
)
# View the missing values of the dataset after the second transformation (SimpleImputer) (df_data_3)
print("Null values in the dataset after the SimpleImputer transformation: \n\n{}\n".format(df_data_3.isnull().sum(axis = 0)))
```
Note that there are no missing values left in our dataset :)
It is worth pointing out that replacing missing values with 0 is not always the best strategy. Participants are encouraged to study and implement different strategies for handling missing values in order to improve the model and the final score.
### Training a classification model
With the pre-processing finished, we now have the dataset in the format required to train our model:
```
df_data_3.head()
```
In the example provided, we will use every column except the target column **PERFIL** as a *feature* (input variable).
The **PERFIL** variable will be the model's target variable, as described in the challenge statement.
#### Defining the model's features
```
# Define the columns that will be features (note that the NOME column is not present)
features = [
    "MATRICULA", 'REPROVACOES_DE', 'REPROVACOES_EM', "REPROVACOES_MF", "REPROVACOES_GO",
    "NOTA_DE", "NOTA_EM", "NOTA_MF", "NOTA_GO",
    "INGLES", "H_AULA_PRES", "TAREFAS_ONLINE", "FALTAS",
]
# Define the target variable
target = ["PERFIL"]
# Prepare the arguments for the ``scikit-learn`` library methods
X = df_data_3[features]
y = df_data_3[target]
```
The input set (X):
```
X.head()
```
The corresponding target variables (y):
```
y.head()
```
#### Splitting the dataset into a training set and a test set
We will split the provided dataset into two groups: one to train our model and another to test the result through a blind test. The split can be done easily with the *train_test_split()* function from scikit-learn:
```
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=337)
```
<hr>
#### Creating a decision-tree-based model
In the example provided we create a classifier based on **decision trees**.
Theoretical material on decision trees can be found in the official scikit-learn documentation: https://scikit-learn.org/stable/modules/tree.html
The first step is simply to instantiate a *DecisionTreeClassifier()* object from the scikit-learn library.
```
# Create a decision tree with the ``scikit-learn`` library:
decision_tree = DecisionTreeClassifier()
```
#### Training the decision-tree classifier
```
# Train the model (the *fit()* method is called with the training sets)
decision_tree.fit(
X_train,
y_train
)
```
#### Making predictions and evaluating the decision tree
```
# Blind test of the created model
y_pred = decision_tree.predict(X_test)
X_test.head()
print(y_pred)
from sklearn.metrics import accuracy_score
# Accuracy achieved by the decision tree
print("Accuracy: {}%".format(100*round(accuracy_score(y_test, y_pred), 2)))
```
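Since the goal of the challenge is to deliver a single Pipeline, the cell below is a minimal, hedged sketch (not part of the original challenge code) of how the steps above could be chained into one scikit-learn Pipeline. It assumes the ``df_data_1`` DataFrame and the ``DropColumns`` class defined earlier; the name ``student_pipeline`` is just a placeholder.
```
# Minimal sketch: chain the transformations and the classifier into a single Pipeline.
student_pipeline = Pipeline(steps=[
    ('drop_columns', DropColumns(columns=["NOME"])),                # step 1: drop the NOME column
    ('imputer', SimpleImputer(strategy='constant', fill_value=0)),  # step 2: fill missing values with 0
    ('classifier', DecisionTreeClassifier())                        # final step: the model itself
])

# Train and evaluate the whole pipeline on the raw, untransformed data.
X_raw = df_data_1.drop(columns=["PERFIL"])   # raw inputs, still containing NOME and missing values
y_raw = df_data_1["PERFIL"]
Xr_train, Xr_test, yr_train, yr_test = train_test_split(X_raw, y_raw, test_size=0.3, random_state=337)
student_pipeline.fit(Xr_train, yr_train)
print("Pipeline accuracy: {}%".format(100*round(student_pipeline.score(Xr_test, yr_test), 2)))
```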
<hr>
This notebook demonstrated how to work with transformations and models using the scikit-learn library. Participants are encouraged to experiment by editing the code provided here until a model with high accuracy is reached.
When you are satisfied with your model, you can move on to the second stage of the challenge -- encapsulating your model as a REST API ready for use with Watson Machine Learning!
The notebook for the second stage is already in this project; just go to the **ASSETS** tab and start it! Don't forget to shut down this notebook's kernel first to reduce the consumption of your IBM Cloud Pak for Data free tier.
# Discrete stochastic Erlang SEIR model
Author: Lam Ha @lamhm
Date: 2018-10-03
## Calculate Discrete Erlang Probabilities
The following function is to calculate the discrete truncated Erlang probability, given $k$ and $\gamma$:
\begin{equation*}
p_i =
\frac{1}{C(n^{E})}
\Bigl(\sum_{j=0}^{k-1}
\frac{e^{-(i-1)\gamma} \times ((i-1)\gamma)^{j}} {j!}
-\sum_{j=0}^{k-1}
\frac{e^{-i\gamma} \times (i\gamma)^{j}} {j!}\Bigr),\quad\text{for $i=1,...,n^{E}$}.
\end{equation*}
where
\begin{equation*}
n^{E} = argmin_n\Bigl(C(n) = 1 - \sum_{j=0}^{k-1}
\frac{e^{-n\gamma} \times (n\gamma)^{j}} {j!} > 0.99 \Bigr)
\end{equation*}
**N.B. The formula of $p_i$ here is slightly different from what is shown in the original paper because the latter (which is likely to be wrong) would lead to negative probabilities.**
```
#' @param k The shape parameter of the Erlang distribution.
#' @param gamma The rate parameter of the Erlang distribution.
#' @return A vector containing all p_i values, for i = 1 : n.
compute_erlang_discrete_prob <- function(k, gamma) {
n_bin <- 0
factorials <- 1 ## 0! = 1
for (i in 1 : k) {
factorials[i + 1] <- factorials[i] * i ## factorial[i + 1] = i!
}
one_sub_cummulative_probs <- NULL
cummulative_prob <- 0
while (cummulative_prob <= 0.99) {
n_bin <- n_bin + 1
one_sub_cummulative_probs[n_bin] <- 0
for ( j in 0 : (k - 1) ) {
one_sub_cummulative_probs[n_bin] <-
one_sub_cummulative_probs[n_bin] +
(
exp( -n_bin * gamma )
* ( (n_bin * gamma) ^ j )
/ factorials[j + 1] ## factorials[j + 1] = j!
)
}
cummulative_prob <- 1 - one_sub_cummulative_probs[n_bin]
}
one_sub_cummulative_probs <- c(1, one_sub_cummulative_probs)
density_prob <-
head(one_sub_cummulative_probs, -1) - tail(one_sub_cummulative_probs, -1)
density_prob <- density_prob / cummulative_prob
return(density_prob)
}
```
The implementation above calculates the discrete probabilities $p_i$ based on the cumulative distribution function of the Erlang distribution:
\begin{equation*}
p_i = CDF_{Erlang}(x = i) - CDF_{Erlang}(x = i-1)
\end{equation*}
Meanwhile, the estimates of $p_i$ in the original paper seem to be based on the probability density function:
\begin{equation*}
p_i = PDF_{Erlang}(x = i)
\end{equation*}
While the two methods give slightly different estimates, they do not lead to any visible differences in the results of the subsequent simulations. This implementation uses the CDF function since it leads to faster runs.
## Simulate the SEIR Dynamics
The next function is to simulate the SEIR (susceptible, exposed, infectious, recovered) dynamics of an epidemic, assuming that transmission is frequency-dependent, i.e.
\begin{equation*}
\beta = \beta_0 \frac{I(t)}{N}
\end{equation*}
where $N$ is the population size, $I(t)$ is the number of infectious people at time $t$, and $\beta_0$ is the base transmission rate.
This model does not consider births or deaths (i.e. $N$ is constant).
The rates at which individuals move through the E and the I classes are assumed to follow Erlang distributions of given shapes ($k^E$, $k^I$) and rates ($\gamma^E$, $\gamma^I$).
```
#' @param initial_state A vector that contains 4 numbers corresponding to the
#' initial values of the 4 classes: S, E, I, and R.
#' @param parameters A vector that contains 5 numbers corresponding to the
#' following parameters: the shape and the rate parameters
#' of the Erlang distribution that will be used to
#' calculate the transition rates between the E components
#' (i.e. k[E] and gamma[E]), the shape and the rate parameters
#' of the Erlang distribution that will be used to
#' calculate the transition rates between the I components
#' (i.e. k[I] and gamma[I]), and the base transmission rate
#' (i.e. beta).
#' @param max_time The length of the simulation.
#' @return A data frame containing the values of S, E, I, and R over time
#' (from 1 to max_time).
seir_simulation <- function(initial_state, parameters, max_time) {
names(initial_state) <- c("S", "E", "I", "R")
names(parameters) <- c( "erlang_shape_for_E", "erlang_rate_for_E",
"erlang_shape_for_I", "erlang_rate_for_I",
"base_transmission_rate" )
population_size <- sum(initial_state)
sim_data <- data.frame( time = c(1 : max_time),
S = NA, E = NA, I = NA, R = NA )
sim_data[1, 2:5] <- initial_state
## Initialise a matrix to store the states of the exposed sub-blocks over time.
exposed_block_adm_rates <- compute_erlang_discrete_prob(
k = parameters["erlang_shape_for_E"],
gamma = parameters["erlang_rate_for_E"]
)
n_exposed_blocks <- length(exposed_block_adm_rates)
exposed_blocks <- matrix( data = 0, nrow = max_time,
ncol = n_exposed_blocks )
exposed_blocks[1, n_exposed_blocks] <- sim_data$E[1]
## Initialise a matrix to store the states of the infectious sub-blocks over time.
infectious_block_adm_rates <- compute_erlang_discrete_prob(
k = parameters["erlang_shape_for_I"],
gamma = parameters["erlang_rate_for_I"]
)
n_infectious_blocks <- length(infectious_block_adm_rates)
infectious_blocks <- matrix( data = 0, nrow = max_time,
ncol = n_infectious_blocks )
infectious_blocks[1, n_infectious_blocks] <- sim_data$I[1]
## Run the simulation from time t = 2 to t = max_time
for (time in 2 : max_time) {
transmission_rate <-
parameters["base_transmission_rate"] * sim_data$I[time - 1] /
population_size
exposure_prob <- 1 - exp(-transmission_rate)
new_exposed <- rbinom(1, sim_data$S[time - 1], exposure_prob)
new_infectious <- exposed_blocks[time - 1, 1]
new_recovered <- infectious_blocks[time - 1, 1]
if (new_exposed > 0) {
exposed_blocks[time, ] <- t(
rmultinom(1, size = new_exposed,
prob = exposed_block_adm_rates)
)
}
exposed_blocks[time, ] <-
exposed_blocks[time, ] +
c( exposed_blocks[time - 1, 2 : n_exposed_blocks], 0 )
if (new_infectious > 0) {
infectious_blocks[time, ] <- t(
rmultinom(1, size = new_infectious,
prob = infectious_block_adm_rates)
)
}
infectious_blocks[time, ] <-
infectious_blocks[time, ] +
c( infectious_blocks[time - 1, 2 : n_infectious_blocks], 0 )
sim_data$S[time] <- sim_data$S[time - 1] - new_exposed
sim_data$E[time] <- sum(exposed_blocks[time, ])
sim_data$I[time] <- sum(infectious_blocks[time, ])
sim_data$R[time] <- sim_data$R[time - 1] + new_recovered
}
return(sim_data)
}
```
To run a simulation, simply call the $seir\_simulation(\dots)$ function defined above.
Below is an example simulation where $k^E = 5$, $\gamma^E = 1$, $k^I = 10$, $\gamma^I = 1$, and $\beta_0 = 0.25$ ($R_0 = \beta_0\frac{k^I}{\gamma^I} = 2.5$). The population size is $N = 10,000$. The simulation starts with 1 exposed case and everyone else in the susceptible class. These settings are the same as those of simulation 11 in the original paper.
**N.B. Since this is a stochastic model, there is a chance that the outbreak does not take off even with a high $R_0$.**
```
sim <- seir_simulation( initial_state = c(S = 9999, E = 1, I = 0, R = 0),
parameters = c(5, 1, 10, 1, 0.25),
max_time = 300 )
```
## Visualisation
```
library(ggplot2)
ggplot(sim, aes(time)) +
geom_line(aes(y = S, colour = "Susceptible"), lwd = 1) +
geom_line(aes(y = E, colour = "Exposed"), lwd = 1) +
geom_line(aes(y = I, colour = "Infectious"), lwd = 1) +
geom_line(aes(y = R, colour = "Recovered"), lwd = 1) +
xlab("Time") + ylab("Number of Individuals")
```
## Test Case
```
set.seed(12345)
test_sim <- seir_simulation( initial_state = c(S = 9999, E = 1, I = 0, R = 0),
parameters = c(5, 1, 10, 1, 0.25),
max_time = 100 )
test_result <- as.matrix( tail(test_sim, 3) )
correct_result <- matrix( c( 98, 7384, 794, 1015, 807,
99, 7184, 864, 1068, 884,
100, 6986, 920, 1144, 950), nrow = 3, byrow = T )
n_correct_cells <- sum(correct_result == test_result)
cat("\n--------------------\n")
if (n_correct_cells == 15) {
cat(" Test PASSED\n")
} else {
cat(" Test FAILED\n")
}
cat("--------------------\n\n")
```
# <div align="center">Random Forest Classification in Python</div>
---------------------------------------------------------------------
you can Find me on Github:
> ###### [ GitHub](https://github.com/lev1khachatryan)
<img src="asset/main.png" />
<a id="top"></a> <br>
## Notebook Content
1. [The random forests algorithm](#1)
2. [How does the algorithm work?](#2)
3. [Its advantages and disadvantages](#3)
4. [Finding important features](#4)
5. [Comparision between random forests and decision trees](#5)
6. [Building a classifier with scikit-learn](#6)
7. [Finding important features with scikit-learn](#7)
<a id="1"></a> <br>
# <div align="center">1. The Random Forests Algorithm</div>
---------------------------------------------------------------------
[go to top](#top)
Random forests is a supervised learning algorithm. It can be used both for classification and regression. It is also the most flexible and easy to use algorithm. A forest is comprised of trees. It is said that the more trees it has, the more robust a forest is. Random forests creates decision trees on randomly selected data samples, gets prediction from each tree and selects the best solution by means of voting. It also provides a pretty good indicator of the feature importance.
Random forests has a variety of applications, such as recommendation engines, image classification and feature selection. It can be used to classify loyal loan applicants, identify fraudulent activity and predict diseases. It lies at the base of the Boruta algorithm, which selects important features in a dataset.
Let’s understand the algorithm in layman’s terms. Suppose you want to go on a trip and you would like to travel to a place which you will enjoy.
So what do you do to find a place that you will like? You can search online, read reviews on travel blogs and portals, or you can also ask your friends.
Let’s suppose you have decided to ask your friends, and talked with them about their past travel experience to various places. You will get some recommendations from every friend. Now you have to make a list of those recommended places. Then, you ask them to vote (or select one best place for the trip) from the list of recommended places you made. The place with the highest number of votes will be your final choice for the trip.
In the above decision process, there are two parts. First, asking your friends about their individual travel experience and getting one recommendation out of multiple places they have visited. This part is like using the decision tree algorithm. Here, each friend makes a selection of the places he or she has visited so far.
The second part, after collecting all the recommendations, is the voting procedure for selecting the best place in the list of recommendations. This whole process of getting recommendations from friends and voting on them to find the best place is known as the random forests algorithm.
It technically is an ensemble method (based on the divide-and-conquer approach) of decision trees generated on a randomly split dataset. This collection of decision tree classifiers is also known as the forest. The individual decision trees are generated using an attribute selection indicator such as information gain, gain ratio, and Gini index for each attribute. Each tree depends on an independent random sample. In a classification problem, each tree votes and the most popular class is chosen as the final result. In the case of regression, the average of all the tree outputs is considered as the final result. It is simpler and more powerful compared to the other non-linear classification algorithms.
<a id="2"></a> <br>
# <div align="center">2. How does the algorithm work?</div>
---------------------------------------------------------------------
[go to top](#top)
It works in four steps:
1) Select random samples from a given dataset.
2) Construct a decision tree for each sample and get a prediction result from each decision tree.
3) Perform a vote for each predicted result.
4) Select the prediction result with the most votes as the final prediction.
<img src="asset/1.png" />
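To make these four steps concrete, here is a minimal, self-contained sketch (not taken from any library's internals) of bootstrap sampling, per-sample tree fitting and majority voting, using scikit-learn's `DecisionTreeClassifier` on the built-in iris data.
```
# Toy illustration of the four steps: random samples -> one tree each -> vote -> majority wins.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
rng = np.random.RandomState(42)

trees = []
for _ in range(25):
    idx = rng.randint(0, len(X), size=len(X))          # step 1: draw a random (bootstrap) sample
    tree = DecisionTreeClassifier(max_features='sqrt', random_state=rng)
    tree.fit(X[idx], y[idx])                           # step 2: fit one decision tree per sample
    trees.append(tree)

votes = np.array([t.predict(X) for t in trees])        # step 3: every tree casts a vote
majority = np.apply_along_axis(lambda col: np.bincount(col).argmax(), 0, votes)  # step 4: most votes wins
print("Toy forest training accuracy:", (majority == y).mean())
```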
<a id="3"></a> <br>
# <div align="center">3. Its advantages and disadvantages</div>
---------------------------------------------------------------------
[go to top](#top)
### Advantages:
* Random forests is considered as a highly accurate and robust method because of the number of decision trees participating in the process.
* It does not suffer from the overfitting problem. The main reason is that it takes the average of all the predictions, which cancels out the biases.
* The algorithm can be used in both classification and regression problems.
* Random forests can also handle missing values. There are two ways to handle these: using median values to replace continuous variables, and computing the proximity-weighted average of missing values.
* You can get the relative feature importance, which helps in selecting the most contributing features for the classifier.
### Disadvantages:
* Random forests is slow in generating predictions because it has multiple decision trees. Whenever it makes a prediction, all the trees in the forest have to make a prediction for the same given input and then perform voting on it. This whole process is time-consuming.
* The model is difficult to interpret compared to a decision tree, where you can easily make a decision by following the path in the tree.
<a id="4"></a> <br>
# <div align="center">4. Finding important features</div>
---------------------------------------------------------------------
[go to top](#top)
Random forests also offers a good feature selection indicator. Scikit-learn provides an extra variable with the model, which shows the relative importance or contribution of each feature in the prediction. It automatically computes the relevance score of each feature in the training phase. Then it scales the relevance down so that the sum of all scores is 1.
This score will help you choose the most important features and drop the least important ones for model building.
Random forest uses ***gini importance*** or mean decrease in impurity (***MDI***) to calculate the importance of each feature. Gini importance is also known as the total decrease in node impurity. This is how much the model fit or accuracy decreases when you drop a variable. The larger the decrease, the more significant the variable is. Here, the mean decrease is a significant parameter for variable selection. The Gini index can describe the overall explanatory power of the variables.
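As a small illustration of that scaling (using scikit-learn's built-in iris data rather than the dataset used later in this notebook), the scores exposed through `feature_importances_` already sum to 1:
```
# Minimal sketch: impurity-based (Gini/MDI) importances are normalized to sum to 1.
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier

X, y = load_iris(return_X_y=True)
forest = RandomForestClassifier(n_estimators=100, random_state=0).fit(X, y)
print(forest.feature_importances_)        # one relevance score per feature
print(forest.feature_importances_.sum())  # the scores sum to 1.0
```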
<a id="5"></a> <br>
# <div align="center">5. Random Forests vs Decision Trees</div>
---------------------------------------------------------------------
[go to top](#top)
* Random forests is a set of multiple decision trees.
* Deep decision trees may suffer from overfitting, but random forests prevents overfitting by creating trees on random subsets.
* Decision trees are computationally faster.
* Random forests is difficult to interpret, while a decision tree is easily interpretable and can be converted to rules.
<a id="6"></a> <br>
# <div align="center">6. Building a Classifier using Scikit-learn</div>
---------------------------------------------------------------------
[go to top](#top)
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from sklearn.ensemble import RandomForestClassifier # Import Random Forest Classifier
from sklearn.model_selection import train_test_split # Import train_test_split function
from sklearn import metrics #Import scikit-learn metrics module for accuracy calculation
from sklearn.metrics import auc, \
confusion_matrix, \
classification_report, \
roc_curve, \
roc_auc_score, \
precision_recall_curve, \
average_precision_score, \
accuracy_score, \
balanced_accuracy_score, \
precision_score, \
recall_score
def roc_curve_plot(fpr, tpr):
'''
Plot ROC curve
Parameters:
fpr: float
tpr: float
Returns:
plot: ROC curve graph
'''
x = np.linspace(0,1,100)
plt.figure(figsize = (10,6))
plt.plot(fpr, tpr)
plt.plot(x,x,".", markersize = 1.6)
plt.title("ROC Curve")
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.show()
users = pd.read_csv('input/All_Users.csv')
KPIs = pd.read_csv('input/KPIs_2&3.csv')
Activities = pd.merge(users, KPIs)
Activities.fillna(0, inplace =True)
Activities['Learn'] = Activities.L + Activities.UL
Activities['Social_1'] = Activities.UC + Activities.UP + Activities.DP
Activities['Social_2'] = Activities.CP + Activities.P + Activities.OP
Checkins = pd.read_csv('input/Checkins_4,5&6.csv')
retained_activities = pd.read_csv('input/KPIs_4,5&6.csv')
Retention = pd.merge(pd.merge(users, Checkins, how = 'left'), retained_activities)
Retention.fillna(0, inplace =True)
Retention['Learn'] = Retention.L + Retention.UL
Retention['Social_1'] = Retention.UC + Retention.UP + Retention.DP
Retention['Social_2'] = Retention.CP + Retention.P + Retention.OP
Retention['Total'] = Retention.Learn + Retention.Social_1 + Retention.Social_2
Retention['y'] = np.where((Retention.NofCheckins > 0) & (Retention.Total >= 3) & (Retention.Learn >= 0) & (Retention.Social_1 >= 0), 1 , 0)
# columns to use
X_col = ['UC', 'UP', 'DP', 'CP', 'L', 'UL', 'P', 'OP', 'F']
# X_col = ['Learn', 'Social_1', 'Social_2']
y_col = 'y'
X = Activities[X_col]
y = Retention[y_col]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1) # 70% training and 30% test
from sklearn.model_selection import GridSearchCV
rfc = RandomForestClassifier(random_state=42)
param_grid = {
'n_estimators': [3, 4, 5, 200, 500],
'max_features': ['auto', 'sqrt', 'log2'],
'max_depth' : [4,5,6,7,8],
'criterion' :['gini', 'entropy']
}
CV_rfc = GridSearchCV(estimator=rfc, param_grid=param_grid, cv= 5)
CV_rfc.fit(X_train, y_train)
CV_rfc.best_params_
clf = RandomForestClassifier(random_state=42, max_features='auto', n_estimators= 500, max_depth=8, criterion='entropy')
clf.fit(X_train,y_train)
y_pred=clf.predict(X_test)
pred = clf.predict(X_test)
pred_prob = clf.predict_proba(X_test)
'''
Obtain confusion_matrix
'''
tn, fp, fn, tp = confusion_matrix(y_true = y_test, y_pred = pred, labels = np.array([0,1])).ravel()
print(tn, fp, fn, tp)
'''
Calculate auc(Area Under the Curve) for positive class
'''
fpr, tpr, thresholds = roc_curve(y_true = y_test, y_score = pred_prob[:,1], pos_label = 1)
auc_random_forest = auc(fpr,tpr)
print(auc_random_forest)
roc_curve_plot(fpr=fpr, tpr=tpr)
'''
Calculation of metrics using standard functions
'''
print('Accuracy: {}'.format(accuracy_score(y_test,pred)))
print('Balanced accuracy: {}'.format(balanced_accuracy_score(y_test, pred)))
print('Precision: {}'.format(precision_score(y_test, pred)))
print('Recall: {}'.format(recall_score(y_test, pred)))
```
<a id="7"></a> <br>
# <div align="center">7. Finding Important Features in Scikit-learn</div>
---------------------------------------------------------------------
[go to top](#top)
Here, you are finding important features or selecting features in the dataset. In scikit-learn, you can perform this task in the following steps:
1) First, you need to create a random forests model.
2) Second, use the feature importance variable to see feature importance scores.
3) Third, visualize these scores using the seaborn library.
```
feature_imp = pd.Series(clf.feature_importances_, index=X_train.columns.values).sort_values(ascending=False)
feature_imp
```
You can also visualize the feature importance. Visualizations are easy to understand and interpretable.
For visualization, you can use a combination of matplotlib and seaborn. Seaborn is built on top of matplotlib, so it offers a number of customized themes and additional plot types while remaining fully compatible with matplotlib; together they make it easy to produce good visualizations.
```
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# Creating a bar plot
sns.barplot(x=feature_imp, y=feature_imp.index)
# Add labels to your graph
plt.xlabel('Feature Importance Score')
plt.ylabel('Features')
plt.title("Visualizing Important Features")
plt.show();
```
# Neural Nets with Keras
In this notebook you will learn how to implement neural networks using the Keras API. We will use TensorFlow's own implementation, *tf.keras*, which comes bundled with TensorFlow.
Don't hesitate to look at the documentation at [keras.io](https://keras.io/). All the code examples should work fine with tf.keras, the only difference is how to import Keras:
```python
# keras.io code:
from keras.layers import Dense
output_layer = Dense(10)
# corresponding tf.keras code:
from tensorflow.keras.layers import Dense
output_layer = Dense(10)
# or:
from tensorflow import keras
output_layer = keras.layers.Dense(10)
```
In this notebook, we will not use any TensorFlow-specific code, so everything you see would run just the same way on [keras-team](https://github.com/keras-team/keras) or any other Python implementation of the Keras API (except for the imports).
## Imports
```
%matplotlib inline
%load_ext tensorboard.notebook
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import sklearn
import sys
import tensorflow as tf
from tensorflow import keras # tf.keras
import time
print("python", sys.version)
for module in mpl, np, pd, sklearn, tf, keras:
print(module.__name__, module.__version__)
assert sys.version_info >= (3, 5) # Python ≥3.5 required
assert tf.__version__ >= "2.0" # TensorFlow ≥2.0 required
```
**Note**: The preview version of TensorFlow 2.0 shows up as version 1.13, which would make the version assertion above fail. That's okay: to test that the installation behaves like TF 2.0, we can verify that `tf.function()` is present instead.
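For example, a quick check like the following (a minimal sketch) can stand in for the version assertion when running the 2.0 preview:
```
# tf.function belongs to the TF 2.x-style API, so its presence is a
# reasonable proxy for 2.0-style behavior even if the version string differs:
assert hasattr(tf, "function"), "This notebook expects TF 2.0-style APIs"
```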

## Exercise 1 – TensorFlow Playground
Visit the [TensorFlow Playground](http://playground.tensorflow.org).
* **Layers and patterns**: try training the default neural network by clicking the "Run" button (top left). Notice how it quickly finds a good solution for the classification task. Notice that the neurons in the first hidden layer have learned simple patterns, while the neurons in the second hidden layer have learned to combine the simple patterns of the first hidden layer into more complex patterns. In general, the more layers, the more complex the patterns can be.
* **Activation function**: try replacing the Tanh activation function with the ReLU activation function, and train the network again. Notice that it finds a solution even faster, but this time the boundaries are linear. This is due to the shape of the ReLU function.
* **Local minima**: modify the network architecture to have just one hidden layer with three neurons. Train it multiple times (to reset the network weights, just add and remove a neuron). Notice that the training time varies a lot, and sometimes it even gets stuck in a local minimum.
* **Too small**: now remove one neuron to keep just 2. Notice that the neural network is now incapable of finding a good solution, even if you try multiple times. The model has too few parameters and it systematically underfits the training set.
* **Large enough**: next, set the number of neurons to 8 and train the network several times. Notice that it is now consistently fast and never gets stuck. This highlights an important finding in neural network theory: large neural networks almost never get stuck in local minima, and even when they do these local optima are almost as good as the global optimum. However, they can still get stuck on long plateaus for a long time.
* **Deep net and vanishing gradients**: now change the dataset to be the spiral (bottom right dataset under "DATA"). Change the network architecture to have 4 hidden layers with 8 neurons each. Notice that training takes much longer, and often gets stuck on plateaus for long periods of time. Also notice that the neurons in the highest layers (i.e. on the right) tend to evolve faster than the neurons in the lowest layers (i.e. on the left). This problem, called the "vanishing gradients" problem, can be alleviated using better weight initialization and other techniques, better optimizers (such as AdaGrad or Adam), or using Batch Normalization.
* **More**: go ahead and play with the other parameters to get a feel of what they do. In fact, after this course you should definitely play with this UI for at least one hour, it will grow your intuitions about neural networks significantly.

## Exercise 2 – Image classification with tf.keras
### Load the Fashion MNIST dataset
Let's start by loading the fashion MNIST dataset. Keras has a number of functions to load popular datasets in `keras.datasets`. The dataset is already split for you between a training set and a test set, but it can be useful to split the training set further to have a validation set:
```
fashion_mnist = keras.datasets.fashion_mnist
(X_train_full, y_train_full), (X_test, y_test) = (
fashion_mnist.load_data())
X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
```
The training set contains 55,000 grayscale images, each 28x28 pixels:
```
X_train.shape
```
Each pixel intensity is represented by a uint8 (byte) from 0 to 255:
```
X_train[0]
```
You can plot an image using Matplotlib's `imshow()` function, with a `'binary'`
color map:
```
plt.imshow(X_train[0], cmap="binary")
plt.show()
```
The labels are the class IDs (represented as uint8), from 0 to 9:
```
y_train
```
Here are the corresponding class names:
```
class_names = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
"Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]
```
So the first image in the training set is a coat:
```
class_names[y_train[0]]
```
The validation set contains 5,000 images, and the test set contains 10,000 images:
```
X_valid.shape
X_test.shape
```
Let's take a look at a sample of the images in the dataset:
```
n_rows = 5
n_cols = 10
plt.figure(figsize=(n_cols*1.4, n_rows * 1.6))
for row in range(n_rows):
for col in range(n_cols):
index = n_cols * row + col
plt.subplot(n_rows, n_cols, index + 1)
plt.imshow(X_train[index], cmap="binary", interpolation="nearest")
plt.axis('off')
plt.title(class_names[y_train[index]])
plt.show()
```
This dataset has the same structure as the famous MNIST dataset (which you can load using `keras.datasets.mnist.load_data()`), except the images represent fashion items rather than handwritten digits, and it is much more challenging. A simple linear model can reach 92% accuracy on MNIST, but only 83% on fashion MNIST.
### Build a classification neural network with Keras
### 2.1)
Build a `Sequential` model (`keras.models.Sequential`), without any argument, then add four layers to it by calling its `add()` method:
* a `Flatten` layer (`keras.layers.Flatten`) to convert each 28x28 image to a single row of 784 pixel values. Since it is the first layer in your model, you should specify the `input_shape` argument, leaving out the batch size: `[28, 28]`.
* a `Dense` layer (`keras.layers.Dense`) with 300 neurons (aka units), and the `"relu"` activation function.
* Another `Dense` layer with 100 neurons, also with the `"relu"` activation function.
* A final `Dense` layer with 10 neurons (one per class), and with the `"softmax"` activation function to ensure that the sum of all the estimated class probabilities for each image is equal to 1.
```
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28,28]))
model.add(keras.layers.Dense(300, activation="relu"))
```
### 2.2)
Alternatively, you can pass a list containing the 4 layers to the constructor of the `Sequential` model. The model's `layers` attribute holds the list of layers.
### 2.3)
Call the model's `summary()` method and examine the output. Also, try using `keras.utils.plot_model()` to save an image of your model's architecture. Alternatively, you can uncomment the following code to display the image within Jupyter.
**Warning**: you will need `pydot` and `graphviz` to use `plot_model()`.
### 2.4)
After a model is created, you must call its `compile()` method to specify the `loss` function and the `optimizer` to use. In this case, you want to use the `"sparse_categorical_crossentropy"` loss, and the `"sgd"` optimizer (stochastic gradient descent). Moreover, you can optionally specify a list of additional metrics that should be measured during training. In this case you should specify `metrics=["accuracy"]`. **Note**: you can find more loss functions in `keras.losses`, more metrics in `keras.metrics` and more optimizers in `keras.optimizers`.
### 2.5)
Now your model is ready to be trained. Call its `fit()` method, passing it the input features (`X_train`) and the target classes (`y_train`). Set `epochs=10` (or else it will just run for a single epoch). You can also (optionally) pass the validation data by setting `validation_data=(X_valid, y_valid)`. If you do, Keras will compute the loss and the additional metrics (the accuracy in this case) on the validation set at the end of each epoch. If the performance on the training set is much better than on the validation set, your model is probably overfitting the training set (or there is a bug, such as a mismatch between the training set and the validation set).
**Note**: the `fit()` method will return a `History` object containing training stats. Make sure to preserve it (`history = model.fit(...)`).
### 2.6)
Try running `pd.DataFrame(history.history).plot()` to plot the learning curves. To make the graph more readable, you can also set `figsize=(8, 5)`, call `plt.grid(True)` and `plt.gca().set_ylim(0, 1)`.
### 2.7)
Try running `model.fit()` again, and notice that training continues where it left off.
### 2.8)
Call the model's `evaluate()` method, passing it the test set (`X_test` and `y_test`). This will compute the loss (cross-entropy) on the test set, as well as all the additional metrics (in this case, the accuracy). Your model should achieve over 80% accuracy on the test set.
### 2.9)
Define `X_new` as the first 10 instances of the test set. Call the model's `predict()` method to estimate the probability of each class for each instance (for better readability, you may use the output array's `round()` method):
### 2.10)
Often, you may only be interested in the most likely class. Use `np.argmax()` to get the class ID of the most likely class for each instance. **Tip**: you want to set `axis=1`.
### 2.11)
Call the model's `predict_classes()` method for `X_new`. You should get the same result as above.
### 2.12)
(Optional) It is often useful to know how confident the model is for each prediction. Try finding the estimated probability for each predicted class using `np.max()`.
### 2.13)
(Optional) You will often want the top k classes and their estimated probabilities rather than just the most likely class. You can use `np.argsort()` for this.

## Exercise 2 - Solution
### 2.1)
Build a `Sequential` model (`keras.models.Sequential`), without any argument, then add four layers to it by calling its `add()` method:
* a `Flatten` layer (`keras.layers.Flatten`) to convert each 28x28 image to a single row of 784 pixel values. Since it is the first layer in your model, you should specify the `input_shape` argument, leaving out the batch size: `[28, 28]`.
* a `Dense` layer (`keras.layers.Dense`) with 300 neurons (aka units), and the `"relu"` activation function.
* Another `Dense` layer with 100 neurons, also with the `"relu"` activation function.
* A final `Dense` layer with 10 neurons (one per class), and with the `"softmax"` activation function to ensure that the sum of all the estimated class probabilities for each image is equal to 1.
```
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
model.add(keras.layers.Dense(300, activation="relu"))
model.add(keras.layers.Dense(100, activation="relu"))
model.add(keras.layers.Dense(10, activation="softmax"))
```
### 2.2)
Alternatively, you can pass a list containing the 4 layers to the constructor of the `Sequential` model. The model's `layers` attribute holds the list of layers.
```
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="relu"),
keras.layers.Dense(100, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
model.layers
```
### 2.3)
Call the model's `summary()` method and examine the output. Also, try using `keras.utils.plot_model()` to save an image of your model's architecture. Alternatively, you can uncomment the following code to display the image within Jupyter.
```
model.summary()
keras.utils.plot_model(model, "my_mnist_model.png", show_shapes=True)
%%html
<img src="my_mnist_model.png" />
```
**Warning**: at present, you need `from tensorflow.python.keras.utils.vis_utils import model_to_dot`, instead of simply `keras.utils.model_to_dot`. See [TensorFlow issue 24639](https://github.com/tensorflow/tensorflow/issues/24639).
```
from IPython.display import SVG
from tensorflow.python.keras.utils.vis_utils import model_to_dot
SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))
```
### 2.4)
After a model is created, you must call its `compile()` method to specify the `loss` function and the `optimizer` to use. In this case, you want to use the `"sparse_categorical_crossentropy"` loss, and the `"sgd"` optimizer (stochastic gradient descent). Moreover, you can optionally specify a list of additional metrics that should be measured during training. In this case you should specify `metrics=["accuracy"]`. **Note**: you can find more loss functions in `keras.losses`, more metrics in `keras.metrics` and more optimizers in `keras.optimizers`.
```
model.compile(loss="sparse_categorical_crossentropy",
optimizer="sgd", metrics=["accuracy"])
```
### 2.5)
Now your model is ready to be trained. Call its `fit()` method, passing it the input features (`X_train`) and the target classes (`y_train`). Set `epochs=10` (or else it will just run for a single epoch). You can also (optionally) pass the validation data by setting `validation_data=(X_valid, y_valid)`. If you do, Keras will compute the loss and the additional metrics (the accuracy in this case) on the validation set at the end of each epoch. If the performance on the training set is much better than on the validation set, your model is probably overfitting the training set (or there is a bug, such as a mismatch between the training set and the validation set).
**Note**: the `fit()` method will return a `History` object containing training stats. Make sure to preserve it (`history = model.fit(...)`).
```
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
```
### 2.6)
Try running `pd.DataFrame(history.history).plot()` to plot the learning curves. To make the graph more readable, you can also set `figsize=(8, 5)`, call `plt.grid(True)` and `plt.gca().set_ylim(0, 1)`.
```
def plot_learning_curves(history):
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.gca().set_ylim(0, 1)
plt.show()
plot_learning_curves(history)
```
### 2.7)
Try running `model.fit()` again, and notice that training continues where it left off.
```
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
```
### 2.8)
Call the model's `evaluate()` method, passing it the test set (`X_test` and `y_test`). This will compute the loss (cross-entropy) on the test set, as well as all the additional metrics (in this case, the accuracy). Your model should achieve over 80% accuracy on the test set.
```
model.evaluate(X_test, y_test)
```
### 2.9)
Define `X_new` as the first 10 instances of the test set. Call the model's `predict()` method to estimate the probability of each class for each instance (for better readability, you may use the output array's `round()` method):
```
n_new = 10
X_new = X_test[:n_new]
y_proba = model.predict(X_new)
y_proba.round(2)
```
### 2.10)
Often, you may only be interested in the most likely class. Use `np.argmax()` to get the class ID of the most likely class for each instance. **Tip**: you want to set `axis=1`.
```
y_pred = y_proba.argmax(axis=1)
y_pred
```
### 2.11)
Call the model's `predict_classes()` method for `X_new`. You should get the same result as above.
```
y_pred = model.predict_classes(X_new)
y_pred
```
### 2.12)
(Optional) It is often useful to know how confident the model is for each prediction. Try finding the estimated probability for each predicted class using `np.max()`.
```
y_proba.max(axis=1).round(2)
```
### 2.13)
(Optional) You will often want the top k classes and their estimated probabilities rather than just the most likely class. You can use `np.argsort()` for this.
```
k = 3
top_k = np.argsort(-y_proba, axis=1)[:, :k]
top_k
row_indices = np.tile(np.arange(len(top_k)), [k, 1]).T
y_proba[row_indices, top_k].round(2)
```

## Exercise 3 – Scale the features
### 3.1)
When using Gradient Descent, it is usually best to ensure that the features all have a similar scale, preferably with a Normal distribution. Try to standardize the pixel values and see if this improves the performance of your neural network.
**Tips**:
* For each feature (pixel intensity), you must subtract the `mean()` of that feature (across all instances, so use `axis=0`) and divide by its standard deviation (`std()`, again `axis=0`). Alternatively, you can use Scikit-Learn's `StandardScaler`.
* Make sure you compute the means and standard deviations on the training set, and use these statistics to scale the training set, the validation set and the test set (you should not fit the validation set or the test set, and computing the means and standard deviations counts as "fitting").
### 3.2)
Plot the learning curves. Do they look better than earlier?

## Exercise 3 – Solution
### 3.1)
When using Gradient Descent, it is usually best to ensure that the features all have a similar scale, preferably with a Normal distribution. Try to standardize the pixel values and see if this improves the performance of your neural network.
```
pixel_means = X_train.mean(axis = 0)
pixel_stds = X_train.std(axis = 0)
X_train_scaled = (X_train - pixel_means) / pixel_stds
X_valid_scaled = (X_valid - pixel_means) / pixel_stds
X_test_scaled = (X_test - pixel_means) / pixel_stds
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28)
X_valid_scaled = scaler.transform(X_valid.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28)
X_test_scaled = scaler.transform(X_test.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="relu"),
keras.layers.Dense(100, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer="sgd", metrics=["accuracy"])
history = model.fit(X_train_scaled, y_train, epochs=20,
validation_data=(X_valid_scaled, y_valid))
model.evaluate(X_test_scaled, y_test)
```
### 3.2)
Plot the learning curves. Do they look better than earlier?
```
plot_learning_curves(history)
```

## Exercise 4 – Use Callbacks
### 4.1)
The `fit()` method accepts a `callbacks` argument. Try training your model with a large number of epochs, a validation set, and with a few callbacks from `keras.callbacks`:
* `TensorBoard`: specify a log directory. It should be a subdirectory of a root logdir, such as `./my_logs/run_1`, and it should be different every time you train your model. You can use a timestamp in the subdirectory's path to ensure that it changes at every run.
* `EarlyStopping`: specify `patience=5`
* `ModelCheckpoint`: specify the path of the checkpoint file to save (e.g., `"my_mnist_model.h5"`) and set `save_best_only=True`
Notice that the `EarlyStopping` callback will interrupt training before it reaches the requested number of epochs. This reduces the risk of overfitting.
```
root_logdir = os.path.join(os.curdir, "my_logs")
```
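For example, a small helper like the following (a sketch; the `get_run_logdir` name and the timestamp format are just one option) produces a fresh, timestamped subdirectory for every run:
```
def get_run_logdir(root_logdir=root_logdir):
    # e.g. './my_logs/run_2019_06_07-15_42_17'
    run_id = time.strftime("run_%Y_%m_%d-%H_%M_%S")
    return os.path.join(root_logdir, run_id)

run_logdir = get_run_logdir()
run_logdir
```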
### 4.2)
The Jupyter plugin for tensorboard was loaded at the beginning of this notebook (`%load_ext tensorboard.notebook`), so you can now simply start it by using the `%tensorboard` magic command. Explore the various tabs available, in particular the SCALARS tab to view learning curves, the GRAPHS tab to view the computation graph, and the PROFILE tab which is very useful to identify bottlenecks if you run into performance issues.
```
%tensorboard --logdir=./my_logs
```
### 4.3)
The early stopping callback only stopped training after 5 epochs without progress (the patience we specified), so your model may already have started to overfit the training set. Fortunately, since the `ModelCheckpoint` callback only saved the best models (on the validation set), the last saved model is the best on the validation set, so try loading it using `keras.models.load_model()`. Finally evaluate it on the test set.
### 4.4)
Look at the list of available callbacks at https://keras.io/callbacks/

## Exercise 4 – Solution
### 4.1)
The `fit()` method accepts a `callbacks` argument. Try training your model with a large number of epochs, a validation set, and with a few callbacks from `keras.callbacks`:
* `TensorBoard`: specify a log directory. It should be a subdirectory of a root logdir, such as `./my_logs/run_1`, and it should be different every time you train your model. You can use a timestamp in the subdirectory's path to ensure that it changes at every run.
* `EarlyStopping`: specify `patience=5`
* `ModelCheckpoint`: specify the path of the checkpoint file to save (e.g., `"my_mnist_model.h5"`) and set `save_best_only=True`
Notice that the `EarlyStopping` callback will interrupt training before it reaches the requested number of epochs. This reduces the risk of overfitting.
```
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="relu"),
keras.layers.Dense(100, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer="sgd", metrics=["accuracy"])
logdir = os.path.join(root_logdir, "run_{}".format(time.time()))
callbacks = [
keras.callbacks.TensorBoard(logdir),
keras.callbacks.EarlyStopping(patience=5),
keras.callbacks.ModelCheckpoint("my_mnist_model.h5", save_best_only=True),
]
history = model.fit(X_train_scaled, y_train, epochs=50,
validation_data=(X_valid_scaled, y_valid),
callbacks=callbacks)
```
### 4.2)
Done
### 4.3)
The early stopping callback only stopped training after 5 epochs without progress (the patience we specified), so your model may already have started to overfit the training set. Fortunately, since the `ModelCheckpoint` callback only saved the best models (on the validation set), the last saved model is the best on the validation set, so try loading it using `keras.models.load_model()`. Finally evaluate it on the test set.
```
model = keras.models.load_model("my_mnist_model.h5")
model.evaluate(X_test_scaled, y_test)
```
### 4.4)
Look at the list of available callbacks at https://keras.io/callbacks/

## Exercise 5 – A neural net for regression
### 5.1)
Load the California housing dataset using `sklearn.datasets.fetch_california_housing`. This returns an object with a `DESCR` attribute describing the dataset, a `data` attribute with the input features, and a `target` attribute with the labels. The goal is to predict the price of houses in a district (a census block) given some stats about that district. This is a regression task (predicting values).
### 5.2)
Split the dataset into a training set, a validation set and a test set using Scikit-Learn's `sklearn.model_selection.train_test_split()` function.
### 5.3)
Scale the input features (e.g., using a `sklearn.preprocessing.StandardScaler`). Once again, don't forget that you should not fit the validation set or the test set, only the training set.
### 5.4)
Now build, train and evaluate a neural network to tackle this problem. Then use it to make predictions on the test set.
**Tips**:
* Since you are predicting a single value per district (the median house price), there should only be one neuron in the output layer.
* Usually for regression tasks you don't want to use any activation function in the output layer (in some cases you may want to use `"relu"` or `"softplus"` if you want to constrain the predicted values to be positive, or `"sigmoid"` or `"tanh"` if you want to constrain the predicted values to the range 0 to 1 or -1 to 1).
* A good loss function for regression is generally the `"mean_squared_error"` (aka `"mse"`). When there are many outliers in your dataset, you may prefer to use the `"mean_absolute_error"` (aka `"mae"`), which is a bit less precise but less sensitive to outliers.

## Exercise 5 – Solution
### 5.1)
Load the California housing dataset using `sklearn.datasets.fetch_california_housing`. This returns an object with a `DESCR` attribute describing the dataset, a `data` attribute with the input features, and a `target` attribute with the labels. The goal is to predict the price of houses in a district (a census block) given some stats about that district. This is a regression task (predicting values).
```
from sklearn.datasets import fetch_california_housing
housing = fetch_california_housing()
print(housing.DESCR)
housing.data.shape
housing.target.shape
```
### 5.2)
Split the dataset into a training set, a validation set and a test set using Scikit-Learn's `sklearn.model_selection.train_test_split()` function.
```
from sklearn.model_selection import train_test_split
X_train_full, X_test, y_train_full, y_test = train_test_split(housing.data, housing.target, random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(X_train_full, y_train_full, random_state=42)
len(X_train), len(X_valid), len(X_test)
```
### 5.3)
Scale the input features (e.g., using a `sklearn.preprocessing.StandardScaler`). Once again, don't forget that you should not fit the validation set or the test set, only the training set.
```
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_valid_scaled = scaler.transform(X_valid)
X_test_scaled = scaler.transform(X_test)
```
### 5.4)
Now build, train and evaluate a neural network to tackle this problem. Then use it to make predictions on the test set.
```
model = keras.models.Sequential([
keras.layers.Dense(30, activation="relu", input_shape=X_train.shape[1:]),
keras.layers.Dense(1)
])
model.compile(loss="mean_squared_error", optimizer="sgd")
callbacks = [keras.callbacks.EarlyStopping(patience=10)]
history = model.fit(X_train_scaled, y_train,
validation_data=(X_valid_scaled, y_valid), epochs=100,
callbacks=callbacks)
model.evaluate(X_test_scaled, y_test)
model.predict(X_test_scaled)
plot_learning_curves(history)
```

## Exercise 6 – Hyperparameter search
### 6.1)
Try training your model multiple times, with a different learning rate each time (e.g., 1e-4, 3e-4, 1e-3, 3e-3, 3e-2), and compare the learning curves. For this, you need to create a `keras.optimizers.SGD` optimizer and specify the `learning_rate` in its constructor, then pass this `SGD` instance to the `compile()` method using the `optimizer` argument.
### 6.2)
Let's look at a more sophisticated way to tune hyperparameters. Create a `build_model()` function that takes three arguments, `n_hidden`, `n_neurons`, `learning_rate`, and builds, compiles and returns a model with the given number of hidden layers, the given number of neurons and the given learning rate. It is good practice to give a reasonable default value to each argument.
### 6.3)
Create a `keras.wrappers.scikit_learn.KerasRegressor` and pass the `build_model` function to the constructor. This gives you a Scikit-Learn compatible predictor. Try training it and using it to make predictions. Note that you can pass `epochs`, `callbacks` and `validation_data` to the `fit()` method.
### 6.4)
Use a `sklearn.model_selection.RandomizedSearchCV` to search the hyperparameter space of your `KerasRegressor`.
**Tips**:
* create a `param_distribs` dictionary where each key is the name of a hyperparameter you want to fine-tune (e.g., `"n_hidden"`), and each value is the list of values you want to explore (e.g., `[0, 1, 2, 3]`), or a Scipy distribution from `scipy.stats`.
* You can use the reciprocal distribution for the learning rate (e.g, `reciprocal(3e-3, 3e-2)`).
* Create a `RandomizedSearchCV`, passing the `KerasRegressor` and the `param_distribs` to its constructor, as well as the number of iterations (`n_iter`), and the number of cross-validation folds (`cv`). If you are short on time, you can set `n_iter=10` and `cv=3`. You may also want to set `verbose=2`.
* Finally, call the `RandomizedSearchCV`'s `fit()` method on the training set. Once again you can pass it `epochs`, `validation_data` and `callbacks` if you want to.
* The best parameters found will be available in the `best_params_` attribute, the best score will be in `best_score_`, and the best model will be in `best_estimator_`.
### 6.5)
Evaluate the best model found on the test set. You can either use the best estimator's `score()` method, or get its underlying Keras model *via* its `model` attribute, and call this model's `evaluate()` method. Note that the estimator returns the negative mean square error (it's a score, not a loss, so higher is better).
### 6.6)
Finally, save the best Keras model found. **Tip**: it is available via the best estimator's `model` attribute, and just need to call its `save()` method.
**Tip**: while a randomized search is nice and simple, there are more powerful (but complex) options available out there for hyperparameter search, for example:
* [Hyperopt](https://github.com/hyperopt/hyperopt)
* [Hyperas](https://github.com/maxpumperla/hyperas)
* [Sklearn-Deap](https://github.com/rsteca/sklearn-deap)
* [Scikit-Optimize](https://scikit-optimize.github.io/)
* [Spearmint](https://github.com/JasperSnoek/spearmint)
* [PyMC3](https://docs.pymc.io/)
* [GPFlow](https://gpflow.readthedocs.io/)
* [Yelp/MOE](https://github.com/Yelp/MOE)
* Commercial services such as: [Google Cloud ML Engine](https://cloud.google.com/ml-engine/docs/tensorflow/using-hyperparameter-tuning), [Arimo](https://arimo.com/) or [Oscar](http://oscar.calldesk.ai/)

## Exercise 6 – Solution
### 6.1)
Try training your model multiple times, with a different learning rate each time (e.g., 1e-4, 3e-4, 1e-3, 3e-3, 3e-2), and compare the learning curves. For this, you need to create a `keras.optimizers.SGD` optimizer and specify the `learning_rate` in its constructor, then pass this `SGD` instance to the `compile()` method using the `optimizer` argument.
```
learning_rates = [1e-4, 3e-4, 1e-3, 3e-3, 1e-2, 3e-2]
histories = []
for learning_rate in learning_rates:
model = keras.models.Sequential([
keras.layers.Dense(30, activation="relu", input_shape=X_train.shape[1:]),
keras.layers.Dense(1)
])
optimizer = keras.optimizers.SGD(learning_rate)
model.compile(loss="mean_squared_error", optimizer=optimizer)
callbacks = [keras.callbacks.EarlyStopping(patience=10)]
history = model.fit(X_train_scaled, y_train,
validation_data=(X_valid_scaled, y_valid), epochs=100,
callbacks=callbacks)
histories.append(history)
for learning_rate, history in zip(learning_rates, histories):
print("Learning rate:", learning_rate)
plot_learning_curves(history)
```
### 6.2)
Let's look at a more sophisticated way to tune hyperparameters. Create a `build_model()` function that takes three arguments, `n_hidden`, `n_neurons`, `learning_rate`, and builds, compiles and returns a model with the given number of hidden layers, the given number of neurons and the given learning rate. It is good practice to give a reasonable default value to each argument.
```
def build_model(n_hidden=1, n_neurons=30, learning_rate=3e-3):
model = keras.models.Sequential()
options = {"input_shape": X_train.shape[1:]}
for layer in range(n_hidden + 1):
model.add(keras.layers.Dense(n_neurons, activation="relu", **options))
options = {}
model.add(keras.layers.Dense(1, **options))
optimizer = keras.optimizers.SGD(learning_rate)
model.compile(loss="mse", optimizer=optimizer)
return model
```
### 6.3)
Create a `keras.wrappers.scikit_learn.KerasRegressor` and pass the `build_model` function to the constructor. This gives you a Scikit-Learn compatible predictor. Try training it and using it to make predictions. Note that you can pass `epochs`, `callbacks` and `validation_data` to the `fit()` method.
```
keras_reg = keras.wrappers.scikit_learn.KerasRegressor(build_model)
keras_reg.fit(X_train_scaled, y_train, epochs=100,
validation_data=(X_valid_scaled, y_valid),
callbacks=[keras.callbacks.EarlyStopping(patience=10)])
keras_reg.predict(X_test_scaled)
```
### 6.4)
Use a `sklearn.model_selection.RandomizedSearchCV` to search the hyperparameter space of your `KerasRegressor`.
```
from scipy.stats import reciprocal
param_distribs = {
"n_hidden": [0, 1, 2, 3],
"n_neurons": np.arange(1, 100),
"learning_rate": reciprocal(3e-4, 3e-2),
}
from sklearn.model_selection import RandomizedSearchCV
rnd_search_cv = RandomizedSearchCV(keras_reg, param_distribs, n_iter=10, cv=3, verbose=2)
rnd_search_cv.fit(X_train_scaled, y_train, epochs=100,
validation_data=(X_valid_scaled, y_valid),
callbacks=[keras.callbacks.EarlyStopping(patience=10)])
rnd_search_cv.best_params_
rnd_search_cv.best_score_
rnd_search_cv.best_estimator_
```
### 6.5)
Evaluate the best model found on the test set. You can either use the best estimator's `score()` method, or get its underlying Keras model *via* its `model` attribute, and call this model's `evaluate()` method. Note that the estimator returns the negative mean square error (it's a score, not a loss, so higher is better).
```
rnd_search_cv.score(X_test_scaled, y_test)
model = rnd_search_cv.best_estimator_.model
model.evaluate(X_test_scaled, y_test)
```
### 6.6)
Finally, save the best Keras model found. **Tip**: it is available via the best estimator's `model` attribute, and just need to call its `save()` method.
```
model.save("my_fine_tuned_housing_model.h5")
```

## Exercise 7 – The functional API
Not all neural network models are simply sequential. Some may have complex topologies. Some may have multiple inputs and/or multiple outputs. For example, a Wide & Deep neural network (see [paper](https://ai.google/research/pubs/pub45413)) connects all or part of the inputs directly to the output layer, as shown on the following diagram:
<img src="images/wide_and_deep_net.png" title="Wide and deep net" width=300 />
### 7.1)
Use Keras' functional API to implement a Wide & Deep network to tackle the California housing problem.
**Tips**:
* You need to create a `keras.layers.Input` layer to represent the inputs. Don't forget to specify the input `shape`.
* Create the `Dense` layers, and connect them by using them like functions. For example, `hidden1 = keras.layers.Dense(30, activation="relu")(input)` and `hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)`
* Use the `keras.layers.concatenate()` function to concatenate the input layer and the second hidden layer's output.
* Create a `keras.models.Model` and specify its `inputs` and `outputs` (e.g., `inputs=[input]`).
* Then use this model just like a `Sequential` model: you need to compile it, display its summary, train it, evaluate it and use it to make predictions.
### 7.2)
After the Sequential API and the Functional API, let's try the Subclassing API:
* Create a subclass of the `keras.models.Model` class.
* Create all the layers you need in the constructor (e.g., `self.hidden1 = keras.layers.Dense(...)`).
* Use the layers to process the `input` in the `call()` method, and return the output.
* Note that you do not need to create a `keras.layers.Input` in this case.
* Also note that `self.output` is used by Keras, so you should use another name for the output layer (e.g., `self.output_layer`).
**When should you use the Subclassing API?**
* Both the Sequential API and the Functional API are declarative: you first declare the list of layers you need and how they are connected, and only then can you feed your model with actual data. The models that these APIs build are just static graphs of layers. This has many advantages (easy inspection, debugging, saving, loading, sharing, etc.), and they cover the vast majority of use cases, but if you need to build a very dynamic model (e.g., with loops or conditional branching), or if you want to experiment with new ideas using an imperative programming style, then the Subclassing API is for you. You can pretty much do any computation you want in the `call()` method, possibly with loops and conditions, using Keras layers or even low-level TensorFlow operations.
* However, this extra flexibility comes at the cost of less transparency. Since the model is defined within the `call()` method, Keras cannot fully inspect it. All it sees is the list of model attributes (which include the layers you define in the constructor), so when you display the model summary you just see a list of unconnected layers. Consequently, you cannot save or load the model without writing extra code. So this API is best used only when you really need the extra flexibility.
```
class MyModel(keras.models.Model):
def __init__(self):
super(MyModel, self).__init__()
# create layers here
def call(self, input):
# write any code here, using layers or even low-level TF code
return output
model = MyModel()
```
### 7.3)
Now suppose you want to send only features 0 to 4 directly to the output, and only features 2 to 7 through the hidden layers, as shown on the following diagram. Use the functional API to build, train and evaluate this model.
**Tips**:
* You need to create two `keras.layers.Input` (`input_A` and `input_B`)
* Build the model using the functional API, as above, but when you build the `keras.models.Model`, remember to set `inputs=[input_A, input_B]`
* When calling `fit()`, `evaluate()` and `predict()`, instead of passing `X_train_scaled`, pass `(X_train_scaled_A, X_train_scaled_B)` (two NumPy arrays containing only the appropriate features copied from `X_train_scaled`).
<img src="images/multiple_inputs.png" title="Multiple inputs" width=300 />
### 7.4)
Build the multi-input and multi-output neural net represented in the following diagram.
<img src="images/multiple_inputs_and_outputs.png" title="Multiple inputs and outputs" width=400 />
**Why?**
There are many use cases in which having multiple outputs can be useful:
* Your task may require multiple outputs, for example, you may want to locate and classify the main object in a picture. This is both a regression task (finding the coordinates of the object's center, as well as its width and height) and a classification task.
* Similarly, you may have multiple independent tasks to perform based on the same data. Sure, you could train one neural network per task, but in many cases you will get better results on all tasks by training a single neural network with one output per task. This is because the neural network can learn features in the data that are useful across tasks.
* Another use case is as a regularization technique (i.e., a training constraint whose objective is to reduce overfitting and thus improve the model's ability to generalize). For example, you may want to add some auxiliary outputs in a neural network architecture (as shown in the diagram) to ensure that the underlying part of the network learns something useful on its own, without relying on the rest of the network.
**Tips**:
* Building the model is pretty straightforward using the functional API. Just make sure you specify both outputs when creating the `keras.models.Model`, for example `outputs=[output, aux_output]`.
* Each output has its own loss function. In this scenario, they will be identical, so you can either specify `loss="mse"` (this loss will apply to both outputs) or `loss=["mse", "mse"]`, which does the same thing.
* The final loss used to train the whole network is just a weighted sum of all loss functions. In this scenario, you care mostly about the main output, so you want to give a much smaller weight to the auxiliary output; when compiling the model, specify `loss_weights=[0.9, 0.1]`.
* When calling `fit()` or `evaluate()`, you need to pass the labels for all outputs. In this scenario the labels will be the same for the main output and for the auxiliary output, so make sure to pass `(y_train, y_train)` instead of `y_train`.
* The `predict()` method will return both the main output and the auxiliary output.

## Exercise 7 – Solution
### 7.1)
Use Keras' functional API to implement a Wide & Deep network to tackle the California housing problem.
```
input = keras.layers.Input(shape=X_train.shape[1:])
hidden1 = keras.layers.Dense(30, activation="relu")(input)
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)
concat = keras.layers.concatenate([input, hidden2])
output = keras.layers.Dense(1)(concat)
model = keras.models.Model(inputs=[input], outputs=[output])
model.compile(loss="mean_squared_error", optimizer="sgd")
model.summary()
history = model.fit(X_train_scaled, y_train, epochs=10,
validation_data=(X_valid_scaled, y_valid))
model.evaluate(X_test_scaled, y_test)
model.predict(X_test_scaled)
```
### 7.2)
After the Sequential API and the Functional API, let's try the Subclassing API:
* Create a subclass of the `keras.models.Model` class.
* Create all the layers you need in the constructor (e.g., `self.hidden1 = keras.layers.Dense(...)`).
* Use the layers to process the `input` in the `call()` method, and return the output.
* Note that you do not need to create a `keras.layers.Input` in this case.
* Also note that `self.output` is used by Keras, so you should use another name for the output layer (e.g., `self.output_layer`).
```
class MyModel(keras.models.Model):
def __init__(self):
super(MyModel, self).__init__()
self.hidden1 = keras.layers.Dense(30, activation="relu")
self.hidden2 = keras.layers.Dense(30, activation="relu")
self.output_ = keras.layers.Dense(1)
def call(self, input):
hidden1 = self.hidden1(input)
hidden2 = self.hidden2(hidden1)
concat = keras.layers.concatenate([input, hidden2])
output = self.output_(concat)
return output
model = MyModel()
model.compile(loss="mse", optimizer="sgd")
history = model.fit(X_train_scaled, y_train, epochs=10,
validation_data=(X_valid_scaled, y_valid))
model.summary()
model.evaluate(X_test_scaled, y_test)
model.predict(X_test_scaled)
```
### 7.3)
Now suppose you want to send only features 0 to 4 directly to the output, and only features 2 to 7 through the hidden layers, as shown on the diagram. Use the functional API to build, train and evaluate this model.
```
input_A = keras.layers.Input(shape=[5])
input_B = keras.layers.Input(shape=[6])
hidden1 = keras.layers.Dense(30, activation="relu")(input_B)
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)
concat = keras.layers.concatenate([input_A, hidden2])
output = keras.layers.Dense(1)(concat)
model = keras.models.Model(inputs=[input_A, input_B], outputs=[output])
model.compile(loss="mean_squared_error", optimizer="sgd")
model.summary()
X_train_scaled_A = X_train_scaled[:, :5]
X_train_scaled_B = X_train_scaled[:, 2:]
X_valid_scaled_A = X_valid_scaled[:, :5]
X_valid_scaled_B = X_valid_scaled[:, 2:]
X_test_scaled_A = X_test_scaled[:, :5]
X_test_scaled_B = X_test_scaled[:, 2:]
history = model.fit([X_train_scaled_A, X_train_scaled_B], y_train, epochs=10,
validation_data=([X_valid_scaled_A, X_valid_scaled_B], y_valid))
model.evaluate([X_test_scaled_A, X_test_scaled_B], y_test)
model.predict([X_test_scaled_A, X_test_scaled_B])
```
### 7.4)
Build the multi-input and multi-output neural net represented in the diagram.
```
input_A = keras.layers.Input(shape=X_train_scaled_A.shape[1:])
input_B = keras.layers.Input(shape=X_train_scaled_B.shape[1:])
hidden1 = keras.layers.Dense(30, activation="relu")(input_B)
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)
concat = keras.layers.concatenate([input_A, hidden2])
output = keras.layers.Dense(1)(concat)
aux_output = keras.layers.Dense(1)(hidden2)
model = keras.models.Model(inputs=[input_A, input_B],
outputs=[output, aux_output])
model.compile(loss="mean_squared_error", loss_weights=[0.9, 0.1],
optimizer="sgd")
model.summary()
history = model.fit([X_train_scaled_A, X_train_scaled_B], [y_train, y_train], epochs=10,
validation_data=([X_valid_scaled_A, X_valid_scaled_B], [y_valid, y_valid]))
model.evaluate([X_test_scaled_A, X_test_scaled_B], [y_test, y_test])
y_pred, y_pred_aux = model.predict([X_test_scaled_A, X_test_scaled_B])
y_pred
y_pred_aux
```

## Exercise 8 – Deep Nets
Let's go back to Fashion MNIST and build deep nets to tackle it. We need to load it, split it and scale it.
```
fashion_mnist = keras.datasets.fashion_mnist
(X_train_full, y_train_full), (X_test, y_test) = fashion_mnist.load_data()
X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28)
X_valid_scaled = scaler.transform(X_valid.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28)
X_test_scaled = scaler.transform(X_test.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28)
```
### 8.1)
Build a sequential model with 20 hidden dense layers, with 100 neurons each, using the ReLU activation function, plus the output layer (10 neurons, softmax activation function). Try to train it for 10 epochs on Fashion MNIST and plot the learning curves. Notice that progress is very slow.
### 8.2)
Update the model to add a `BatchNormalization` layer after every hidden layer. Notice that performance progresses much faster per epoch, although computations are much more intensive. Display the model summary and notice all the non-trainable parameters (the scale $\gamma$ and offset $\beta$ parameters).
### 8.3)
Try moving the BN layers before the hidden layers' activation functions. Does this affect the model's performance?
### 8.4)
Remove all the BN layers, and just use the SELU activation function instead (always use SELU with LeCun Normal weight initialization). Notice that you get better performance than with BN, and training is much faster. Isn't it marvelous? :-)
### 8.5)
Try training for 10 additional epochs, and notice that the model starts overfitting. Try adding a Dropout layer (with a 50% dropout rate) just before the output layer. Does it reduce overfitting? What about the final validation accuracy?
**Warning**: you should not use regular Dropout, as it breaks the self-normalizing property of the SELU activation function. Instead, use AlphaDropout, which is designed to work with SELU.

## Exercise 8 – Solution
### 8.1)
Build a sequential model with 20 hidden dense layers, with 100 neurons each, using the ReLU activation function, plus the output layer (10 neurons, softmax activation function). Try to train it for 10 epochs on Fashion MNIST and plot the learning curves. Notice that progress is very slow.
```
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
for _ in range(20):
model.add(keras.layers.Dense(100, activation="relu"))
model.add(keras.layers.Dense(10, activation="softmax"))
model.compile(loss="sparse_categorical_crossentropy", optimizer="sgd",
metrics=["accuracy"])
history = model.fit(X_train_scaled, y_train, epochs=10,
validation_data=(X_valid_scaled, y_valid))
plot_learning_curves(history)
```
### 8.2)
Update the model to add a `BatchNormalization` layer after every hidden layer. Notice that performance progresses much faster per epoch, although computations are much more intensive. Display the model summary and notice all the non-trainable parameters (the scale $\gamma$ and offset $\beta$ parameters).
```
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
for _ in range(20):
model.add(keras.layers.Dense(100, activation="relu"))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(10, activation="softmax"))
model.compile(loss="sparse_categorical_crossentropy", optimizer="sgd",
metrics=["accuracy"])
history = model.fit(X_train_scaled, y_train, epochs=10,
validation_data=(X_valid_scaled, y_valid))
plot_learning_curves(history)
model.summary()
```
### 8.3)
Try moving the BN layers before the hidden layers' activation functions. Does this affect the model's performance?
```
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
for _ in range(20):
model.add(keras.layers.Dense(100))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Activation("relu"))
model.add(keras.layers.Dense(10, activation="softmax"))
model.compile(loss="sparse_categorical_crossentropy", optimizer="sgd",
metrics=["accuracy"])
history = model.fit(X_train_scaled, y_train, epochs=10,
validation_data=(X_valid_scaled, y_valid))
plot_learning_curves(history)
```
### 8.4)
Remove all the BN layers, and just use the SELU activation function instead (always use SELU with LeCun Normal weight initialization). Notice that you get better performance than with BN, and training is much faster. Isn't it marvelous? :-)
```
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
for _ in range(20):
model.add(keras.layers.Dense(100, activation="selu",
kernel_initializer="lecun_normal"))
model.add(keras.layers.Dense(10, activation="softmax"))
model.compile(loss="sparse_categorical_crossentropy", optimizer="sgd",
metrics=["accuracy"])
history = model.fit(X_train_scaled, y_train, epochs=10,
validation_data=(X_valid_scaled, y_valid))
plot_learning_curves(history)
```
### 8.5)
Try training for 10 additional epochs, and notice that the model starts overfitting. Try adding a Dropout layer (with a 50% dropout rate) just before the output layer. Does it reduce overfitting? What about the final validation accuracy?
```
history = model.fit(X_train_scaled, y_train, epochs=10,
validation_data=(X_valid_scaled, y_valid))
plot_learning_curves(history)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
for _ in range(20):
model.add(keras.layers.Dense(100, activation="selu",
kernel_initializer="lecun_normal"))
model.add(keras.layers.AlphaDropout(rate=0.5))
model.add(keras.layers.Dense(10, activation="softmax"))
model.compile(loss="sparse_categorical_crossentropy", optimizer="sgd",
metrics=["accuracy"])
history = model.fit(X_train_scaled, y_train, epochs=20,
validation_data=(X_valid_scaled, y_valid))
plot_learning_curves(history)
```
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
# Numerical Integration
The definite integral $\int_a^b f(x) dx$ can be computed exactly if the primitive $F$ of $f$ is known, e.g.
```
f = lambda x: np.divide(x*np.exp(x), np.power(x+1,2))  # x*exp(x)/(x+1)^2, vectorized so it also works on arrays
F = lambda x: np.divide(np.exp(x),(x+1))
a = 0; b = 1;
I_ex = F(b) - F(a)
I_ex
```
In many cases the primitive is unknown though and one has to resort to numerical integration. The idea is to approximate the integrand by a function whose integral is known, e.g. piecewise linear interpolation.
- [Riemans Rule](https://www.math.ubc.ca/~pwalls/math-python/integration/riemann-sums/): sum of rectangles
- [Trapezoid Rule](https://www.math.ubc.ca/~pwalls/math-python/integration/trapezoid-rule/): sum of trapezoids
or piecewise quadratic interpolation
- [Simpson Rule](https://www.math.ubc.ca/~pwalls/math-python/integration/simpsons-rule/): quadratic polynomial on each subinterval
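As a quick illustration of the first (and simplest) of these rules, a left Riemann sum just adds up rectangles whose heights are the function values at the left endpoints. Here is a minimal sketch, reusing the integrand `f` and the exact value `I_ex` computed above (the `riemann_left` name is just a convenience):
```
def riemann_left(f, a, b, N=50):
    # N rectangles of width dx, heights taken at the left endpoints
    x = np.linspace(a, b, N+1)
    dx = (b - a)/N
    return dx*np.sum(f(x[:-1]))

print(riemann_left(f, a, b, N=1000), "vs exact:", I_ex)
```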
Trapezoids:
The definite integral of $f(x)$ is equal to the (net) area under the curve $y=f(x)$ over the interval $[a,b]$. Riemann sums approximate definite integrals by using sums of rectangles to approximate the area.
The trapezoid rule gives a better approximation of a definite integral by summing the areas of the trapezoids connecting the points
$$(x_{i-1},0),(x_i,0),(x_{i-1},f(x_{i-1})),(x_i,f(x_i))$$
for each subinterval $[x_{i-1},x_i]$ of a partition. Note that the area of each trapezoid is the sum of a rectangle and a triangle
$$(x_i-x_{i-1})f(x_{i-1})+\frac{1}{2}(x_i-x_{i-1})(f(x_i)-f(x_{i-1}))=\frac{1}{2}(f(x_i)+f(x_{i-1}))(x_i-x_{i-1})$$
For example, we can use a single trapezoid to approximate:
$$\int_0^1 e^{-x^2}\,dx$$
First, let's plot the curve $y=e^{-x^2}$ and the trapezoid on the interval $[0,1]$:
```
x = np.linspace(-0.5,1.5,100)
y = np.exp(-x**2)
plt.plot(x,y)
x0 = 0; x1 = 1;
y0 = np.exp(-x0**2); y1 = np.exp(-x1**2);
plt.fill_between([x0,x1],[y0,y1])
plt.xlim([-0.5,1.5]); plt.ylim([0,1.5]);
plt.show()
```
Approximate the integral by the area of the trapezoid:
```
A = 0.5*(y1 + y0)*(x1 - x0)
print("Trapezoid area:", A)
```
## Trapezoid Rule
This choice leads to the trapezoidal rule. If the interval $[a,b]$ is divided into subintervals $[x_k, x_{k+1}]$ of the same length $h = (b-a)/n$, with $x_0 := a$ and $x_n := b$, the summed version reads
$$\int_a^b f(x) dx \approx \frac{h}{2}(f(a) + f(b)) + h \sum_{k=1}^{n-1} f(x_k) =: T(h). $$
This is implemented in `trapz` below. The error of the numerical integral is
$$\left| T(h) - \int_a^b f(x) dx \right| = \frac{(b-a)h^2}{12} |f''(\xi)|, \quad \xi\in[a,b]$$
so if the number of intervals is doubled (and hence $h$ is halved) then the error is expected to decrease by a factor of 4. Let's check:
Let's write a function called trapz which takes input parameters $f,a,b$ and $N$ and returns the approximation $T_N(f)$. Furthermore, let's assign default value $N=50$. ([source](https://www.math.ubc.ca/~pwalls/math-python/integration/trapezoid-rule/))
```
def trapz(f,a,b,N=50):
'''Approximate the integral of f(x) from a to b by the trapezoid rule.
The trapezoid rule approximates the integral \int_a^b f(x) dx by the sum:
(dx/2) \sum_{k=1}^N (f(x_k) + f(x_{k-1}))
where x_k = a + k*dx and dx = (b - a)/N.
Parameters
----------
f : function
Vectorized function of a single variable
a , b : numbers
Interval of integration [a,b]
N : integer
Number of subintervals of [a,b]
Returns
-------
float
Approximation of the integral of f(x) from a to b using the
trapezoid rule with N subintervals of equal length.
Examples
--------
>>> trapz(np.sin,0,np.pi/2,1000)
0.9999997943832332
'''
x = np.linspace(a,b,N+1) # N+1 points make N subintervals
y = f(x)
y_right = y[1:] # right endpoints
y_left = y[:-1] # left endpoints
dx = (b - a)/N
T = (dx/2) * np.sum(y_right + y_left)
return T
```
Let's test our function on an integral where we know the answer
$$\int_0^1 3x^2 dx=1$$
```
trapz(lambda x : 3*x**2,0,1,10000)
```
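We can also check the factor-of-4 claim from above empirically, reusing the integrand `f` and the exact value `I_ex` computed at the beginning of this notebook (a quick sketch):
```
# doubling the number of subintervals N (i.e. halving h) should divide
# the trapezoid error by roughly a factor of 4
errors = [abs(trapz(f, a, b, N) - I_ex) for N in (4, 8, 16, 32, 64)]
for coarse, fine in zip(errors[:-1], errors[1:]):
    print("error ratio:", coarse/fine)
```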
The SciPy subpackage `scipy.integrate` contains several functions for approximating definite integrals and numerically solving differential equations. Let's import the subpackage under the name `spi`.
```
import scipy.integrate as spi
```
The function `scipy.integrate.trapz` computes the approximation of a definite integral by the trapezoid rule. Consulting the documentation, we see that all we need to do is supply arrays of $x$ and $y$ values for the integrand, and `scipy.integrate.trapz` returns the approximation of the integral using the trapezoid rule. The number of points we give to `scipy.integrate.trapz` is up to us, but we have to remember that more points give a better approximation and also take more time to compute!
```
N = 10000; a = 0; b = 1;
x = np.linspace(a,b,N+1)
y = 3*x**2
approximation = spi.trapz(y,x)
print(approximation)
```
## Simpson Rule
Simpson's rule uses a quadratic polynomial on each subinterval of a partition to approximate the function $f(x)$ and to compute the definite integral. This is an improvement over the trapezoid rule which approximates $f(x)$ by a straight line on each subinterval of a partition.
Here $[a,b]$ is divided into an even number $2n$ of intervals, so $h=(b-a)/(2n)$.
The formula for Simpson's rule is
$$\int_a^b f(x) dx \approx \frac{h}{3} \left( f(a) + f(b) + 4 \sum_{k=1}^{n} f(x_{2k-1}) + 2 \sum_{k=1}^{n-1} f(x_{2k}) \right) =: S(h). $$
The error goes like $h^4$ (instead of $h^2$ for the trapezoidal rule):
$$\left| S(h) - \int_a^b f(x) dx \right| = \frac{(b-a)h^4}{180} |f^{(4)}(\xi)|, \quad \xi\in[a,b].$$
So when the number of intervals is doubled, the error should decrease by a factor of 16:
Let's write a function called simps which takes input parameters $f,a,b$ and $N$ and returns the approximation $S_N(f)$. Furthermore, let's assign a default value $N=50$.
```
def simps(f,a,b,N=50):
'''Approximate the integral of f(x) from a to b by Simpson's rule.
Simpson's rule approximates the integral \int_a^b f(x) dx by the sum:
    (dx/3) \sum_{i=1}^{N/2} (f(x_{2i-2}) + 4f(x_{2i-1}) + f(x_{2i}))
where x_i = a + i*dx and dx = (b - a)/N.
Parameters
----------
f : function
Vectorized function of a single variable
a , b : numbers
Interval of integration [a,b]
N : (even) integer
Number of subintervals of [a,b]
Returns
-------
float
Approximation of the integral of f(x) from a to b using
Simpson's rule with N subintervals of equal length.
Examples
--------
>>> simps(lambda x : 3*x**2,0,1,10)
1.0
'''
if N % 2 == 1:
raise ValueError("N must be an even integer.")
dx = (b-a)/N
x = np.linspace(a,b,N+1)
y = f(x)
S = dx/3 * np.sum(y[0:-1:2] + 4*y[1::2] + y[2::2])
return S
```
Let's test our function on an integral where we know the answer
$$\int_0^1 3x^2 dx=1$$
```
simps(lambda x : 3*x**2,0,1,10)
```
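As with the trapezoid rule, we can check the factor-of-16 claim empirically with the same `f` and `I_ex` (a quick sketch; remember that `N` must stay even):
```
# doubling N (halving h) should divide the Simpson error by roughly 16
errors = [abs(simps(f, a, b, N) - I_ex) for N in (4, 8, 16, 32, 64)]
for coarse, fine in zip(errors[:-1], errors[1:]):
    print("error ratio:", coarse/fine)
```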
The SciPy subpackage `scipy.integrate` contains several functions for approximating definite integrals and numerically solving differential equations. Let's import the subpackage under the name spi.
```
import scipy.integrate as spi
```
The function `scipy.integrate.simps` computes the approximation of a definite integral by Simpson's rule. Consulting the documentation, we see that all we need to do is supply arrays of $x$ and $y$ values for the integrand, and `scipy.integrate.simps` returns the approximation of the integral using Simpson's rule.
```
N = 10; a = 0; b = 1;
x = np.linspace(a,b,N+1)
y = 3*x**2
approximation = spi.simps(y,x)
print(approximation)
```
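For the same number of sample points, Simpson's rule is typically far more accurate than the trapezoid rule. Here is a quick comparison of the two SciPy functions on $\int_0^\pi \sin x \, dx = 2$:
```
N = 100; a = 0; b = np.pi
x = np.linspace(a,b,N+1)
y = np.sin(x)
print("trapz error:", abs(spi.trapz(y,x) - 2))
print("simps error:", abs(spi.simps(y,x) - 2))
```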
```
# install composer, hiding output to keep the notebook clean
! pip install mosaicml > /dev/null 2>&1
```
# Using the Functional API
In this tutorial, we'll see an example of using Composer's algorithms in a standalone fashion with no changes to the surrounding code and no requirement to use the Composer trainer. We'll be training a simple model on CIFAR-10, similar to the [PyTorch classifier tutorial](https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html). Because we'll be using a toy model trained for only a few epochs, we won't get the same speed or accuracy gains we might expect from a more realistic problem. However, this notebook should still serve as a useful illustration of how to use various algorithms. For examples of more realistic results, see the MosaicML [Explorer](https://app.mosaicml.com/explorer/imagenet).
First, we need to define our original model, dataloader, and training loop. Let's start with the dataloader:
```
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
datadir = './data'
batch_size = 1024
transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]
)
trainset = torchvision.datasets.CIFAR10(root=datadir, train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root=datadir, train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=False, num_workers=2)
```
As you can see, we compose two transforms, one which transforms the images to tensors and another that normalizes them. We apply these transformations to both the train and test sets. Now, let's define our model. We're going to use a toy convolutional neural network so that the training epochs finish quickly.
```
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=(3, 3), stride=2)
self.conv2 = nn.Conv2d(16, 32, kernel_size=(3, 3))
self.norm = nn.BatchNorm2d(32)
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Linear(32, 128)
self.fc2 = nn.Linear(128, 64)
self.fc3 = nn.Linear(64, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.conv2(x)
x = F.relu(self.norm(x))
x = torch.flatten(self.pool(x), 1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
```
Finally, let's write a simple training loop that prints the accuracy on the test set at the end of each epoch. We'll just run a few epochs for brevity.
```
from tqdm.notebook import tqdm
import composer.functional as cf
num_epochs = 5
def train_and_eval(model, train_loader, test_loader):
torch.manual_seed(42)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = model.to(device)
opt = torch.optim.Adam(model.parameters())
for epoch in range(num_epochs):
print(f"---- Beginning epoch {epoch} ----")
model.train()
progress_bar = tqdm(train_loader)
        for X, y in progress_bar:
            X, y = X.to(device), y.to(device)  # move inputs and labels to the same device as the model
            y_hat = model(X)
            loss = F.cross_entropy(y_hat, y)
            progress_bar.set_postfix_str(f"train loss: {loss.item():.4f}")
            loss.backward()
            opt.step()
            opt.zero_grad()
        model.eval()
        num_right = 0
        eval_size = 0
        for X, y in test_loader:
            X, y = X.to(device), y.to(device)
            y_hat = model(X)
            num_right += (y_hat.argmax(dim=1) == y).sum().item()
            eval_size += len(y)
        acc_percent = 100 * num_right / eval_size
        print(f"Epoch {epoch} validation accuracy: {acc_percent:.2f}%")
```
Great. Now, let's instantiate this baseline model and see how it fares on our dataset.
```
model = Net()
train_and_eval(model, trainloader, testloader)
```
Now that we have this baseline, let's add algorithms to improve our data pipeline and model. We'll start by adding some data augmentation, accessed via `cf.colout_batch`.
```
# create dataloaders for the train and test sets
shared_transforms = [
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]
train_transforms = shared_transforms[:] + [cf.colout_batch]
test_transform = transforms.Compose(shared_transforms)
train_transform = transforms.Compose(train_transforms)
trainset = torchvision.datasets.CIFAR10(root=datadir, train=True,
download=True, transform=train_transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root=datadir, train=False,
download=True, transform=test_transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=False, num_workers=2)
```
Let's see how our model does with just these changes.
```
model = Net()
# only use one data augmentation since our small model runs quickly
# and allows the dataloader little time to do anything fancy
train_and_eval(model, trainloader, testloader)
```
As we might expect, adding data augmentation doesn't help us when we aren't training long enough to start overfitting.
Let's try using some algorithms that modify the model. We're going to keep things simple and just add a [Squeeze-and-Excitation](https://docs.mosaicml.com/en/latest/method_cards/squeeze_excite.html) module after the larger of the two conv2d operations in our model.
```
# squeeze-excite can add a lot of overhead for small
# conv2d operations, so only add it after convs with a
# minimum number of channels
cf.apply_squeeze_excite(model, latent_channels=64, min_channels=16)
```
Now let's see how our model does with the above algorithm applied.
```
train_and_eval(model, trainloader, testloader)
```
Adding squeeze-excite gives us another few percentage points of accuracy and does so with little decrease in the number of iterations per second. Great!
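Composer ships a number of other model-surgery algorithms that can be applied functionally in the same way. For example, BlurPool (anti-aliased downsampling) is exposed through `cf.apply_blurpool`; assuming that entry point is available in your installed version, stacking it with squeeze-excite on a fresh model could look like the sketch below.
```
# Sketch: stack two functional model-surgery algorithms before training.
# Assumes cf.apply_blurpool is available in the installed composer version.
model = Net()
cf.apply_blurpool(model)  # replace strided convolutions with anti-aliased versions
cf.apply_squeeze_excite(model, latent_channels=64, min_channels=16)
train_and_eval(model, trainloader, testloader)
```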
Of course, this is a toy model and dataset, but it serves to illustrate how to use Composer's algorithms inside your own training loops, with minimal changes to your code. If you hit any problems or have questions, feel free to [open an issue](https://github.com/mosaicml/composer/issues/new/) or reach out to us [on Slack](https://join.slack.com/t/mosaicml-community/shared_invite/zt-w0tiddn9-WGTlRpfjcO9J5jyrMub1dg).
     
     
     
     
     
   
[Home Page](../../START_HERE.ipynb)
[Previous Notebook](Challenge.ipynb)
     
     
     
     
[1](Challenge.ipynb)
[2]
# Challenge - Gene Expression Classification - Workbook
### Introduction
This notebook walks through an end-to-end GPU machine learning workflow where cuDF is used for processing the data and cuML is used to train machine learning models on it.
After completing this exercise, you will be able to use cuDF to load data from disk, combine tables, scale features, use one-hot encoding, and even write your own GPU kernels to efficiently transform feature columns. Additionally, you will learn how to pass this data to cuML and how to train ML models on it. The trained model is saved and then used for prediction.
It is not required that the user is familiar with cuDF or cuML. Since our aim is to go from ETL to ML training, a detailed introduction is out of scope for this notebook. We recommend [Introduction to cuDF](../../CuDF/01-Intro_to_cuDF.ipynb) for additional information.
### Problem Statement:
We are trying to classify patients with acute myeloid leukemia (AML) and acute lymphoblastic leukemia (ALL) using machine learning (classification) algorithms. This dataset comes from a proof-of-concept study published in 1999 by Golub et al. It showed how new cases of cancer could be classified by gene expression monitoring (via DNA microarray) and thereby provided a general approach for identifying new cancer classes and assigning tumors to known classes.
Here is the dataset link: https://www.kaggle.com/crawford/gene-expression.
## Here is the list of exercises and modules to work on in the lab:
- Convert the serial Pandas computations to CuDF operations.
- Utilize CuML to accelerate the machine learning models.
- Experiment with Dask to create a cluster and distribute the data and scale the operations.
You will start writing code from <a href='#dask1'>here</a>, but make sure you execute the data processing blocks to understand the dataset.
### 1. Data Processing
The first step is to download the dataset and put it in the data directory for use in this tutorial. Download the dataset from the Kaggle link above and place it in the (host/data) folder. Now we will import the necessary libraries.
```
import numpy as np; print('NumPy Version:', np.__version__)
import pandas as pd
import sys
import sklearn; print('Scikit-Learn Version:', sklearn.__version__)
from sklearn import preprocessing
from sklearn.utils import resample
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, roc_curve, auc
from sklearn.preprocessing import OrdinalEncoder, StandardScaler
import cudf
import cupy
# import for model building
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import mean_squared_error
from cuml.metrics.regression import r2_score
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn import linear_model
from sklearn.metrics import accuracy_score
from sklearn import model_selection, datasets
from cuml.dask.common import utils as dask_utils
from dask.distributed import Client, wait
from dask_cuda import LocalCUDACluster
import dask_cudf
from cuml.dask.ensemble import RandomForestClassifier as cumlDaskRF
from sklearn.ensemble import RandomForestClassifier as sklRF
```
We'll read the dataframe into y from the csv file, view its dimensions and observe the first 5 rows of the dataframe.
```
%%time
y = pd.read_csv('../../../data/actual.csv')
print(y.shape)
y.head()
```
Let's convert our target variable categories to numbers.
```
y['cancer'].value_counts()
# Recode label to numeric
y = y.replace({'ALL':0,'AML':1})
labels = ['ALL', 'AML'] # for plotting convenience later on
```
Read the training and test data provided in the challenge from the data folder. View their dimensions.
```
# Import training data
df_train = pd.read_csv('../../../data/data_set_ALL_AML_train.csv')
print(df_train.shape)
# Import testing data
df_test = pd.read_csv('../../../data/data_set_ALL_AML_independent.csv')
print(df_test.shape)
```
Observe the first few rows of the train dataframe and the data format.
```
df_train.head()
```
Observe the first few rows of the test dataframe and the data format.
```
df_test.head()
```
As we can see, the dataset has categorical values, but only in the columns whose names contain "call". We won't use these categorical columns, so we remove them.
```
# Remove "call" columns from training and testing data
train_to_keep = [col for col in df_train.columns if "call" not in col]
test_to_keep = [col for col in df_test.columns if "call" not in col]
X_train_tr = df_train[train_to_keep]
X_test_tr = df_test[test_to_keep]
```
Rename the columns and reindex for formatting purposes and ease in reading the data.
```
train_columns_titles = ['Gene Description', 'Gene Accession Number', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10',
'11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25',
'26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38']
X_train_tr = X_train_tr.reindex(columns=train_columns_titles)
test_columns_titles = ['Gene Description', 'Gene Accession Number','39', '40', '41', '42', '43', '44', '45', '46',
'47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59',
'60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72']
X_test_tr = X_test_tr.reindex(columns=test_columns_titles)
```
We will take the transpose of the dataframe so that each row is a patient and each column is a gene.
```
X_train = X_train_tr.T
X_test = X_test_tr.T
print(X_train.shape)
X_train.head()
```
Now we clean the data, removing the extra columns and converting the values to numeric types.
```
# Clean up the column names for training and testing data
X_train.columns = X_train.iloc[1]
X_train = X_train.drop(["Gene Description", "Gene Accession Number"]).apply(pd.to_numeric)
# Clean up the column names for Testing data
X_test.columns = X_test.iloc[1]
X_test = X_test.drop(["Gene Description", "Gene Accession Number"]).apply(pd.to_numeric)
print(X_train.shape)
print(X_test.shape)
X_train.head()
```
We have the 38 patients as rows in the training set, and the other 34 as rows in the testing set. Each of those datasets has 7129 gene expression features. But we haven't yet associated the target labels with the right patients. You will recall that all the labels are stored in a single dataframe. Let's split the labels so that patients and labels match up across the training and testing dataframes: the first 38 patients' cancer types go with the training set, and the rest with the test set.
```
X_train = X_train.reset_index(drop=True)
y_train = y[y.patient <= 38].reset_index(drop=True)
# Subset the rest for testing
X_test = X_test.reset_index(drop=True)
y_test = y[y.patient > 38].reset_index(drop=True)
```
Generate descriptive statistics to analyse the data further.
```
X_train.describe()
```
Clearly there is some variation in the scales across the different features. Many machine learning models work much better with data that's on the same scale, so let's create a scaled version of the dataset.
```
X_train_fl = X_train.astype('float64')
X_test_fl = X_test.astype('float64')
# Apply the same scaling to both datasets
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train_fl)
X_test = scaler.transform(X_test_fl) # note that we transform rather than fit_transform
```
<a id='dask1'></a>
### 2. Conversion to CuDF Dataframe
Convert the pandas dataframes to CuDF dataframes to carry out the further CuML tasks.
```
#Modify the code in this cell
%%time
X_cudf_train = cudf.DataFrame() #Pass X train dataframe here
X_cudf_test = cudf.DataFrame() #Pass X test dataframe here
y_cudf_train = cudf.DataFrame() #Pass y train dataframe here
#y_cudf_test = cudf.Series(y_test.values) #Pass y test dataframe here
```
### 3. Model Building
#### Dask Integration
We will try using the Random Forests Classifier and implement using CuML and Dask.
#### Start Dask cluster
```
#Modify the code in this cell
# This will use all GPUs on the local host by default
cluster = LocalCUDACluster() #Set 1 thread per worker using arguments to cluster
c = Client() #Pass the cluster as an argument to Client
# Query the client for all connected workers
workers = c.has_what().keys()
n_workers = len(workers)
n_streams = 8 # Performance optimization
```
#### Define Parameters
In addition to the number of examples, random forest fitting performance depends heavily on the number of columns in a dataset and (especially) on the maximum depth to which trees are allowed to grow. Lower `max_depth` values can greatly speed up fitting, though going too low may reduce accuracy.
```
# Random Forest building parameters
max_depth = 12
n_bins = 16
n_trees = 1000
```
#### Distribute data to worker GPUs
```
X_train = X_train.astype(np.float32)
X_test = X_test.astype(np.float32)
y_train = y_train.astype(np.int32)
y_test = y_test.astype(np.int32)
n_partitions = n_workers
def distribute(X, y):
# First convert to cudf (with real data, you would likely load in cuDF format to start)
X_cudf = cudf.DataFrame.from_pandas(pd.DataFrame(X))
y_cudf = cudf.Series(y)
# Partition with Dask
# In this case, each worker will train on 1/n_partitions fraction of the data
X_dask = dask_cudf.from_cudf(X_cudf, npartitions=n_partitions)
y_dask = dask_cudf.from_cudf(y_cudf, npartitions=n_partitions)
# Persist to cache the data in active memory
X_dask, y_dask = \
dask_utils.persist_across_workers(c, [X_dask, y_dask], workers=workers)
return X_dask, y_dask
#Modify the code in this cell
X_train_dask, y_train_dask = distribute() #Pass train data as arguments here
X_test_dask, y_test_dask = distribute() #Pass test data as arguments here
```
#### Create the Scikit-learn model
To have a CPU baseline to compare against the distributed cuML random forest, we will train a standard scikit-learn `RandomForestClassifier` using all available cores.
```
%%time
# Use all available CPU cores
skl_model = sklRF(max_depth=max_depth, n_estimators=n_trees, n_jobs=-1)
skl_model.fit(X_train, y_train.iloc[:,1])
```
#### Train the distributed cuML model
```
#Modify the code in this cell
%%time
cuml_model = cumlDaskRF(max_depth=max_depth, n_estimators=n_trees, n_bins=n_bins, n_streams=n_streams)
cuml_model.fit() # Pass X and y train dask data here
wait(cuml_model.rfs) # Allow asynchronous training tasks to finish
```
#### Predict and check accuracy
```
#Modify the code in this cell
skl_y_pred = skl_model.predict(X_test)
cuml_y_pred = cuml_model.predict().compute().to_array() #Pass the X test dask data as argument here
# Due to randomness in the algorithm, you may see slight variation in accuracies
print("SKLearn accuracy: ", accuracy_score(y_test.iloc[:,1], skl_y_pred))
print("CuML accuracy: ", accuracy_score()) #Pass the y test dask data and predicted values from CuML model as argument here
```
<a id='ex4'></a><br>
### 4. CONCLUSION
Let's compare the performance of our solution!
| Algorithm | Implementation | Accuracy | Time |
| ----------- | ----------- | ----------- | ----------- |
| Random Forest | Scikit-learn (CPU) |  |  |
| Random Forest | cuML + Dask (GPU) |  |  |
Write down your observations and compare the CuML and Scikit learn scores. They should be approximately equal. We hope that you found this exercise exciting and beneficial in understanding RAPIDS better. Share your highest accuracy and try to use the unique features of RAPIDS for accelerating your data science pipelines. Don't restrict yourself to the previously explained concepts, but use the documentation to apply more models and functions and achieve the best results.
### 5. References
<p xmlns:dct="http://purl.org/dc/terms/">
<a rel="license"
href="http://creativecommons.org/publicdomain/zero/1.0/">
<center><img src="http://i.creativecommons.org/p/zero/1.0/88x31.png" style="border-style: none;" alt="CC0" /></center>
</a>
</p>
- The dataset is licensed under a CC0: Public Domain license.
- Molecular Classification of Cancer: Class Discovery and Class Prediction by Gene Expression. Science 286:531-537. (1999). Published: 1999.10.14. T.R. Golub, D.K. Slonim, P. Tamayo, C. Huard, M. Gaasenbeek, J.P. Mesirov, H. Coller, M. Loh, J.R. Downing, M.A. Caligiuri, C.D. Bloomfield, and E.S. Lander
## Licensing
This material is released by OpenACC-Standard.org, in collaboration with NVIDIA Corporation, under the Creative Commons Attribution 4.0 International (CC BY 4.0).
# Ray Crash Course - Actors
© 2019-2021, Anyscale. All Rights Reserved

Using Ray _tasks_ is great for distributing work around a cluster, but we've said nothing so far about managing distributed _state_, one of the big challenges in distributed computing. Ray tasks are great for _stateless_ computation, but we need something for _stateful_ computation.
Python classes are a familiar mechanism for encapsulating state. Just as Ray tasks extend the familiar concept of Python _functions_, Ray addresses stateful computation by extending _classes_ to become Ray _actors_.
> **Tip:** For more about Ray, see [ray.io](https://ray.io) or the [Ray documentation](https://docs.ray.io/en/latest/).
## What We Mean by Distributed State
If you've worked with data processing libraries like [Pandas](https://pandas.pydata.org/) or big data tools like [Apache Spark](https://spark.apache.org), you know that they provide rich features for manipulating large, structured _data sets_, i.e., the analogs of tables in a database. Some tools even support partitioning of these data sets over clusters for scalability.
This isn't the kind of distributed "state" Ray addresses. Instead, it's the more open-ended _graph of objects_ found in more general-purpose applications. For example, it could be the state of a game engine used in a reinforcement learning (RL) application or the total set of parameters in a giant neural network, some of which now have hundreds of millions of parameters.
## Conway's Game of Life
Let's explore Ray's actor model using [Conway's Game of Life](https://en.wikipedia.org/wiki/Conway's_Game_of_Life), a famous _cellular automaton_.
Here is an example of a notable pattern of game evolution, _Gosper's glider gun_:

(credit: Lucas Vieira - Own work, CC BY-SA 3.0, https://commons.wikimedia.org/w/index.php?curid=101736)
We'll use an implementation of Conway's Game of Life as a nontrivial example of maintaining state, the current grid of living and dead cells. We'll see how to leverage Ray to scale it.
> **Note:** Sadly, [John Horton Conway](https://en.wikipedia.org/wiki/John_Horton_Conway), the inventor of this automaton, passed away from COVID-19 on April 11, 2020. This lesson is dedicated to Professor Conway.
Let's start with some imports
```
import ray, time, statistics, sys, os
import numpy as np
import os
sys.path.append("..") # For library helper functions
```
I've never seen this done anywhere else, but our implementation of Game of Life doesn't just use `1` for living cells, it uses the number of iterations they've been alive, so `1-N`. I'll exploit this when we graph the game.
```
from game_of_life import Game, State, ConwaysRules
```
Utility functions for plotting using Holoviews and Bokeh, as well as running and timing games.
```
from actor_lesson_util import new_game_of_life_graph, new_game_of_life_grid, run_games, run_ray_games, show_cmap
```
The implementation is a bit long, so all the code is contained in [`game_of_life.py`](game_of_life.py).
(You can also run that file as a standalone script from the command line, try `python game_of_life.py --help`. On MacOS and Linux machines, the script is executable, so you can omit the `python`).
The first class is the `State`, which encapsulates the board state as an `N x N` grid of _cells_, where `N` is specified by the user. (For simplicity, we just use square grids.) There are two ways to initialize the game, specifying a starting grid or a size, in which case the cells are set randomly. The sample below just shows the size option. `State` instances are _immutable_, because the `Game` (discussed below) keeps a sequence of them, representing the lifetime states of the game.
For smaller grids, it's often possible that the game reaches a terminal state where it stops evolving. Larger grids are more likely to exhibit different cyclic patterns that would evolve forever, thereby making those runs appear to be _immortal_, except they eventually get disrupted by evolving neighbors.
```python
class State:
def __init__(self, size = 10):
# The version in the file also lets you pass in a grid of initial cells.
self.size = size
self.grid = np.random.randint(2, size = size*size).reshape((size, size))
def living_cells(self):
cells = [(i,j) for i in range(self.size) for j in range(self.size) if self.grid[i][j] != 0]
return zip(*cells)
```
Next, `ConwaysRules` encapsulates the logic of computing the new state of a game from the current state, using the update rules defined as follows:
* Any live cell with fewer than two live neighbours dies, as if by underpopulation.
* Any live cell with two or three live neighbours lives on to the next generation.
* Any live cell with more than three live neighbours dies, as if by overpopulation.
* Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction.
This class is stateless; `step()` is passed a `State` instance and it returns a new instance for the updated state.
```python
class ConwaysRules:
def step(self, state):
"""
Determine the next values for all the cells, based on the current
state. Creates a new State with the changes.
"""
new_grid = state.grid.copy()
for i in range(state.size):
for j in range(state.size):
new_grid[i][j] = self.apply_rules(i, j, state)
new_state = State(grid = new_grid)
return new_state
def apply_rules(self, i, j, state):
# Compute and return the next state for grid[i][j]
return ...
```
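For reference, here is a minimal sketch of what `apply_rules` could look like, written directly from the four rules above. It treats the grid as wrapping around at the edges, which the real implementation in `game_of_life.py` may handle differently (and the real version also tracks cell ages rather than returning just `0` or `1`):
```python
    def apply_rules(self, i, j, state):
        # Count living neighbors, wrapping around the grid edges.
        num_live = 0
        for di in (-1, 0, 1):
            for dj in (-1, 0, 1):
                if di == 0 and dj == 0:
                    continue
                if state.grid[(i + di) % state.size][(j + dj) % state.size] != 0:
                    num_live += 1
        if state.grid[i][j] != 0:                  # currently alive
            return 1 if num_live in (2, 3) else 0  # survival vs. under/overpopulation
        return 1 if num_live == 3 else 0           # reproduction
```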
Finally, the game holds a sequence of states and the rules "engine".
```python
class Game:
def __init__(self, initial_state, rules):
self.states = [initial_state]
self.rules = rules
    def step(self, num_steps = 1):
        """Take 1 or more steps, returning a list of new states."""
        new_states = []
        for _ in range(num_steps):
            # Each new state is computed from the most recent state, so multiple steps chain correctly.
            new_states.append(self.rules.step(self.states[-1]))
            self.states.append(new_states[-1])
        return new_states
```
Okay, let's try it out!!
```
steps = 100 # Use a larger number for a long-running game.
game_size = 100
plot_size = 800
max_cell_age = 10 # clip the age of cells for graphing.
use_fixed_cell_sizes = True # Keep the points the same size. Try False, too!
```
For the graphs, we'll use a "greenish" background that looks good with `RdYlBu` color map.
However, if you have red-green color blindness, change the `bgcolor` string to `white`! Or, try the second combination with a custom color map `cmap` and background color `white` or `darkgrey`.
```
# Color maps from Bokeh:
cmap = 'RdYlBu' # others: 'Turbo' 'YlOrBr'
bgcolor = '#C0CfC8' # a greenish color, but not great for forms of red-green color blindness, where 'white' is better.
# A custom color map created at https://projects.susielu.com/viz-palette. Works best with white or dark grey background
#cmap=['#ffd700', '#ffb14e', '#fa8775', '#ea5f94', '#cd34b5', '#9d02d7', '#0000ff']
#bgcolor = 'darkgrey' # 'white'
def new_game(game_size):
initial_state = State(size = game_size)
rules = ConwaysRules()
game = Game(initial_state=initial_state, rules=rules)
return game
game = new_game(10)
print(game.states[0])
```
Now let's create a graph for a game of life using the imported utility function, `new_game_of_life_grid` (with only one graph in the "grid" for now).
**Note:** It will be empty for now.
```
_, graphs = new_game_of_life_grid(game_size, plot_size, x_grid=1, y_grid=1, shrink_factor=1.0,
bgcolor=bgcolor, cmap=cmap,
use_fixed_cell_sizes=use_fixed_cell_sizes, max_cell_age=max_cell_age)
graphs[0]
```
To make sure we don't consume too much driver memory, since games can grow large, let's write a function, `do_trial`, to run the experiment, then when it returns, the games will go out of scope and their memory will be reclaimed. It will use a library function we imported, `run_games` and the `new_game` function above to do most of the work.
(You might wonder why we don't create the `graphs` inside the function. It's essentially impossible to show the grid **before** the games run **and** to do the update visualization after it's shown inside one function inside a notebook cell. We have to build the grid, render it separately, then call `do_trial`.)
```
def do_trial(graphs, num_games=1, steps=steps, batch_size=1, game_size_for_each=game_size, pause_between_batches=0.0):
games = [new_game(game_size_for_each) for _ in range(num_games)]
return run_games(games, graphs, steps, batch_size, pause_between_batches)
%time num_games, steps, batch_size, duration = do_trial(graphs, steps=steps, pause_between_batches=0.1)
num_games, steps, batch_size, duration
```
If you can't see the plot or see it update, click here for a screen shot:
* [colored background](../images/ConwaysGameOfLife-Snapshot.png)
* [white background](../images/ConwaysGameOfLife-Snapshot-White-Background.png)
(Want to run longer? Pass a larger value for `steps` in the previous cell. 1000 takes several minutes, but you'll see interesting patterns develop.)
The first line of output is written by `run_games`, which is called by `do_trial`. The next two lines are output from the `%time` "magic". The fourth line shows the values returned by `run_games` through `do_trial`, which we'll use more fully in the exercise below.
How much time did it take? Note that there were `steps*0.1` seconds of sleep time between steps, so the rest is compute time. Does that account for the difference between the _user_ time and the _wall_ time?
```
steps*0.1
```
Yes, this covers most of the extra wall time.
A point's color changed as it lived longer. Here is the _color map_ used, where the top color corresponds to the longest-lived cells.
```
show_cmap(cmap=cmap, max_index=max_cell_age)
```
If you can't see the color map in the previous cell output, click [here](../images/ConwaysGameOfLife-ColorMap-RdYlBu.png) for the color map `RdYlBu`.
You could experiment with different values for `max_cell_age`.
> **Mini Exercise:** Change the value passed for `use_fixed_cell_sizes` to be `False` (in the cell that calls `new_game_of_life_grid`). Then rerun the `%time do_trial()` cell. What happens to the graph?
### Running Lots of Games
Suppose we wanted to run many of these games at the same time. For example, we might use reinforcement learning to find the initial state that maximizes some _reward_, like the most live cells after `N` steps or for immortal games. You could try writing a loop that starts `M` games and run the previous step loop interleaving games. Let's try that, with smaller grids.
```
x_grid = 5
y_grid = 3
shrink_factor = y_grid # Instead of 1 N-size game, build N/shrinkfactor size games
small_game_size = round(game_size/shrink_factor)
```
First build a grid of graphs, like before:
```
gridspace, all_graphs = new_game_of_life_grid(small_game_size, plot_size, x_grid, y_grid, shrink_factor,
bgcolor=bgcolor, cmap=cmap,
use_fixed_cell_sizes=use_fixed_cell_sizes, max_cell_age=max_cell_age)
gridspace
%time num_games, steps, batch_size, duration = do_trial(all_graphs, num_games=x_grid*y_grid, steps=steps, batch_size=1, game_size_for_each=small_game_size, pause_between_batches=0.1)
num_games, steps, batch_size, duration
```
If you can't see the plot or see it update, click here for a screen shot:
* [colored background](../images/ConwaysGameOfLife-Grid-Snapshot.png)
* [white background](../images/ConwaysGameOfLife-Grid-Snapshot-White-Background.png) (captured earlier in the run)
How much time did it take? You can perceive a "wave" across the graphs at each time step, because the games aren't running concurrently. Sometimes, a "spurt" of updates will happen, etc. Not ideal...
There were the same `steps*0.1` seconds of sleep time between steps, not dependent on the number of games, so the rest is compute time.
## Improving Performance with Ray.
Let's start Ray as before in the [first lesson](01-Ray-Tasks.ipynb).
```
ray.init(ignore_reinit_error=True)
```
Running on your laptop? Click the output of the next cell to open the Ray Dashboard.
If you are running on the Anyscale platform, use the dashboard URL provided to you.
```
print(f'New port? http://{ray.get_dashboard_url()}')
```
## Actors - Ray's Tool for Distributed State
Python is an object-oriented language. We often encapsulate bits of state in classes, like we did for `State` above. Ray leverages this familiar mechanism to manage distributed state.
Recall that adding the `@ray.remote` annotation to a _function_ turned it into a _task_. If we use the same annotation on a Python _class_, we get an _actor_.
### Why "Actor"
The [Actor Model of Concurrency](https://en.wikipedia.org/wiki/Actor_model) is almost 50 years old! It's a _message-passing_ model, where autonomous blocks of code, the actors, receive messages from other actors asking them to perform work or return some results. Implementations provide thread safety while the messages are processed, one at a time. This means the user of an actor model implementation doesn't have to worry about writing thread-safe code. Because many messages might arrive while one is being processed, they are stored in a queue and processed one at a time, in order of arrival.
There are many other implementations of the actor model, including [Erlang](https://www.erlang.org/), the first system to create a production-grade implementation, initially used for telecom switches, and [Akka](https://akka.io), a JVM implementation inspired by Erlang.
> **Tip:** The [Ray Package Reference](https://ray.readthedocs.io/en/latest/package-ref.html) in the [Ray Docs](https://ray.readthedocs.io/en/latest/) is useful for exploring the API features we'll learn.
Let's start by simply making `Game` an actor. We'll just subclass it and add `@ray.remote` to the subclass.
There's one other change we have to make; if we want to access the `state` and `rules` instances in an Actor, we can't just use `mygame.state`, for example, as you would normally do for Python instances. Instead, we have to add "getter" methods for them.
Here's our Game actor definition.
```
@ray.remote
class RayGame(Game):
def __init__(self, initial_state, rules):
super().__init__(initial_state, rules)
def get_states(self):
return self.states
def get_rules(self):
return self.rules
```
To construct an instance and call methods, you use `.remote` as for tasks:
```
def new_ray_game(game_size):
initial_state = State(size = game_size)
rules = ConwaysRules()
ray_game_actor = RayGame.remote(initial_state, rules) # Note that .remote(...) is used to construct the instance.
return ray_game_actor
```
We'll use the following function to try out the implementation, but then take the Ray actor out of scope when we're done. This is because actors remain pinned to a worker as long as the driver (this notebook) has a reference to them. We don't want that wasted space...
```
def try_ray_game_actor():
ray_game_actor = new_ray_game(small_game_size)
print(f'Actor for game: {ray_game_actor}')
init_states = ray.get(ray_game_actor.step.remote())
print(f'\nInitial state:\n{init_states[0]}')
new_states = ray.get(ray_game_actor.step.remote())
print(f'\nState after step #1:\n{new_states[0]}')
try_ray_game_actor()
```
> **Key Points:** To summarize:
>
> 1. Declare an _actor_ by annotating a class with `@ray.remote`, just like declaring a _task_ from a function.
> 2. Add _accessor_ methods for any data members that you need to read or write, because using direct access, such as `my_game.state`, doesn't work for actors.
> 3. Construct actor instances with `my_instance = MyClass.remote(...)`.
> 4. Call methods with `my_instance.some_method.remote(...)`.
> 5. Use `ray.get()` and `ray.wait()` to retrieve results, just like you do for task results.
> **Tip:** If you start getting warnings about lots of Python processes running or you have too many actors scheduled, you can safely ignore these messages for now, but the performance measurements below won't be as accurate.
Okay, now let's repeat our grid experiment with a Ray-enabled Game of Life. Let's define a helper function, `do_ray_trial`, which is analogous to `do_trial` above. It encapsulates some of the steps for the same reasons mentioned above: so that our actors go out of scope and the worker slots are reclaimed when the function call returns.
We call a library function `run_ray_games` to run these games. It's somewhat complicated, because it uses `ray.wait()` to process updates as soon as they are available, and also has hooks for batch processing and running without graphing (see below).
We'll create the graphs separately and pass them into `do_ray_trial`.
```
def do_ray_trial(graphs, num_games=1, steps=steps, batch_size=1, game_size_for_each=game_size, pause_between_batches=0.0):
game_actors = [new_ray_game(game_size_for_each) for _ in range(num_games)]
return run_ray_games(game_actors, graphs, steps, batch_size, pause_between_batches)
ray_gridspace, ray_graphs = new_game_of_life_grid(small_game_size, plot_size, x_grid, y_grid, shrink_factor,
bgcolor=bgcolor, cmap=cmap,
use_fixed_cell_sizes=use_fixed_cell_sizes, max_cell_age=max_cell_age)
ray_gridspace
%time do_ray_trial(ray_graphs, num_games=x_grid*y_grid, steps=steps, batch_size=1, game_size_for_each=small_game_size, pause_between_batches=0.1)
```
(Can't see the image? It's basically the same as the previous grid example.)
How did your times compare? For example, using a recent model MacBook Pro laptop, this run took roughly 19 seconds vs. 21 seconds for the previous run without Ray. That's not much of an improvement. Why?
In fact, updating the graphs causes enough overhead to remove most of the speed advantage of using Ray. We also sleep briefly between generations for nicer output. However, using Ray does produce smoother graph updates.
So, if we want to study more performance optimizations, we should remove the graphing overhead, which we'll do for the rest of this lesson.
Let's run the two trials without graphs and compare the performance. We'll use no pauses between "batches" and run the same number of games as the number of CPU (cores) Ray says we have. This is actually the number of workers Ray started for us and 2x the number of actual cores:
```
num_cpus_float = ray.cluster_resources()['CPU']
num_cpus_float
```
As soon as you start the next two cells, switch to the Ray Dashboard and watch the CPU utilization. You'll see the Ray workers are idle, because we aren't using them right now, and the total CPU utilization will be well under 100%. For example, on a four-core laptop, the total CPU utilization will be 20-25%, or roughly one quarter of capacity.
Why? We're running the whole computation in the Python process for this notebook, which only utilizes one core.
```
%time do_trial(None, num_games=round(num_cpus_float), steps=steps, batch_size=1, game_size_for_each=game_size, pause_between_batches=0.0)
```
Now use Ray. Again, as soon as you start the next cell, switch to the Ray Dashboard and watch the CPU utilization. Now, the Ray workers will be utilized (but not 100%) and the total CPU utilization will be higher. You'll probably see 70-80% utilization.
Hence, now we're running on all cores.
```
%time do_ray_trial(None, num_games=round(num_cpus_float), steps=steps, batch_size=1, game_size_for_each=game_size, pause_between_batches=0.0)
```
So, using Ray does help when running parallel games. On a typical laptop, the performance boost is about 2-3 times better. It's not 15 times better (the number of concurrent games), because the computation is CPU intensive for each game with frequent memory access, so all the available cores are fully utilized. We would see much more impressive improvements on a cluster with a lot of CPU cores when running a massive number of games.
Notice the `user` and `total` times reported for the non-Ray and Ray runs (which are printed by the `%time` "magic"). They only measure the time spent in the notebook's Python process, i.e., our "driver" program, not the whole application. Without Ray, all the work is done in this process, as we said previously, so the `user` and `total` times roughly equal the wall clock time. However, for Ray, these times are very low; the notebook is mostly idle, while the work is done in the separate Ray worker processes.
## More about Actors
Let's finish with a discussion of additional important information about actors, including recapping some points mentioned above.
### Actor Scheduling and Lifetimes
For the most part, when Ray runs actor code, it uses the same _task_ mechanisms we discussed in the [Ray Tasks](01-Ray-Tasks.ipynb) lesson. Actor constructor and method invocations work just like task invocations. However, there are a few notable differences:
* Once a _task_ finishes, it is removed from the worker that executed it, while an actor is _pinned_ to the worker until all Python references to it in the driver program are out of scope. That is, the usual garbage collection mechanism in Python determines when an actor is no longer needed and is removed from a worker. The reason the actor must remain in memory is because it holds state that might be needed, whereas tasks are stateless.
* Currently, each actor instance uses tens of MB of memory overhead. Hence, just as you should avoid having too many fine-grained tasks, you should avoid too many actor instances. (Reducing the overhead per actor is an ongoing improvement project.)
We explore actor scheduling and lifecycles in much greater depth in lesson [03: Ray Internals](03-Ray-Internals.ipynb) in the [Advanced Ray](../advanced-ray/00-Advanced-Ray-Overview.ipynb) tutorial.
### Durability of Actor State
At this time, Ray provides no built-in mechanism for _persisting_ actor state, i.e., writing to disk or a database in case of process failure. Hence, if a worker or whole server goes down with actor instances, their state is lost.
This is an area where Ray will evolve and improve in the future. For now, an important design consideration is to decide when you need to _checkpoint_ state and to use an appropriate mechanism for this purpose. Some of the Ray APIs explored in other tutorials have built-in checkpoint features, such as for saving snapshots of trained models to a file system.
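To make the checkpointing idea concrete, here is one simple, hand-rolled pattern (a sketch only, not a built-in Ray feature): give the actor explicit save/restore methods that serialize its state to disk, and have the driver call them periodically. Note that on a multi-node cluster the file would land on the worker's local filesystem, so in practice you would point it at shared or cloud storage.
```python
import pickle

@ray.remote
class CheckpointedGame(Game):
    def save_checkpoint(self, path):
        # Serialize the full list of game states to disk.
        with open(path, 'wb') as f:
            pickle.dump(self.states, f)

    def restore_checkpoint(self, path):
        # Reload a previously saved list of states.
        with open(path, 'rb') as f:
            self.states = pickle.load(f)
```
The driver would call something like `game.save_checkpoint.remote('checkpoint.pkl')` every few steps (the names here are hypothetical), and after a failure construct a new actor and call `restore_checkpoint` on it.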
## Extra - Does It Help to Run with Larger Batch Sizes?
You can read this section but choose to skip running the code for time's sake. The outcomes are discussed at the end.
You'll notice that we defined `run_games` and `do_trial`, as well as `run_ray_games` and `do_ray_trial` to take an optional `batch_size` that defaults to `1`. The idea is that maybe running game steps in batches, rather than one step at a time, will improve performance (but look less pleasing in the graphs).
This concept works in some contexts, such as minimizing the number of messages sent in networks (that is, fewer, but larger payloads), but it actually doesn't help a lot here, because each game is played in a single process, whether using Ray or not (at least as currently implemented...). Batching reduces the number of method invocations, but it's not an important amount of overhead in our case.
Let's confirm our suspicion about batching, that it doesn't help a lot.
Let's time several batch sizes without and with Ray. We'll run several times with each batch size to get an informal sense of the variation possible.
Once again, watch the Ray Dashboard while the next two code cells run.
```
for batch in [1, 10, 25, 50]:
for run in [0, 1]:
do_trial(graphs = None, num_games=1, steps=steps, batch_size=batch, game_size_for_each=game_size, pause_between_batches=0.0)
```
There isn't a significant difference based on batch size.
What about Ray? If we're running just one game, the results should be about the same.
```
for batch in [1, 10, 25, 50]:
for run in [0, 1]:
do_ray_trial(graphs = None, num_games=1, steps=steps, batch_size=batch, game_size_for_each=game_size, pause_between_batches=0.0)
```
With Ray's background activity, there is likely to be a little more variation in the numbers, but the conclusion is the same; the batch size doesn't matter because no additional exploitation of asynchronous computing is used.
# Exercises
When we needed to run multiple games concurrently as fast as possible, Ray was an easy win. If we graphed them while running, the wall-clock time is about the same, due to the graphics overhead, but the graphs updated more smoothly and each one looked independent.
Just as for Ray tasks, actors add some overhead, so there will be a crossing point for small problems where the concurrency provided by Ray won't be as beneficial. This exercise uses a simple actor example to explore this tradeoff.
See the [solutions notebook](solutions/Ray-Crash-Course-Solutions.ipynb) for a discussion of questions posed in this exercise.
## Exercise 1
Let's investigate Ray Actor performance. Answers to the questions posed here are in the [solutions](solutions/Ray-Crash-Course-Solutions.ipynb) notebook.
Consider the following class and actor, which simulate a busy process using `time.sleep()`:
```
class Counter:
"""Remember how many times ``next()`` has been called."""
def __init__(self, pause):
self.count = 0
self.pause = pause
def next(self):
time.sleep(self.pause)
self.count += 1
return self.count
@ray.remote
class RayCounter(Counter):
"""Remember how many times ``next()`` has been called."""
def __init__(self, pause):
super().__init__(pause)
def get_count(self):
return self.count
```
Recall that for an actor we need an accessor method to get the current count.
Here are methods to time them.
```
def counter_trial(count_to, num_counters = 1, pause = 0.01):
print('not ray: count_to = {:5d}, num counters = {:4d}, pause = {:5.3f}: '.format(count_to, num_counters, pause), end='')
start = time.time()
counters = [Counter(pause) for _ in range(num_counters)]
for i in range(num_counters):
for n in range(count_to):
counters[i].next()
duration = time.time() - start
print('time = {:9.5f} seconds'.format(duration))
return count_to, num_counters, pause, duration
def ray_counter_trial(count_to, num_counters = 1, pause = 0.01):
print('ray: count_to = {:5d}, num counters = {:4d}, pause = {:5.3f}: '.format(count_to, num_counters, pause), end='')
start = time.time()
final_count_futures = []
counters = [RayCounter.remote(pause) for _ in range(num_counters)]
for i in range(num_counters):
for n in range(count_to):
counters[i].next.remote()
final_count_futures.append(counters[i].get_count.remote())
ray.get(final_count_futures) # Discard result, but wait until finished!
duration = time.time() - start
print('time = {:9.5f} seconds'.format(duration))
return count_to, num_counters, pause, duration
```
Let's get a sense of what the performance looks like:
```
count_to = 10
for num_counters in [1, 2, 3, 4]:
counter_trial(count_to, num_counters, 0.0)
for num_counters in [1, 2, 3, 4]:
counter_trial(count_to, num_counters, 0.1)
for num_counters in [1, 2, 3, 4]:
counter_trial(count_to, num_counters, 0.2)
```
When there is no sleep pause, the results are almost instantaneous. For nonzero pauses, the times scale linearly in the pause size and the number of `Counter` instances. This is expected, since `Counter` and `counter_trial` are completely synchronous.
What about for Ray?
```
count_to = 10
for num_counters in [1, 2, 3, 4]:
ray_counter_trial(count_to, num_counters, 0.0)
for num_counters in [1, 2, 3, 4]:
ray_counter_trial(count_to, num_counters, 0.1)
for num_counters in [1, 2, 3, 4]:
ray_counter_trial(count_to, num_counters, 0.2)
```
Ray has higher overhead, so the zero-pause times for `RayCounter` are much longer than for `Counter`, but the times are roughly independent of the number of counters, because the instances are now running in parallel, unlike before. However, the times _per counter_ still grow linearly in the pause time and they are very close to the times per counter for `Counter` instances. Here's a repeat run to show what we mean:
```
count_to=10
num_counters = 1
for pause in range(0,6):
counter_trial(count_to, num_counters, pause*0.1)
ray_counter_trial(count_to, num_counters, pause*0.1)
```
Ignoring pause = 0, can you explain why the Ray times are consistently slightly larger than the non-Ray times? Study the implementations for `ray_counter_trial` and `RayCounter`. What code is synchronous and blocking vs. concurrent? In fact, is there _any_ code that is actually concurrent when you have just one instance of `Counter` or `RayCounter`?
To finish, let's look at the behavior for smaller pause steps, 0.0 to 0.1, and plot the times.
```
count_to=10
num_counters = 1
pauses=[]
durations=[]
ray_durations=[]
for pause in range(0,11):
pauses.append(pause*0.01)
_, _, _, duration = counter_trial(count_to, num_counters, pause*0.01)
durations.append(duration)
_, _, _, duration = ray_counter_trial(count_to, num_counters, pause*0.01)
ray_durations.append(duration)
from bokeh_util import two_lines_plot # utility we used in the previous lesson
from bokeh.plotting import show, figure
from bokeh.layouts import gridplot
two_lines = two_lines_plot(
"Pause vs. Execution Times (Smaller Is Better)", 'Pause', 'Time', 'No Ray', 'Ray',
pauses, durations, pauses, ray_durations,
x_axis_type='linear', y_axis_type='linear')
show(two_lines, plot_width=800, plot_height=400)
```
(Can't see the plot? Click [here](../images/actor-trials.png) for a screen shot.)
Once past zero pauses, the Ray overhead is constant. It doesn't grow with the pause time. Can you explain why it doesn't grow?
Run the next cell when you are finished with this notebook:
```
ray.shutdown() # "Undo ray.init()". Terminate all the processes started in this notebook.
```
The next lesson, [Why Ray?](03-Why-Ray.ipynb), takes a step back and explores the origin and motivations for Ray, and Ray's growing ecosystem of libraries and tools.
# Using the MANN Package to convert and prune an existing TensorFlow model
In this notebook, we utilize the MANN package on an existing TensorFlow model to convert existing layers to MANN layers and then prune the model.
```
# Load the MANN package and TensorFlow
import tensorflow as tf
import mann
# Load the data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train = x_train/255
x_test = x_test/255
# Load the model to be used
vgg16 = tf.keras.applications.VGG16(
include_top = False, # Don't include the top layers
weights = 'imagenet', # Load the imagenet weights
input_shape = x_train.shape[1:] # Input shape is the shape of the images
)
```
## Create the model to be trained
In the following cell, we create the model using the existing VGG model fed into fully-connected layers.
```
# Build the model using VGG16 and a few layers on top of it
model = tf.keras.models.Sequential()
model.add(vgg16)
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(512, activation = 'relu'))
model.add(tf.keras.layers.Dense(512, activation = 'relu'))
model.add(tf.keras.layers.Dense(512, activation = 'relu'))
model.add(tf.keras.layers.Dense(10, activation = 'softmax'))
# Compile the model
model.compile(
loss = 'sparse_categorical_crossentropy',
metrics = ['accuracy'],
optimizer = 'adam'
)
# Present model summary
model.summary()
```
## Convert the model and perform initial pruning
In the following cell, we convert the model and perform initial pruning of the model to 40%.
```
# Use the add_layer_masks function to add masking layers to the model
converted_model = mann.utils.add_layer_masks(model)
# Compile the model
converted_model.compile(
loss = 'sparse_categorical_crossentropy',
metrics = ['accuracy'],
optimizer = 'adam'
)
# Mask the model using magnitude as the metric
converted_model = mann.utils.mask_model(
converted_model,
40,
method = 'magnitude'
)
# Recompile the model for the weights to take effect
converted_model.compile(
loss = 'sparse_categorical_crossentropy',
metrics = ['accuracy'],
optimizer = 'adam'
)
# Present the model summary
converted_model.summary()
```
## Train and further prune the model
In this cell, we create the ActiveSparsification callback and train the model using that callback to prune the model as the model improves in performance.
```
# Create the sparsification callback object
callback = mann.utils.ActiveSparsification(
performance_cutoff = 0.75, # The accuracy score the model needs to achieve
starting_sparsification = 40, # Starting sparsification
sparsification_rate = 5 # Sparsification increase every time the model achieves performance cutoff
)
# Fit the model
converted_model.fit(
x_train,
y_train,
epochs = 1000,
callbacks = [callback],
validation_split = 0.2,
batch_size = 256
)
```
## Convert the model back to remove masking layers
In the following cell, we remove the layer masks created for training, while completely preserving performance.
```
# Convert the model back
model = mann.utils.remove_layer_masks(converted_model)
# Present the model
model.summary()
```
## Report accuracy and save model
```
# Get the predictions on test data
preds = model.predict(x_test).argmax(axis = 1)
# Print the accuracy
print(f'Model Accuracy: {(preds.flatten() == y_test.flatten()).sum().astype(int)/y_test.flatten().shape[0]}')
# Save the model
model.save('cifar_vgg16.h5')
```
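As a quick sanity check of the saved file, we can load it back with the standard Keras loader and confirm it reproduces the same predictions. Since the masking layers were removed above, the saved model should consist of standard Keras layers; if any MANN-specific layers remained, `custom_objects` would have to be passed to `load_model`.
```
# Reload the saved model and verify the predictions match
reloaded = tf.keras.models.load_model('cifar_vgg16.h5')
reloaded_preds = reloaded.predict(x_test).argmax(axis = 1)
print('Predictions identical:', bool((reloaded_preds == preds).all()))
```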
# Demistifying GANs in TensorFlow 2.0
```
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
print(tf.__version__)
```
## Global Parameters
```
BATCH_SIZE = 256
BUFFER_SIZE = 60000
EPOCHES = 300
OUTPUT_DIR = "img" # The output directory where the images of the generator are stored during training
```
## Loading the MNIST dataset
```
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
#train_images[0]
(train_images[0].shape)
train_images.shape
plt.imshow(train_images[1], cmap = "gray")
```
### Adding the Data to tf.Dataset
```
train_images = train_images.astype("float32")
train_images = (train_images - 127.5) / 127.5
train_dataset = tf.data.Dataset.from_tensor_slices(train_images.reshape(train_images.shape[0],784)).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
train_dataset
```
## Generator Network
```
class Generator(keras.Model):
def __init__(self, random_noise_size = 100):
super().__init__(name='generator')
#layers
self.input_layer = keras.layers.Dense(units = random_noise_size)
self.dense_1 = keras.layers.Dense(units = 128)
self.leaky_1 = keras.layers.LeakyReLU(alpha = 0.01)
self.dense_2 = keras.layers.Dense(units = 128)
self.leaky_2 = keras.layers.LeakyReLU(alpha = 0.01)
self.dense_3 = keras.layers.Dense(units = 256)
self.leaky_3 = keras.layers.LeakyReLU(alpha = 0.01)
self.output_layer = keras.layers.Dense(units=784, activation = "tanh")
def call(self, input_tensor):
## Definition of Forward Pass
x = self.input_layer(input_tensor)
x = self.dense_1(x)
x = self.leaky_1(x)
x = self.dense_2(x)
x = self.leaky_2(x)
x = self.dense_3(x)
x = self.leaky_3(x)
return self.output_layer(x)
def generate_noise(self,batch_size, random_noise_size):
return np.random.uniform(-1,1, size = (batch_size, random_noise_size))
```
### Objective Function
```
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits = True)
def generator_objective(dx_of_gx):
# Labels are ones here because the generator wants the discriminator to classify its images as real.
return cross_entropy(tf.ones_like(dx_of_gx), dx_of_gx)
```
### Plotting The Noise (Fake Image)
```
generator = Generator()
fake_image = generator(np.random.uniform(-1,1, size =(1,100)))
fake_image = tf.reshape(fake_image, shape = (28,28))
plt.imshow(fake_image, cmap = "gray")
```
## Discriminator Network
```
class Discriminator(keras.Model):
def __init__(self):
super().__init__(name = "discriminator")
#Layers
self.input_layer = keras.layers.Dense(units = 784)
self.dense_1 = keras.layers.Dense(units = 128)
self.leaky_1 = keras.layers.LeakyReLU(alpha = 0.01)
self.dense_2 = keras.layers.Dense(units = 128)
self.leaky_2 = keras.layers.LeakyReLU(alpha = 0.01)
self.dense_3 = keras.layers.Dense(units = 128)
self.leaky_3 = keras.layers.LeakyReLU(alpha = 0.01)
self.logits = keras.layers.Dense(units = 1) # This neuron tells us if the input is fake or real
def call(self, input_tensor):
## Definition of Forward Pass
        x = self.input_layer(input_tensor)
        x = self.dense_1(x)
        x = self.leaky_1(x)
        x = self.dense_2(x)
        x = self.leaky_2(x)
        x = self.dense_3(x)
        x = self.leaky_3(x)
        x = self.logits(x)
        return x
discriminator = Discriminator()
```
### Objective Function
```
def discriminator_objective(d_x, g_z, smoothing_factor = 0.9):
"""
d_x = real output
g_z = fake output
"""
real_loss = cross_entropy(tf.ones_like(d_x) * smoothing_factor, d_x) # If we feed the discriminator with real images, we assume they all are the right pictures --> Because of that label == 1
fake_loss = cross_entropy(tf.zeros_like(g_z), g_z) # Each noise we feed in are fakes image --> Because of that labels are 0
total_loss = real_loss + fake_loss
return total_loss
```
## Optimizer
```
generator_optimizer = keras.optimizers.RMSprop()
discriminator_optimizer = keras.optimizers.RMSprop()
```
## Training Functions
```
@tf.function()
def training_step(generator: Generator, discriminator: Discriminator, images: np.ndarray, k: int = 1, batch_size = 32):
for _ in range(k):
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
noise = generator.generate_noise(batch_size, 100)
g_z = generator(noise)
d_x_true = discriminator(images) # Trainable?
d_x_fake = discriminator(g_z) # dx_of_gx
discriminator_loss = discriminator_objective(d_x_true, d_x_fake)
# Adjusting Gradient of Discriminator
gradients_of_discriminator = disc_tape.gradient(discriminator_loss, discriminator.trainable_variables)
discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables)) # Takes a list of gradient and variables pairs
generator_loss = generator_objective(d_x_fake)
# Adjusting Gradient of Generator
gradients_of_generator = gen_tape.gradient(generator_loss, generator.trainable_variables)
generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
seed = np.random.uniform(-1,1, size = (1, 100)) # generating some noise for the training
# Just to make sure the output directory exists..
import os
directory=OUTPUT_DIR
if not os.path.exists(directory):
os.makedirs(directory)
def training(dataset, epoches):
for epoch in range(epoches):
for batch in dataset:
training_step(generator, discriminator, batch ,batch_size = BATCH_SIZE, k = 1)
## After ith epoch plot image
if (epoch % 50) == 0:
fake_image = tf.reshape(generator(seed), shape = (28,28))
print("{}/{} epoches".format(epoch, epoches))
#plt.imshow(fake_image, cmap = "gray")
plt.imsave("{}/{}.png".format(OUTPUT_DIR,epoch),fake_image, cmap = "gray")
%%time
training(train_dataset, EPOCHES)
```
## Testing the Generator
```
fake_image = generator(np.random.uniform(-1,1, size = (1, 100)))
plt.imshow(tf.reshape(fake_image, shape = (28,28)), cmap="gray")
```
## Obsolete Training Function
I tried to implement the training step with the k factor as described in the original paper. I achieved much worse results than with the function above. Maybe I did something wrong?!
```
@tf.function()
def training_step(generator: Generator, discriminator: Discriminator, images: np.ndarray, k: int = 1, batch_size=256):
for _ in range(k):
with tf.GradientTape() as disc_tape:
noise = generator.generate_noise(batch_size, 100)
g_z = generator(noise)
d_x_true = discriminator(images) # Trainable?
d_x_fake = discriminator(g_z) # dx_of_gx
discriminator_loss = discriminator_objective(d_x_true, d_x_fake, smoothing_factor=0.9)
# Adjusting Gradient of Discriminator
gradients_of_discriminator = disc_tape.gradient(discriminator_loss, discriminator.trainable_variables)
discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables)) # Takes a list of gradient and variables pairs
with tf.GradientTape() as gen_tape:
noise = generator.generate_noise(batch_size, 100)
d_x_fake = discriminator(generator(noise))
generator_loss = generator_objective(d_x_fake)
# Adjusting Gradient of Generator
gradients_of_generator = gen_tape.gradient(generator_loss, generator.trainable_variables)
generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
```
Tutorials table of content:
- [Tutorial 1: Run a first scenario](./Tutorial-1_Run_your_first_scenario.ipynb)
- [Tutorial 2: Add contributivity measurements methods](./Tutorial-2_Add_contributivity_measurement.ipynb)
- Tutorial 3: Use a custom dataset
# Tutorial 3: Use a custom dataset
With this example, we dive deeper into the potential of the library and run a scenario on a new dataset that we will implement ourselves.
## 1 - Prerequisites
In order to run this example, you'll need to:
* use Python 3.7+
* install this package https://pypi.org/project/mplc/
If you did not follow our first tutorials, it is highly recommended to [take a look at them!](https://github.com/SubstraFoundation/distributed-learning-contributivity/tree/master/notebooks/examples/)
```
!pip install mplc
```
## 2 - Context
In collaborative data science projects partners sometimes need to train a model on multiple datasets, contributed by different data providing partners. In such cases the partners might have to measure how much each dataset involved contributed to the performance of the model. This is useful for example as a basis to agree on how to share the reward of the ML challenge or the future revenues derived from the predictive model, or to detect possible corrupted datasets or partners not playing by the rules. The library explores this question and the opportunity to implement some mechanisms helping partners in such scenarios to measure each dataset's *contributivity* (as *contribution to the performance of the model*).
In the [first tutorial](./Tutorial-1_Run_your_first_scenario.ipynb), you learned how to parametrize and run a scenario.
In the [second tutorial](./Tutorial-2_Add_contributivity_measurement.ipynb), you discovered how to add to your scenario run one of the contributivity measurement methods available.
In this third tutorial, we are going to use a custom dataset.
### The dataset : Sentiment140
We are going to use a subset of the [sentiment140](http://help.sentiment140.com/for-students) dataset and try to classify short texts (tweets) between positive and negative sentiments.
*The whole machine learning process is inspired by this [article](https://medium.com/@alyafey22/sentiment-classification-from-keras-to-the-browser-7eda0d87cdc6)*
Please note that the library provides a really easy way to adapt a common, single-partner machine learning use case with TensorFlow to a multi-partner case, with contributivity measurement.
```
# imports
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import re
from keras.models import Sequential
from keras.layers import Dense, GRU, Embedding
from mplc.dataset import Dataset
from mplc.scenario import Scenario
sns.set()
```
## 3 - Generation, and preparation of the dataset
The scenario object needs a dataset object to run. In the previous tutorials, we indicated which one to generate automatically by passing the name of a pre-implemented dataset to the scenario constructor.
Here, we will create this dataset object ourselves and pass it to the scenario constructor. To do so, we are going to create a new class, which inherits from the mplc.Dataset abstract class.
A subclass of Dataset needs a few attributes and methods. First, the constructor of the Dataset object takes a few arguments.
### Dataset generator :
The structure of the dataset generator is represented below:
```python
dataset = Dataset(
"name",
x_train,
x_test,
y_train,
y_test,
input_shape,
num_classes,
)
```
#### Data labels
The data labels can take whatever shape you need, with only one condition: the labels must be convertible into string format, such that if label1 is equal to label2 then str(label1) is equal to str(label2), and conversely, if label1 differs from label2 then str(label1) differs from str(label2).
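As a quick, purely illustrative check (this snippet is an assumption for illustration, not part of the library), integer labels satisfy this condition, whereas mixing numeric types can silently break it:
```python
# Hypothetical illustration of the label/string condition
labels_ok = [0, 1, 1, 0]           # str(1) == str(1) and str(0) != str(1): condition holds
labels_risky = [1, 1.0, 0, 0.0]    # 1 == 1.0 but str(1) == "1" != "1.0" == str(1.0): condition broken
print([str(l) for l in labels_ok])
print([str(l) for l in labels_risky])
```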
#### Model generator
This method needs to be implemented; it provides the model to use, which will be trained by the `Scenario` object.
Note: It is mandatory to have loss and accuracy as metrics for your model.
#### Train/validation/test splits
The `Dataset` constructor (called via `super()`) must be provided with separate train and test sets (referred to as the global train set and global test set).
The global train set is then further split into a global train set and a global validation set by the function `train_val_split_global`. Note that if this function is not overridden, scikit-learn's `train_test_split` function is called by default, and 10% of the training set is used as the validation set.
In the multi-partner learning computations, the global validation set is used for early stopping and the global test set is used for performance evaluation.
The global train set is then split amongst partners (according to the scenario configuration) to populate the partners' local datasets.
For each partner, the local dataset will be split into separate train, validation and test sets, using the `train_test_split_local` and `train_val_split_local` methods.
These are not mandatory; by default the local dataset will not be split.
Note that currently the local validation and test sets are not used, but they are available for further developments of multi-partner learning and contributivity measurement approaches.
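For intuition only, the default global train/validation split described above boils down to something like the sketch below; the function name and the attributes actually used inside `mplc` are assumptions here, so check the library's source before overriding `train_val_split_global` yourself.
```python
from sklearn.model_selection import train_test_split

# Minimal sketch of the default behaviour: hold out 10% of the global train set for validation
def split_global_train_val(x_train, y_train, val_fraction=0.1):
    x_tr, x_val, y_tr, y_val = train_test_split(x_train, y_train, test_size=val_fraction)
    return x_tr, x_val, y_tr, y_val
```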
### Dataset construction
Now that we know all of that, we can create our dataset class.
#### Download and unzip data if needed
```
!curl https://cs.stanford.edu/people/alecmgo/trainingandtestdata.zip --output trainingandtestdata.zip
!unzip trainingandtestdata.zip
```
#### Define our Dataset class
```
class Sentiment140(Dataset):
def __init__(self):
x, y = self.load_data()
self.max_tokens = self.getMax(x)
self.num_words = None
        self.word_index = self.tokenize(x)
self.num_words = len(self.word_index)
x = self.create_sequences(x)
y = self.preprocess_dataset_labels(y)
self.input_shape = self.max_tokens
self.num_classes = len(np.unique(y))
print('length of the dictionary ',len(self.word_index))
print('max token ', self.max_tokens)
print('num classes', self.num_classes)
(x_train, x_test) = train_test_split(x, shuffle = False)
(y_train, y_test) = train_test_split(y, shuffle = False)
super(Sentiment140, self).__init__(dataset_name='sentiment140',
num_classes=self.num_classes,
input_shape=self.input_shape,
x_train=x_train,
y_train=y_train,
x_test=x_test,
y_test=y_test)
@staticmethod
def load_data(): # load the data, transform the .csv into usable dataframe
df_train = pd.read_csv("training.1600000.processed.noemoticon.csv", encoding = "raw_unicode_escape", header=None)
df_test = pd.read_csv("testdata.manual.2009.06.14.csv", encoding = "raw_unicode_escape", header=None)
df_train.columns = ["polarity", "id", "date", "query", "user", "text"]
df_test.columns = ["polarity", "id", "date", "query", "user", "text"]
# We keep only a fraction of the whole dataset
df_train = df_train.sample(frac = 0.1)
x = df_train["text"]
y = df_train["polarity"]
return x, y
# Preprocessing methods
@staticmethod
def process( txt):
out = re.sub(r'[^a-zA-Z0-9\s]', '', txt)
out = out.split()
out = [word.lower() for word in out]
return out
@staticmethod
def getMax( data):
max_tokens = 0
for txt in data:
if max_tokens < len(txt.split()):
max_tokens = len(txt.split())
return max_tokens
    def tokenize(self, x, thresh = 5):
count = dict()
idx = 1
word_index = dict()
for txt in x:
words = self.process(txt)
for word in words:
if word in count.keys():
count[word] += 1
else:
count[word] = 1
most_counts = [word for word in count.keys() if count[word]>=thresh]
for word in most_counts:
word_index[word] = idx
idx+=1
return word_index
def create_sequences(self,data):
tokens = []
for txt in data:
words = self.process(txt)
seq = [0] * self.max_tokens
i = 0
for word in words:
start = self.max_tokens-len(words)
if word.lower() in self.word_index.keys():
seq[i+start] = self.word_index[word]
i+=1
tokens.append(seq)
return np.array(tokens)
@staticmethod
def preprocess_dataset_labels( label):
label = np.array([e/4 for e in label])
return label
def generate_new_model(self): # Define the model generator
model = Sequential()
embedding_size = 8
model.add(Embedding(input_dim=self.num_words,
output_dim=embedding_size,
input_length=self.max_tokens,
name='layer_embedding'))
model.add(GRU(units=16, name = "gru_1",return_sequences=True))
model.add(GRU(units=8, name = "gru_2" ,return_sequences=True))
model.add(GRU(units=4, name= "gru_3"))
model.add(Dense(1, activation='sigmoid',name="dense_1"))
model.compile(loss='binary_crossentropy',
optimizer="Adam",
metrics=['accuracy'])
return model
```
#### Create dataset
And we can eventually generate our object!
```
my_dataset = Sentiment140()
```
## 4 - Create the custom scenario
The dataset can be passed to the scenario through the `dataset` argument.
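Below is a minimal sketch of such a scenario. The argument names (`partners_count`, `amounts_per_partner`, `epoch_count`, `minibatch_count`) and the `run()` call follow the previous tutorials and are assumptions here; the values are arbitrary, so adapt them to your needs.
```
# A minimal sketch, assuming the same Scenario arguments as in the previous tutorials
my_scenario = Scenario(partners_count=3,
                       amounts_per_partner=[0.4, 0.3, 0.3],
                       dataset=my_dataset,  # our custom Sentiment140 dataset
                       epoch_count=2,
                       minibatch_count=2)
my_scenario.run()
```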
# That's it!
Now you can explore our other tutorials for a better overview of what can be done with `mplc`!
This work is collaborative; enthusiasts are welcome to comment on open issues and PRs, or to open new ones.
Should you be interested in this open effort and would like to share any question, suggestion or input, you can use the following channels:
- This Github repository (issues or PRs)
- Substra Foundation's [Slack workspace](https://substra-workspace.slack.com/join/shared_invite/zt-cpyedcab-FHYgpy08efKJ2FCadE2yCA), channel `#workgroup-mpl-contributivity`
- Email: [email protected]

# Tutorial 11: Normalizing Flows for image modeling

**Filled notebook:**
[](https://github.com/phlippe/uvadlc_notebooks/blob/master/docs/tutorial_notebooks/tutorial11/NF_image_modeling.ipynb)
[](https://colab.research.google.com/github/phlippe/uvadlc_notebooks/blob/master/docs/tutorial_notebooks/tutorial11/NF_image_modeling.ipynb)
**Pre-trained models:**
[](https://github.com/phlippe/saved_models/tree/main/tutorial11)
[](https://drive.google.com/drive/folders/1gttZ5DSrpKwn9g3RcizqA5qG7NFLMgvv?usp=sharing)
**Recordings:**
[](https://youtu.be/U1fwesIusbg)
[](https://youtu.be/qMoGcRhVrF8)
[](https://youtu.be/YoAWiaEt41Y)
[](https://youtu.be/nTyDvn-ADJ4)
**Author:** Phillip Lippe
In this tutorial, we will take a closer look at complex, deep normalizing flows. The most popular, current application of deep normalizing flows is to model datasets of images. As for other generative models, images are a good domain to start working on because (1) CNNs are widely studied and strong models exist, (2) images are high-dimensional and complex, and (3) images are discrete integers. In this tutorial, we will review current advances in normalizing flows for image modeling, and get hands-on experience on coding normalizing flows. Note that normalizing flows are commonly parameter heavy and therefore computationally expensive. We will use relatively simple and shallow flows to save computational cost and allow you to run the notebook on CPU, but keep in mind that a simple way to improve the scores of the flows we study here is to make them deeper.
Throughout this notebook, we make use of [PyTorch Lightning](https://pytorch-lightning.readthedocs.io/en/latest/). The first cell imports our usual libraries.
```
## Standard libraries
import os
import math
import time
import numpy as np
## Imports for plotting
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('svg', 'pdf') # For export
from matplotlib.colors import to_rgb
import matplotlib
matplotlib.rcParams['lines.linewidth'] = 2.0
import seaborn as sns
sns.reset_orig()
## Progress bar
from tqdm.notebook import tqdm
## PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import torch.optim as optim
# Torchvision
import torchvision
from torchvision.datasets import MNIST
from torchvision import transforms
# PyTorch Lightning
try:
import pytorch_lightning as pl
except ModuleNotFoundError: # Google Colab does not have PyTorch Lightning installed by default. Hence, we do it here if necessary
!pip install --quiet pytorch-lightning>=1.4
import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
# Path to the folder where the datasets are/should be downloaded (e.g. MNIST)
DATASET_PATH = "../data"
# Path to the folder where the pretrained models are saved
CHECKPOINT_PATH = "../saved_models/tutorial11"
# Setting the seed
pl.seed_everything(42)
# Ensure that all operations are deterministic on GPU (if used) for reproducibility
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Fetching the device that will be used throughout this notebook
device = torch.device("cpu") if not torch.cuda.is_available() else torch.device("cuda:0")
print("Using device", device)
```
Again, we have a few pretrained models. We download them below to the specified path above.
```
import urllib.request
from urllib.error import HTTPError
# Github URL where saved models are stored for this tutorial
base_url = "https://raw.githubusercontent.com/phlippe/saved_models/main/tutorial11/"
# Files to download
pretrained_files = ["MNISTFlow_simple.ckpt", "MNISTFlow_vardeq.ckpt", "MNISTFlow_multiscale.ckpt"]
# Create checkpoint path if it doesn't exist yet
os.makedirs(CHECKPOINT_PATH, exist_ok=True)
# For each file, check whether it already exists. If not, try downloading it.
for file_name in pretrained_files:
file_path = os.path.join(CHECKPOINT_PATH, file_name)
if not os.path.isfile(file_path):
file_url = base_url + file_name
print(f"Downloading {file_url}...")
try:
urllib.request.urlretrieve(file_url, file_path)
except HTTPError as e:
print("Something went wrong. Please try to download the file from the GDrive folder, or contact the author with the full output including the following error:\n", e)
```
We will use the MNIST dataset in this notebook. MNIST constitutes, despite its simplicity, a challenge for small generative models as it requires the global understanding of an image. At the same time, we can easily judge whether generated images come from the same distribution as the dataset (i.e. represent real digits), or not.
To deal better with the discrete nature of the images, we transform them from a range of 0-1 to a range of 0-255 as integers.
```
# Convert images from 0-1 to 0-255 (integers)
def discretize(sample):
return (sample * 255).to(torch.int32)
# Transformations applied on each image => make them a tensor and discretize
transform = transforms.Compose([transforms.ToTensor(),
discretize])
# Loading the training dataset. We need to split it into a training and validation part
train_dataset = MNIST(root=DATASET_PATH, train=True, transform=transform, download=True)
pl.seed_everything(42)
train_set, val_set = torch.utils.data.random_split(train_dataset, [50000, 10000])
# Loading the test set
test_set = MNIST(root=DATASET_PATH, train=False, transform=transform, download=True)
# We define a set of data loaders that we can use for various purposes later.
# Note that for actually training a model, we will use different data loaders
# with a lower batch size.
train_loader = data.DataLoader(train_set, batch_size=256, shuffle=False, drop_last=False)
val_loader = data.DataLoader(val_set, batch_size=64, shuffle=False, drop_last=False, num_workers=4)
test_loader = data.DataLoader(test_set, batch_size=64, shuffle=False, drop_last=False, num_workers=4)
```
In addition, we will define below a function to simplify the visualization of images/samples. Some training examples of the MNIST dataset are shown below.
```
def show_imgs(imgs, title=None, row_size=4):
# Form a grid of pictures (we use max. 8 columns)
num_imgs = imgs.shape[0] if isinstance(imgs, torch.Tensor) else len(imgs)
is_int = imgs.dtype==torch.int32 if isinstance(imgs, torch.Tensor) else imgs[0].dtype==torch.int32
nrow = min(num_imgs, row_size)
ncol = int(math.ceil(num_imgs/nrow))
imgs = torchvision.utils.make_grid(imgs, nrow=nrow, pad_value=128 if is_int else 0.5)
np_imgs = imgs.cpu().numpy()
# Plot the grid
plt.figure(figsize=(1.5*nrow, 1.5*ncol))
plt.imshow(np.transpose(np_imgs, (1,2,0)), interpolation='nearest')
plt.axis('off')
if title is not None:
plt.title(title)
plt.show()
plt.close()
show_imgs([train_set[i][0] for i in range(8)])
```
## Normalizing Flows as generative model
In the previous lectures, we have seen Energy-based models, Variational Autoencoders (VAEs) and Generative Adversarial Networks (GANs) as examples of generative models. However, none of them explicitly learns the probability density function $p(x)$ of the real input data. While VAEs model a lower bound, energy-based models only implicitly learn the probability density. GANs, on the other hand, provide us with a sampling mechanism for generating new data, without offering a likelihood estimate. The generative model we will look at here, called Normalizing Flows, actually models the true data distribution $p(x)$ and provides us with an exact likelihood estimate. Below, we can visually compare VAEs, GANs and Flows
(figure credit - [Lilian Weng](https://lilianweng.github.io/lil-log/2018/10/13/flow-based-deep-generative-models.html)):
<center width="100%"><img src="comparison_GAN_VAE_NF.png" width="600px"></center>
The major difference compared to VAEs is that flows use *invertible* functions $f$ to map the input data $x$ to a latent representation $z$. To realize this, $z$ must be of the same shape as $x$. This is in contrast to VAEs where $z$ is usually much lower dimensional than the original input data. However, an invertible mapping also means that for every data point $x$, we have a corresponding latent representation $z$ which allows us to perform lossless reconstruction ($z$ to $x$). In the visualization above, this means that $x=x'$ for flows, no matter what invertible function $f$ and input $x$ we choose.
Nonetheless, how are normalizing flows modeling a probability density with an invertible function? The answer to this question is the rule for change of variables. Specifically, given a prior density $p_z(z)$ (e.g. Gaussian) and an invertible function $f$, we can determine $p_x(x)$ as follows:
$$
\begin{split}
\int p_x(x) dx & = \int p_z(z) dz = 1 \hspace{1cm}\text{(by definition of a probability distribution)}\\
\Leftrightarrow p_x(x) & = p_z(z) \left|\frac{dz}{dx}\right| = p_z(f(x)) \left|\frac{df(x)}{dx}\right|
\end{split}
$$
Hence, in order to determine the probability of $x$, we only need to determine its probability in latent space, and get the derivative of $f$. Note that this is for a univariate distribution, and $f$ is required to be invertible and smooth. For a multivariate case, the derivative becomes a Jacobian of which we need to take the determinant. As we usually use the log-likelihood as objective, we write the multivariate term with logarithms below:
$$
\log p_x(\mathbf{x}) = \log p_z(f(\mathbf{x})) + \log{} \left|\det \frac{df(\mathbf{x})}{d\mathbf{x}}\right|
$$
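To make the formula concrete, here is a small numeric sanity check (a toy example assumed for illustration, not part of the models below): for the scaling flow $f(x)=2x$ with a standard normal prior, the change-of-variables density must match the density of $x = z/2 \sim \mathcal{N}(0, 0.5^2)$ evaluated directly.
```
import torch

# Toy check of the change-of-variables formula for f(x) = 2x with a standard normal prior
prior = torch.distributions.Normal(0.0, 1.0)
x = torch.tensor(0.7)
log_px = prior.log_prob(2.0 * x) + torch.log(torch.tensor(2.0))  # log p_z(f(x)) + log|df/dx|
log_px_ref = torch.distributions.Normal(0.0, 0.5).log_prob(x)    # density of x = z/2 directly
print(log_px.item(), log_px_ref.item())                          # the two values agree
```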
Although we now know how a normalizing flow obtains its likelihood, it might not be clear what a normalizing flow does intuitively. For this, we should look from the inverse perspective of the flow starting with the prior probability density $p_z(z)$. If we apply an invertible function on it, we effectively "transform" its probability density. For instance, if $f^{-1}(z)=z+1$, we shift the density by one while still remaining a valid probability distribution, and being invertible. We can also apply more complex transformations, like scaling: $f^{-1}(z)=2z+1$, but there you might see a difference. When you scale, you also change the volume of the probability density, as for example on uniform distributions (figure credit - [Eric Jang](https://blog.evjang.com/2018/01/nf1.html)):
<center width="100%"><img src="uniform_flow.png" width="300px"></center>
You can see that the height of $p(y)$ should be lower than $p(x)$ after scaling. This change in volume represents $\left|\frac{df(x)}{dx}\right|$ in our equation above, and ensures that even after scaling, we still have a valid probability distribution. We can go on with making our function $f$ more complex. However, the more complex $f$ becomes, the harder it will be to find the inverse $f^{-1}$ of it, and to calculate the log-determinant of the Jacobian $\log{} \left|\det \frac{df(\mathbf{x})}{d\mathbf{x}}\right|$. An easier trick is to stack multiple invertible functions $f_{1,...,K}$ after each other, as all together, they still represent a single, invertible function. Using multiple, learnable invertible functions, a normalizing flow attempts to transform $p_z(z)$ slowly into a more complex distribution which should finally be $p_x(x)$. We visualize the idea below
(figure credit - [Lilian Weng](https://lilianweng.github.io/lil-log/2018/10/13/flow-based-deep-generative-models.html)):
<center width="100%"><img src="normalizing_flow_layout.png" width="700px"></center>
Starting from $z_0$, which follows the prior Gaussian distribution, we sequentially apply the invertible functions $f_1,f_2,...,f_K$, until $z_K$ represents $x$. Note that in the figure above, the functions $f$ represent the inverted function from $f$ we had above (here: $f:Z\to X$, above: $f:X\to Z$). This is just a different notation and has no impact on the actual flow design because all $f$ need to be invertible anyways. When we estimate the log likelihood of a data point $x$ as in the equations above, we run the flows in the opposite direction than visualized above. Multiple flow layers have been proposed that use a neural network as learnable parameters, such as the planar and radial flow. However, we will focus here on flows that are commonly used in image modeling, and will discuss them in the rest of the notebook along with the details of how to train a normalizing flow.
## Normalizing Flows on images
To become familiar with normalizing flows, especially for the application of image modeling, it is best to discuss the different elements in a flow along with the implementation. As a general concept, we want to build a normalizing flow that maps an input image (here MNIST) to an equally sized latent space:
<center width="100%" style="padding: 10px"><img src="image_to_gaussian.svg" width="450px"></center>
As a first step, we will implement a template of a normalizing flow in PyTorch Lightning. During training and validation, a normalizing flow performs density estimation in the forward direction. For this, we apply a series of flow transformations on the input $x$ and estimate the probability of the input by determining the probability of the transformed point $z$ given a prior, and the change of volume caused by the transformations. During inference, we can do both density estimation and sampling new points by inverting the flow transformations. Therefore, we define a function `_get_likelihood` which performs density estimation, and `sample` to generate new examples. The functions `training_step`, `validation_step` and `test_step` all make use of `_get_likelihood`.
The standard metric used in generative models, and in particular normalizing flows, is bits per dimension (bpd). Bpd is motivated from an information theory perspective and describes how many bits we would need to encode a particular example in our modeled distribution. The fewer bits we need, the more likely the example is under our distribution. When we test for the bits per dimension of our test dataset, we can judge whether our model generalizes to new samples of the dataset and didn't memorize the training dataset. In order to calculate the bits per dimension score, we can rely on the negative log-likelihood and change the log base (as bits are binary while NLL is usually exponential):
$$\text{bpd} = \text{nll} \cdot \log_2\left(\exp(1)\right) \cdot \left(\prod d_i\right)^{-1}$$
where $d_1,...,d_K$ are the dimensions of the input. For images, this would be the height, width and channel number. We divide the log likelihood by these extra dimensions to have a metric which we can compare for different image resolutions. In the original image space, MNIST examples have a bits per dimension score of 8 (we need 8 bits to encode each pixel as there are 256 possible values).
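As a quick sanity check of this formula (with an assumed NLL value, purely for illustration), converting a negative log-likelihood given in nats into bits per dimension for a $28\times 28\times 1$ MNIST image looks as follows:
```
import numpy as np

# Assumed example: an NLL of 600 nats for one 28x28x1 image corresponds to ~1.10 bpd
nll = 600.0
dims = 28 * 28 * 1
bpd = nll * np.log2(np.e) / dims
print(f"{bpd:.3f} bits per dimension")
```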
```
class ImageFlow(pl.LightningModule):
def __init__(self, flows, import_samples=8):
"""
Inputs:
flows - A list of flows (each a nn.Module) that should be applied on the images.
import_samples - Number of importance samples to use during testing (see explanation below). Can be changed at any time
"""
super().__init__()
self.flows = nn.ModuleList(flows)
self.import_samples = import_samples
# Create prior distribution for final latent space
self.prior = torch.distributions.normal.Normal(loc=0.0, scale=1.0)
# Example input for visualizing the graph
self.example_input_array = train_set[0][0].unsqueeze(dim=0)
def forward(self, imgs):
# The forward function is only used for visualizing the graph
return self._get_likelihood(imgs)
def encode(self, imgs):
# Given a batch of images, return the latent representation z and ldj of the transformations
z, ldj = imgs, torch.zeros(imgs.shape[0], device=self.device)
for flow in self.flows:
z, ldj = flow(z, ldj, reverse=False)
return z, ldj
def _get_likelihood(self, imgs, return_ll=False):
"""
Given a batch of images, return the likelihood of those.
If return_ll is True, this function returns the log likelihood of the input.
        Otherwise, the output metric is bits per dimension (scaled negative log likelihood)
"""
z, ldj = self.encode(imgs)
log_pz = self.prior.log_prob(z).sum(dim=[1,2,3])
log_px = ldj + log_pz
nll = -log_px
# Calculating bits per dimension
bpd = nll * np.log2(np.exp(1)) / np.prod(imgs.shape[1:])
return bpd.mean() if not return_ll else log_px
@torch.no_grad()
def sample(self, img_shape, z_init=None):
"""
Sample a batch of images from the flow.
"""
# Sample latent representation from prior
if z_init is None:
z = self.prior.sample(sample_shape=img_shape).to(device)
else:
z = z_init.to(device)
# Transform z to x by inverting the flows
ldj = torch.zeros(img_shape[0], device=device)
for flow in reversed(self.flows):
z, ldj = flow(z, ldj, reverse=True)
return z
def configure_optimizers(self):
optimizer = optim.Adam(self.parameters(), lr=1e-3)
        # A scheduler is optional, but can help in flows to get the last bpd improvement
scheduler = optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.99)
return [optimizer], [scheduler]
def training_step(self, batch, batch_idx):
# Normalizing flows are trained by maximum likelihood => return bpd
loss = self._get_likelihood(batch[0])
self.log('train_bpd', loss)
return loss
def validation_step(self, batch, batch_idx):
loss = self._get_likelihood(batch[0])
self.log('val_bpd', loss)
def test_step(self, batch, batch_idx):
# Perform importance sampling during testing => estimate likelihood M times for each image
samples = []
for _ in range(self.import_samples):
img_ll = self._get_likelihood(batch[0], return_ll=True)
samples.append(img_ll)
img_ll = torch.stack(samples, dim=-1)
# To average the probabilities, we need to go from log-space to exp, and back to log.
# Logsumexp provides us a stable implementation for this
img_ll = torch.logsumexp(img_ll, dim=-1) - np.log(self.import_samples)
# Calculate final bpd
bpd = -img_ll * np.log2(np.exp(1)) / np.prod(batch[0].shape[1:])
bpd = bpd.mean()
self.log('test_bpd', bpd)
```
The `test_step` function differs from the training and validation step in that it makes use of importance sampling. We will discuss the motivation and details behind this after understanding how flows model discrete images in continuous space.
### Dequantization
Normalizing flows rely on the rule of change of variables, which is naturally defined in continuous space. Applying flows directly on discrete data leads to undesired density models where arbitrarily high likelihoods are placed on a few particular values. See the illustration below:
<center><img src="dequantization_issue.svg" width="40%"/></center>
The black points represent the discrete points, and the green volume the density modeled by a normalizing flow in continuous space. The flow would continue to increase the likelihood for $x=0,1,2,3$ while having no volume on any other point. Remember that in continuous space, we have the constraint that the overall volume of the probability density must be 1 ($\int p(x)dx=1$). Otherwise, we don't model a probability distribution anymore. However, the discrete points $x=0,1,2,3$ represent delta peaks with no width in continuous space. This is why the flow can place an infinite high likelihood on these few points while still representing a distribution in continuous space. Nonetheless, the learned density does not tell us anything about the distribution among the discrete points, as in discrete space, the likelihoods of those four points would have to sum to 1, not to infinity.
To prevent such degenerated solutions, a common approach is to add a small amount of noise to each discrete value, which is also referred to as dequantization. Considering $x$ as an integer (as it is the case for images), the dequantized representation $v$ can be formulated as $v=x+u$ where $u\in[0,1)^D$. Thus, the discrete value $1$ is modeled by a distribution over the interval $[1.0, 2.0)$, the value $2$ by a volume over $[2.0, 3.0)$, etc. Our objective of modeling $p(x)$ becomes:
$$ p(x) = \int p(x+u)du = \int \frac{q(u|x)}{q(u|x)}p(x+u)du = \mathbb{E}_{u\sim q(u|x)}\left[\frac{p(x+u)}{q(u|x)} \right]$$
with $q(u|x)$ being the noise distribution. For now, we assume it to be uniform, which can also be written as $p(x)=\mathbb{E}_{u\sim U(0,1)^D}\left[p(x+u) \right]$.
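As a tiny standalone sketch of this idea (an assumed example, independent of the flow module implemented next): adding uniform noise turns each integer pixel value $x$ into a continuous value $v = x + u$ lying in the interval $[x, x+1)$.
```
import numpy as np

# Minimal sketch of uniform dequantization for 8-bit pixel values
x = np.array([0, 1, 128, 255], dtype=np.int32)   # discrete pixel values
u = np.random.uniform(0.0, 1.0, size=x.shape)    # u ~ U[0, 1)
v = x + u                                        # continuous representation, v in [x, x+1)
print(v)
```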
In the following, we will implement Dequantization as a flow transformation itself. After adding noise to the discrete values, we additionally transform the volume into a Gaussian-like shape. This is done by scaling $x+u$ between $0$ and $1$, and applying the inverse of the sigmoid function, $\sigma^{-1}(z) = \log z - \log (1-z)$. If we did not do this, we would face two problems:
1. The input is scaled between 0 and 256 while the prior distribution is a Gaussian with mean $0$ and standard deviation $1$. In the first iterations after initializing the parameters of the flow, we would have extremely low likelihoods for large values like $256$. This would cause the training to diverge instantaneously.
2. As the output distribution is a Gaussian, it is beneficial for the flow to have a similarly shaped input distribution. This will reduce the modeling complexity that is required by the flow.
Overall, we can implement dequantization as follows:
```
class Dequantization(nn.Module):
def __init__(self, alpha=1e-5, quants=256):
"""
Inputs:
alpha - small constant that is used to scale the original input.
Prevents dealing with values very close to 0 and 1 when inverting the sigmoid
quants - Number of possible discrete values (usually 256 for 8-bit image)
"""
super().__init__()
self.alpha = alpha
self.quants = quants
def forward(self, z, ldj, reverse=False):
if not reverse:
z, ldj = self.dequant(z, ldj)
z, ldj = self.sigmoid(z, ldj, reverse=True)
else:
z, ldj = self.sigmoid(z, ldj, reverse=False)
z = z * self.quants
ldj += np.log(self.quants) * np.prod(z.shape[1:])
z = torch.floor(z).clamp(min=0, max=self.quants-1).to(torch.int32)
return z, ldj
def sigmoid(self, z, ldj, reverse=False):
# Applies an invertible sigmoid transformation
if not reverse:
ldj += (-z-2*F.softplus(-z)).sum(dim=[1,2,3])
z = torch.sigmoid(z)
else:
z = z * (1 - self.alpha) + 0.5 * self.alpha # Scale to prevent boundaries 0 and 1
ldj += np.log(1 - self.alpha) * np.prod(z.shape[1:])
ldj += (-torch.log(z) - torch.log(1-z)).sum(dim=[1,2,3])
z = torch.log(z) - torch.log(1-z)
return z, ldj
def dequant(self, z, ldj):
# Transform discrete values to continuous volumes
z = z.to(torch.float32)
z = z + torch.rand_like(z).detach()
z = z / self.quants
ldj -= np.log(self.quants) * np.prod(z.shape[1:])
return z, ldj
```
A good check whether a flow is correctly implemented or not, is to verify that it is invertible. Hence, we will dequantize a randomly chosen training image, and then quantize it again. We would expect that we would get the exact same image out:
```
## Testing invertibility of dequantization layer
pl.seed_everything(42)
orig_img = train_set[0][0].unsqueeze(dim=0)
ldj = torch.zeros(1,)
dequant_module = Dequantization()
deq_img, ldj = dequant_module(orig_img, ldj, reverse=False)
reconst_img, ldj = dequant_module(deq_img, ldj, reverse=True)
d1, d2 = torch.where(orig_img.squeeze() != reconst_img.squeeze())
if len(d1) != 0:
print("Dequantization was not invertible.")
for i in range(d1.shape[0]):
print("Original value:", orig_img[0,0,d1[i], d2[i]].item())
print("Reconstructed value:", reconst_img[0,0,d1[i], d2[i]].item())
else:
print("Successfully inverted dequantization")
# Layer is not strictly invertible due to float precision constraints
# assert (orig_img == reconst_img).all().item()
```
In contrast to our expectation, the test fails. However, this is no reason to doubt our implementation here as only one single value is not equal to the original. This is caused by numerical inaccuracies in the sigmoid inversion. While the input space to the inverted sigmoid is scaled between 0 and 1, the output space is between $-\infty$ and $\infty$. And as we use 32 bits to represent the numbers (in addition to applying logs over and over again), such inaccuracies can occur and should not be worrisome. Nevertheless, it is good to be aware of them; the precision can be improved by using a double tensor (float64).
Finally, we can take our dequantization and actually visualize the distribution it transforms the discrete values into:
```
def visualize_dequantization(quants, prior=None):
"""
Function for visualizing the dequantization values of discrete values in continuous space
"""
# Prior over discrete values. If not given, a uniform is assumed
if prior is None:
prior = np.ones(quants, dtype=np.float32) / quants
prior = prior / prior.sum() * quants # In the following, we assume 1 for each value means uniform distribution
inp = torch.arange(-4, 4, 0.01).view(-1, 1, 1, 1) # Possible continuous values we want to consider
ldj = torch.zeros(inp.shape[0])
dequant_module = Dequantization(quants=quants)
# Invert dequantization on continuous values to find corresponding discrete value
out, ldj = dequant_module.forward(inp, ldj, reverse=True)
inp, out, prob = inp.squeeze().numpy(), out.squeeze().numpy(), ldj.exp().numpy()
prob = prob * prior[out] # Probability scaled by categorical prior
# Plot volumes and continuous distribution
sns.set_style("white")
fig = plt.figure(figsize=(6,3))
x_ticks = []
for v in np.unique(out):
indices = np.where(out==v)
color = to_rgb(f"C{v}")
plt.fill_between(inp[indices], prob[indices], np.zeros(indices[0].shape[0]), color=color+(0.5,), label=str(v))
plt.plot([inp[indices[0][0]]]*2, [0, prob[indices[0][0]]], color=color)
plt.plot([inp[indices[0][-1]]]*2, [0, prob[indices[0][-1]]], color=color)
x_ticks.append(inp[indices[0][0]])
x_ticks.append(inp.max())
plt.xticks(x_ticks, [f"{x:.1f}" for x in x_ticks])
plt.plot(inp,prob, color=(0.0,0.0,0.0))
# Set final plot properties
plt.ylim(0, prob.max()*1.1)
plt.xlim(inp.min(), inp.max())
plt.xlabel("z")
plt.ylabel("Probability")
plt.title(f"Dequantization distribution for {quants} discrete values")
plt.legend()
plt.show()
plt.close()
visualize_dequantization(quants=8)
```
The visualized distribution shows the sub-volumes that are assigned to the different discrete values. The value $0$ has its volume between $[-\infty, -1.9)$, the value $1$ is represented by the interval $[-1.9, -1.1)$, etc. The volume for each discrete value has the same probability mass. That's why the volumes close to the center (e.g. 3 and 4) have a smaller area on the z-axis than others ($z$ is being used to denote the output of the whole dequantization flow).
Effectively, the consecutive normalizing flow models discrete images by the following objective:
$$\log p(x) = \log \mathbb{E}_{u\sim q(u|x)}\left[\frac{p(x+u)}{q(u|x)} \right] \geq \mathbb{E}_{u}\left[\log \frac{p(x+u)}{q(u|x)} \right]$$
Although normalizing flows are exact in likelihood, we have a lower bound. Specifically, this is an example of the Jensen inequality because we need to move the log into the expectation so we can use Monte Carlo estimates. In general, the gap of this bound is considerably smaller than that of the ELBO in variational autoencoders. Actually, we can tighten the bound ourselves by estimating the expectation not with one, but with $M$ samples. In other words, we can apply importance sampling which leads to the following inequality:
$$\log p(x) = \log \mathbb{E}_{u\sim q(u|x)}\left[\frac{p(x+u)}{q(u|x)} \right] \geq \mathbb{E}_{u}\left[\log \frac{1}{M} \sum_{m=1}^{M} \frac{p(x+u_m)}{q(u_m|x)} \right] \geq \mathbb{E}_{u}\left[\log \frac{p(x+u)}{q(u|x)} \right]$$
The importance sampling $\frac{1}{M} \sum_{m=1}^{M} \frac{p(x+u_m)}{q(u_m|x)}$ becomes $\mathbb{E}_{u\sim q(u|x)}\left[\frac{p(x+u)}{q(u|x)} \right]$ if $M\to \infty$, so that the more samples we use, the tighter the bound is. During testing, we can make use of this property and have it implemented in `test_step` in `ImageFlow`. In theory, we could also use this tighter bound during training. However, related work has shown that this does not necessarily lead to an improvement given the additional computational cost, and it is more efficient to stick with a single estimate [5].
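To illustrate the log-space averaging used in `test_step` above (with random numbers standing in for the per-sample log-likelihood ratios, so the values themselves are meaningless), the $M$-sample estimate can be computed with `logsumexp`:
```
import torch

# Sketch of averaging M importance samples in log-space, as done in ImageFlow.test_step
M = 8
log_ratios = torch.randn(16, M) - 5.0   # stand-in for log p(x+u_m) - log q(u_m|x)
log_px_est = torch.logsumexp(log_ratios, dim=-1) - torch.log(torch.tensor(float(M)))
print(log_px_est.shape)                 # one estimate per image
```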
### Variational Dequantization
Dequantization uses a uniform distribution for the noise $u$ which effectively leads to images being represented as hypercubes (cube in high dimensions) with sharp borders. However, modeling such sharp borders is not easy for a flow as it uses smooth transformations to convert it into a Gaussian distribution.
Another way of looking at it is if we change the prior distribution in the previous visualization. Imagine we have independent Gaussian noise on pixels which is commonly the case for any real-world taken picture. Therefore, the flow would have to model a distribution as above, but with the individual volumes scaled as follows:
```
visualize_dequantization(quants=8, prior=np.array([0.075, 0.2, 0.4, 0.2, 0.075, 0.025, 0.0125, 0.0125]))
```
Transforming such a probability into a Gaussian is a difficult task, especially with such hard borders. Dequantization has therefore been extended to more sophisticated, learnable distributions beyond uniform in a variational framework. In particular, if we remember the learning objective $\log p(x) = \log \mathbb{E}_{u}\left[\frac{p(x+u)}{q(u|x)} \right]$, the uniform distribution can be replaced by a learned distribution $q_{\theta}(u|x)$ with support over $u\in[0,1)^D$. This approach is called Variational Dequantization and has been proposed by Ho et al. [3]. How can we learn such a distribution? We can use a second normalizing flow that takes $x$ as external input and learns a flexible distribution over $u$. To ensure a support over $[0,1)^D$, we can apply a sigmoid activation function as final flow transformation.
Inheriting the original dequantization class, we can implement variational dequantization as follows:
```
class VariationalDequantization(Dequantization):
def __init__(self, var_flows, alpha=1e-5):
"""
Inputs:
var_flows - A list of flow transformations to use for modeling q(u|x)
alpha - Small constant, see Dequantization for details
"""
super().__init__(alpha=alpha)
self.flows = nn.ModuleList(var_flows)
def dequant(self, z, ldj):
z = z.to(torch.float32)
img = (z / 255.0) * 2 - 1 # We condition the flows on x, i.e. the original image
# Prior of u is a uniform distribution as before
# As most flow transformations are defined on [-infinity,+infinity], we apply an inverse sigmoid first.
deq_noise = torch.rand_like(z).detach()
deq_noise, ldj = self.sigmoid(deq_noise, ldj, reverse=True)
for flow in self.flows:
deq_noise, ldj = flow(deq_noise, ldj, reverse=False, orig_img=img)
deq_noise, ldj = self.sigmoid(deq_noise, ldj, reverse=False)
# After the flows, apply u as in standard dequantization
z = (z + deq_noise) / 256.0
ldj -= np.log(256.0) * np.prod(z.shape[1:])
return z, ldj
```
Variational dequantization can be used as a substitute for dequantization. We will compare dequantization and variational dequantization in later experiments.
### Coupling layers
Next, we look at possible transformations to apply inside the flow. A recent popular flow layer, which works well in combination with deep neural networks, is the coupling layer introduced by Dinh et al. [1]. The input $z$ is arbitrarily split into two parts, $z_{1:j}$ and $z_{j+1:d}$, of which the first remains unchanged by the flow. Yet, $z_{1:j}$ is used to parameterize the transformation for the second part, $z_{j+1:d}$. Various transformations have been proposed in recent years [3,4], but here we will settle for the simplest and most efficient one: affine coupling. In this coupling layer, we apply an affine transformation by shifting the input by a bias $\mu$ and scaling it by $\sigma$. In other words, our transformation looks as follows:
$$z'_{j+1:d} = \mu_{\theta}(z_{1:j}) + \sigma_{\theta}(z_{1:j}) \odot z_{j+1:d}$$
The functions $\mu$ and $\sigma$ are implemented as a shared neural network, and the sum and multiplication are performed element-wise. The LDJ is thereby the sum of the logs of the scaling factors: $\sum_i \left[\log \sigma_{\theta}(z_{1:j})\right]_i$. Inverting the layer is as simple as subtracting the bias and dividing by the scale:
$$z_{j+1:d} = \left(z'_{j+1:d} - \mu_{\theta}(z_{1:j})\right) / \sigma_{\theta}(z_{1:j})$$
We can also visualize the coupling layer in form of a computation graph, where $z_1$ represents $z_{1:j}$, and $z_2$ represents $z_{j+1:d}$:
<center width="100%" style="padding: 10px"><img src="coupling_flow.svg" width="450px"></center>
In our implementation, we will realize the splitting of variables as masking. The variables to be transformed, $z_{j+1:d}$, are masked when passing $z$ to the shared network to predict the transformation parameters. When applying the transformation, we mask the parameters for $z_{1:j}$ so that we have an identity operation for those variables:
```
class CouplingLayer(nn.Module):
def __init__(self, network, mask, c_in):
"""
Coupling layer inside a normalizing flow.
Inputs:
network - A PyTorch nn.Module constituting the deep neural network for mu and sigma.
Output shape should be twice the channel size as the input.
mask - Binary mask (0 or 1) where 0 denotes that the element should be transformed,
while 1 means the latent will be used as input to the NN.
c_in - Number of input channels
"""
super().__init__()
self.network = network
self.scaling_factor = nn.Parameter(torch.zeros(c_in))
# Register mask as buffer as it is a tensor which is not a parameter,
# but should be part of the modules state.
self.register_buffer('mask', mask)
def forward(self, z, ldj, reverse=False, orig_img=None):
"""
Inputs:
z - Latent input to the flow
ldj - The current ldj of the previous flows.
The ldj of this layer will be added to this tensor.
reverse - If True, we apply the inverse of the layer.
orig_img (optional) - Only needed in VarDeq. Allows external
input to condition the flow on (e.g. original image)
"""
# Apply network to masked input
z_in = z * self.mask
if orig_img is None:
nn_out = self.network(z_in)
else:
nn_out = self.network(torch.cat([z_in, orig_img], dim=1))
s, t = nn_out.chunk(2, dim=1)
# Stabilize scaling output
s_fac = self.scaling_factor.exp().view(1, -1, 1, 1)
s = torch.tanh(s / s_fac) * s_fac
# Mask outputs (only transform the second part)
s = s * (1 - self.mask)
t = t * (1 - self.mask)
# Affine transformation
if not reverse:
# Whether we first shift and then scale, or the other way round,
# is a design choice, and usually does not have a big impact
z = (z + t) * torch.exp(s)
ldj += s.sum(dim=[1,2,3])
else:
z = (z * torch.exp(-s)) - t
ldj -= s.sum(dim=[1,2,3])
return z, ldj
```
For stabilization purposes, we apply a $\tanh$ activation function on the scaling output. This prevents sudden large output values for the scaling that can destabilize training. To still allow scaling factors smaller or larger than -1 and 1 respectively, we have a learnable parameter per dimension, called `scaling_factor`. This scales the tanh to different limits. Below, we visualize the effect of the scaling factor on the output activation of the scaling terms:
```
with torch.no_grad():
x = torch.arange(-5,5,0.01)
scaling_factors = [0.5, 1, 2]
sns.set()
fig, ax = plt.subplots(1, 3, figsize=(12,3))
for i, scale in enumerate(scaling_factors):
y = torch.tanh(x / scale) * scale
ax[i].plot(x.numpy(), y.numpy())
ax[i].set_title("Scaling factor: " + str(scale))
ax[i].set_ylim(-3, 3)
plt.subplots_adjust(wspace=0.4)
sns.reset_orig()
plt.show()
```
Coupling layers generalize to any masking technique we could think of. However, the most common approach for images is to split the input $z$ in half, using a checkerboard mask or channel mask. A checkerboard mask splits the variables across the height and width dimensions and assigns every other pixel to $z_{j+1:d}$. Thereby, the mask is shared across channels. In contrast, the channel mask assigns half of the channels to $z_{j+1:d}$, and the other half to $z_{1:j}$. Note that when we apply multiple coupling layers, we invert the masking for every other layer so that each variable is transformed a similar amount of times.
Let's implement a function that creates a checkerboard mask and a channel mask for us:
```
def create_checkerboard_mask(h, w, invert=False):
x, y = torch.arange(h, dtype=torch.int32), torch.arange(w, dtype=torch.int32)
xx, yy = torch.meshgrid(x, y)
mask = torch.fmod(xx + yy, 2)
mask = mask.to(torch.float32).view(1, 1, h, w)
if invert:
mask = 1 - mask
return mask
def create_channel_mask(c_in, invert=False):
mask = torch.cat([torch.ones(c_in//2, dtype=torch.float32),
torch.zeros(c_in-c_in//2, dtype=torch.float32)])
mask = mask.view(1, c_in, 1, 1)
if invert:
mask = 1 - mask
return mask
```
We can also visualize the corresponding masks for an image of size $8\times 8\times 2$ (2 channels):
```
checkerboard_mask = create_checkerboard_mask(h=8, w=8).expand(-1,2,-1,-1)
channel_mask = create_channel_mask(c_in=2).expand(-1,-1,8,8)
show_imgs(checkerboard_mask.transpose(0,1), "Checkerboard mask")
show_imgs(channel_mask.transpose(0,1), "Channel mask")
```
As a last aspect of coupling layers, we need to decide on the deep neural network we want to apply in the coupling layers. The input to the layers is an image, and hence we stick with a CNN. Because the input to a transformation depends on all transformations before, it is crucial to ensure a good gradient flow through the CNN back to the input, which can be optimally achieved by a ResNet-like architecture. Specifically, we use a Gated ResNet that adds a $\sigma$-gate to the skip connection, similarly to the input gate in LSTMs. The details are not necessarily important here, and the network is strongly inspired by Flow++ [3] in case you are interested in building even stronger models.
```
class ConcatELU(nn.Module):
"""
Activation function that applies ELU in both direction (inverted and plain).
Allows non-linearity while providing strong gradients for any input (important for final convolution)
"""
def forward(self, x):
return torch.cat([F.elu(x), F.elu(-x)], dim=1)
class LayerNormChannels(nn.Module):
def __init__(self, c_in):
"""
This module applies layer norm across channels in an image. Has been shown to work well with ResNet connections.
Inputs:
c_in - Number of channels of the input
"""
super().__init__()
self.layer_norm = nn.LayerNorm(c_in)
def forward(self, x):
x = x.permute(0, 2, 3, 1)
x = self.layer_norm(x)
x = x.permute(0, 3, 1, 2)
return x
class GatedConv(nn.Module):
def __init__(self, c_in, c_hidden):
"""
This module applies a two-layer convolutional ResNet block with input gate
Inputs:
c_in - Number of channels of the input
c_hidden - Number of hidden dimensions we want to model (usually similar to c_in)
"""
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(c_in, c_hidden, kernel_size=3, padding=1),
ConcatELU(),
nn.Conv2d(2*c_hidden, 2*c_in, kernel_size=1)
)
def forward(self, x):
out = self.net(x)
val, gate = out.chunk(2, dim=1)
return x + val * torch.sigmoid(gate)
class GatedConvNet(nn.Module):
def __init__(self, c_in, c_hidden=32, c_out=-1, num_layers=3):
"""
Module that summarizes the previous blocks to a full convolutional neural network.
Inputs:
c_in - Number of input channels
c_hidden - Number of hidden dimensions to use within the network
c_out - Number of output channels. If -1, 2 times the input channels are used (affine coupling)
num_layers - Number of gated ResNet blocks to apply
"""
super().__init__()
c_out = c_out if c_out > 0 else 2 * c_in
layers = []
layers += [nn.Conv2d(c_in, c_hidden, kernel_size=3, padding=1)]
for layer_index in range(num_layers):
layers += [GatedConv(c_hidden, c_hidden),
LayerNormChannels(c_hidden)]
layers += [ConcatELU(),
nn.Conv2d(2*c_hidden, c_out, kernel_size=3, padding=1)]
self.nn = nn.Sequential(*layers)
self.nn[-1].weight.data.zero_()
self.nn[-1].bias.data.zero_()
def forward(self, x):
return self.nn(x)
```
### Training loop
Finally, we can add Dequantization, Variational Dequantization and Coupling Layers together to build our full normalizing flow on MNIST images. We apply 8 coupling layers in the main flow, and 4 for variational dequantization if applied. We apply a checkerboard mask throughout the network as, with a single channel (black-white images), we cannot apply a channel mask. The overall architecture is visualized below.
<center width="100%" style="padding: 20px"><img src="vanilla_flow.svg" width="900px"></center>
```
def create_simple_flow(use_vardeq=True):
flow_layers = []
if use_vardeq:
vardeq_layers = [CouplingLayer(network=GatedConvNet(c_in=2, c_out=2, c_hidden=16),
mask=create_checkerboard_mask(h=28, w=28, invert=(i%2==1)),
c_in=1) for i in range(4)]
flow_layers += [VariationalDequantization(var_flows=vardeq_layers)]
else:
flow_layers += [Dequantization()]
for i in range(8):
flow_layers += [CouplingLayer(network=GatedConvNet(c_in=1, c_hidden=32),
mask=create_checkerboard_mask(h=28, w=28, invert=(i%2==1)),
c_in=1)]
flow_model = ImageFlow(flow_layers).to(device)
return flow_model
```
For implementing the training loop, we use the framework of PyTorch Lightning and reduce the code overhead. If interested, you can take a look at the generated tensorboard file, in particular the graph, to see an overview of the flow transformations that are applied. Note that we again provide pre-trained models (see later on in the notebook) as normalizing flows are particularly expensive to train. We have also run validation and testing for these models, as this can take some time with the added importance sampling.
```
def train_flow(flow, model_name="MNISTFlow"):
# Create a PyTorch Lightning trainer
trainer = pl.Trainer(default_root_dir=os.path.join(CHECKPOINT_PATH, model_name),
gpus=1 if torch.cuda.is_available() else 0,
max_epochs=200,
gradient_clip_val=1.0,
callbacks=[ModelCheckpoint(save_weights_only=True, mode="min", monitor="val_bpd"),
LearningRateMonitor("epoch")])
trainer.logger._log_graph = True
trainer.logger._default_hp_metric = None # Optional logging argument that we don't need
train_data_loader = data.DataLoader(train_set, batch_size=128, shuffle=True, drop_last=True, pin_memory=True, num_workers=8)
result = None
# Check whether pretrained model exists. If yes, load it and skip training
pretrained_filename = os.path.join(CHECKPOINT_PATH, model_name + ".ckpt")
if os.path.isfile(pretrained_filename):
print("Found pretrained model, loading...")
ckpt = torch.load(pretrained_filename)
flow.load_state_dict(ckpt['state_dict'])
result = ckpt.get("result", None)
else:
print("Start training", model_name)
trainer.fit(flow, train_data_loader, val_loader)
# Test best model on validation and test set if no result has been found
# Testing can be expensive due to the importance sampling.
if result is None:
val_result = trainer.test(flow, val_loader, verbose=False)
start_time = time.time()
test_result = trainer.test(flow, test_loader, verbose=False)
duration = time.time() - start_time
result = {"test": test_result, "val": val_result, "time": duration / len(test_loader) / flow.import_samples}
return flow, result
```
## Multi-scale architecture
One disadvantage of normalizing flows is that they operate on the exact same dimensions as the input. If the input is high-dimensional, so is the latent space, which requires larger computational cost to learn suitable transformations. However, particularly in the image domain, many pixels contain less information in the sense that we could remove them without losing the semantic information of the image.
Based on this intuition, deep normalizing flows on images commonly apply a multi-scale architecture [1]. After the first $N$ flow transformations, we split off half of the latent dimensions and directly evaluate them on the prior. The other half is run through $N$ more flow transformations, and depending on the size of the input, we split it again in half or stop overall at this position. The two operations involved in this setup are `Squeeze` and `Split` which we will review more closely and implement below.
### Squeeze and Split
When we want to remove half of the pixels in an image, we have the problem of deciding which variables to cut, and how to rearrange the image. Thus, the squeezing operation is commonly used before split, which divides the image into subsquares of shape $2\times 2\times C$, and reshapes them into $1\times 1\times 4C$ blocks. Effectively, we reduce the height and width of the image by a factor of 2 while scaling the number of channels by 4. Afterwards, we can perform the split operation over channels without the need of rearranging the pixels. The smaller scale also makes the overall architecture more efficient. Visually, the squeeze operation should transform the input as follows:
<center><img src="Squeeze_operation.svg" width="40%"/></center>
The input of $4\times 4\times 1$ is scaled to $2\times 2\times 4$ following the idea of grouping the pixels in $2\times 2\times 1$ subsquares. Next, let's try to implement this layer:
```
class SqueezeFlow(nn.Module):
def forward(self, z, ldj, reverse=False):
B, C, H, W = z.shape
if not reverse:
# Forward direction: H x W x C => H/2 x W/2 x 4C
z = z.reshape(B, C, H//2, 2, W//2, 2)
z = z.permute(0, 1, 3, 5, 2, 4)
z = z.reshape(B, 4*C, H//2, W//2)
else:
# Reverse direction: H/2 x W/2 x 4C => H x W x C
z = z.reshape(B, C//4, 2, 2, H, W)
z = z.permute(0, 1, 4, 2, 5, 3)
z = z.reshape(B, C//4, H*2, W*2)
return z, ldj
```
Before moving on, we can verify our implementation by comparing our output with the example figure above:
```
sq_flow = SqueezeFlow()
rand_img = torch.arange(1,17).view(1, 1, 4, 4)
print("Image (before)\n", rand_img)
forward_img, _ = sq_flow(rand_img, ldj=None, reverse=False)
print("\nImage (forward)\n", forward_img.permute(0,2,3,1)) # Permute for readability
reconst_img, _ = sq_flow(forward_img, ldj=None, reverse=True)
print("\nImage (reverse)\n", reconst_img)
```
The split operation divides the input into two parts, and evaluates one part directly on the prior. So that our flow operation fits the implementation of the previous layers, we will return the prior log-probability of the split-off part as the log determinant Jacobian of the layer. This has the same effect as if we combined all variable splits at the end of the flow and evaluated them together on the prior.
```
class SplitFlow(nn.Module):
def __init__(self):
super().__init__()
self.prior = torch.distributions.normal.Normal(loc=0.0, scale=1.0)
def forward(self, z, ldj, reverse=False):
if not reverse:
z, z_split = z.chunk(2, dim=1)
ldj += self.prior.log_prob(z_split).sum(dim=[1,2,3])
else:
z_split = self.prior.sample(sample_shape=z.shape).to(device)
z = torch.cat([z, z_split], dim=1)
ldj -= self.prior.log_prob(z_split).sum(dim=[1,2,3])
return z, ldj
```
### Building a multi-scale flow
After defining the squeeze and split operations, we are finally able to build our own multi-scale flow. Deep normalizing flows such as Glow and Flow++ [2,3] often apply a split operation directly after squeezing. However, with shallow flows, we need to be more thoughtful about where to place the split operation, as we need at least a minimum number of transformations on each variable. Our setup is inspired by the original RealNVP architecture [1], which is shallower than other, more recent state-of-the-art architectures.
Hence, for the MNIST dataset, we will apply the first squeeze operation after two coupling layers, but do not apply a split operation yet. Because we have only used two coupling layers and each variable has only been transformed once, a split operation would be too early. We apply two more coupling layers before finally applying a split flow and squeezing again. The last four coupling layers operate on a scale of $7\times 7\times 8$. The full flow architecture is shown below.
<center width="100%" style="padding: 20px"><img src="multiscale_flow.svg" width="1100px"></center>
Note that while the feature maps inside the coupling layers shrink with the height and width of the input, the increased number of channels is not directly taken into account. To counteract this, we increase the hidden dimensions of the coupling layers operating on the squeezed input. The dimensions are often scaled by 2, as this approximately increases the computation cost by 4, canceling out the savings of the squeezing operation. However, we will choose the hidden dimensionalities $32, 48, 64$ for the three scales respectively to keep the number of parameters reasonable and to show the efficiency of multi-scale architectures.
```
def create_multiscale_flow():
flow_layers = []
vardeq_layers = [CouplingLayer(network=GatedConvNet(c_in=2, c_out=2, c_hidden=16),
mask=create_checkerboard_mask(h=28, w=28, invert=(i%2==1)),
c_in=1) for i in range(4)]
flow_layers += [VariationalDequantization(vardeq_layers)]
flow_layers += [CouplingLayer(network=GatedConvNet(c_in=1, c_hidden=32),
mask=create_checkerboard_mask(h=28, w=28, invert=(i%2==1)),
c_in=1) for i in range(2)]
flow_layers += [SqueezeFlow()]
for i in range(2):
flow_layers += [CouplingLayer(network=GatedConvNet(c_in=4, c_hidden=48),
mask=create_channel_mask(c_in=4, invert=(i%2==1)),
c_in=4)]
flow_layers += [SplitFlow(),
SqueezeFlow()]
for i in range(4):
flow_layers += [CouplingLayer(network=GatedConvNet(c_in=8, c_hidden=64),
mask=create_channel_mask(c_in=8, invert=(i%2==1)),
c_in=8)]
flow_model = ImageFlow(flow_layers).to(device)
return flow_model
```
We can show the difference in number of parameters below:
```
def print_num_params(model):
num_params = sum([np.prod(p.shape) for p in model.parameters()])
print("Number of parameters: {:,}".format(num_params))
print_num_params(create_simple_flow(use_vardeq=False))
print_num_params(create_simple_flow(use_vardeq=True))
print_num_params(create_multiscale_flow())
```
Although the multi-scale flow has almost 3 times the parameters of the single scale flow, it is not necessarily more computationally expensive than its counterpart. We will compare the runtime in the following experiments as well.
## Analysing the flows
In the last part of the notebook, we will train all the models we have implemented above, and try to analyze the effect of the multi-scale architecture and variational dequantization.
### Training flow variants
Before we can analyse the flow models, we need to train them first. We provide pre-trained models that contain the validation and test performance, and run-time information. As flow models are computationally expensive, we advise you to rely on those pretrained models for a first run through the notebook.
```
flow_dict = {"simple": {}, "vardeq": {}, "multiscale": {}}
flow_dict["simple"]["model"], flow_dict["simple"]["result"] = train_flow(create_simple_flow(use_vardeq=False), model_name="MNISTFlow_simple")
flow_dict["vardeq"]["model"], flow_dict["vardeq"]["result"] = train_flow(create_simple_flow(use_vardeq=True), model_name="MNISTFlow_vardeq")
flow_dict["multiscale"]["model"], flow_dict["multiscale"]["result"] = train_flow(create_multiscale_flow(), model_name="MNISTFlow_multiscale")
```
### Density modeling and sampling
Firstly, we can compare the models on their quantitative results. The following table shows all important statistics. The inference time specifies the time needed to determine the probability for a batch of 64 images for each model, and the sampling time the duration it took to sample a batch of 64 images.
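If you want to measure the sampling time yourself, a minimal sketch could look like the following (the batch size of 64 matches the table; the measurement approach and variable names here are our own, not part of the stored results):
```python
import time

# Hypothetical sketch: time how long sampling a batch of 64 images takes
# for the multi-scale model
model = flow_dict["multiscale"]["model"]
start_time = time.time()
with torch.no_grad():
    _ = model.sample(img_shape=[64, 8, 7, 7])
print("Sampling 64 images took %.0f ms" % (1000 * (time.time() - start_time)))
```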
```
%%html
<!-- Some HTML code to increase font size in the following table -->
<style>
th {font-size: 120%;}
td {font-size: 120%;}
</style>
import tabulate
from IPython.display import display, HTML
table = [[key,
"%4.3f bpd" % flow_dict[key]["result"]["val"][0]["test_bpd"],
"%4.3f bpd" % flow_dict[key]["result"]["test"][0]["test_bpd"],
"%2.0f ms" % (1000 * flow_dict[key]["result"]["time"]),
"%2.0f ms" % (1000 * flow_dict[key]["result"].get("samp_time", 0)),
"{:,}".format(sum([np.prod(p.shape) for p in flow_dict[key]["model"].parameters()]))]
for key in flow_dict]
display(HTML(tabulate.tabulate(table, tablefmt='html', headers=["Model", "Validation Bpd", "Test Bpd", "Inference time", "Sampling time", "Num Parameters"])))
```
As we initially expected, using variational dequantization improves upon standard dequantization in terms of bits per dimension. Although a difference of 0.04 bpd does not seem impressive at first, it is a considerable step for generative models (most state-of-the-art models improve upon previous models by 0.02-0.1 bpd on CIFAR, which has roughly three times as high bpd). While it takes longer to evaluate the probability of an image due to the variational dequantization, which also leads to a longer training time, it does not have an effect on the sampling time. This is because inverting variational dequantization is the same as inverting standard dequantization: finding the next lower integer.
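In code, this inversion boils down to a single flooring operation; a minimal sketch (the tensor below is only a stand-in for the flow's continuous output scaled back to $[0, 256)$):
```python
# Sketch: inverting (variational) dequantization at sampling time is just
# mapping each continuous value to the next lower integer pixel value.
z = torch.rand(4, 1, 28, 28) * 256   # stand-in for dequantized outputs
discrete_img = torch.floor(z).clamp(min=0, max=255).to(torch.int32)
```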
When we compare the two models to the multi-scale architecture, we can see that the bits per dimension score again dropped by about 0.04 bpd. Additionally, the inference time and sampling time improved notably despite the model having more parameters. Thus, we see that the multi-scale flow is not only stronger for density modeling, but also more efficient.
Next, we can test the sampling quality of the models. We should note that the samples for variational dequantization and standard dequantization are very similar, and hence we visualize here only the ones for variational dequantization and the multi-scale model. However, feel free to also test out the `"simple"` model. The seeds are set to obtain reproducible generations and are not cherry-picked.
```
pl.seed_everything(44)
samples = flow_dict["vardeq"]["model"].sample(img_shape=[16,1,28,28])
show_imgs(samples.cpu())
pl.seed_everything(44)
samples = flow_dict["multiscale"]["model"].sample(img_shape=[16,8,7,7])
show_imgs(samples.cpu())
```
From the few samples, we can see a clear difference between the simple and the multi-scale model. The single-scale model has only learned local, small correlations, while the multi-scale model was able to learn full, global relations that form digits. This showcases another benefit of the multi-scale model. In contrast to VAEs, the outputs are sharp, as normalizing flows can naturally model complex, multi-modal distributions while VAEs suffer from the independent decoder output noise. Nevertheless, the samples from this flow are far from perfect, as not all samples show true digits.
### Interpolation in latent space
Another popular test for the smoothness of the latent space of generative models is to interpolate between two training examples. As normalizing flows are strictly invertible, we can guarantee that any image is represented in the latent space. We again compare the variational dequantization model with the multi-scale model below.
```
@torch.no_grad()
def interpolate(model, img1, img2, num_steps=8):
"""
Inputs:
model - object of ImageFlow class that represents the (trained) flow model
img1, img2 - Image tensors of shape [1, 28, 28]. Images between which should be interpolated.
num_steps - Number of interpolation steps. 8 interpolation steps mean 6 intermediate pictures besides img1 and img2
"""
imgs = torch.stack([img1, img2], dim=0).to(model.device)
z, _ = model.encode(imgs)
alpha = torch.linspace(0, 1, steps=num_steps, device=z.device).view(-1, 1, 1, 1)
interpolations = z[0:1] * alpha + z[1:2] * (1 - alpha)
interp_imgs = model.sample(interpolations.shape[:1] + imgs.shape[1:], z_init=interpolations)
show_imgs(interp_imgs, row_size=8)
exmp_imgs, _ = next(iter(train_loader))
pl.seed_everything(42)
for i in range(2):
interpolate(flow_dict["vardeq"]["model"], exmp_imgs[2*i], exmp_imgs[2*i+1])
pl.seed_everything(42)
for i in range(2):
interpolate(flow_dict["multiscale"]["model"], exmp_imgs[2*i], exmp_imgs[2*i+1])
```
The interpolations of the multi-scale model result in more realistic digits (first row $7\leftrightarrow 8\leftrightarrow 6$, second row $9\leftrightarrow 4\leftrightarrow 6$), while the variational dequantization model focuses on local patterns that globally do not form a digit. For the multi-scale model, we actually did not do the "true" interpolation between the two images, as we did not consider the variables that were split along the flow (they have been sampled randomly for all samples). However, as we will see in the next experiment, the early variables do not affect the overall image much.
### Visualization of latents in different levels of multi-scale
In the following we will focus more on the multi-scale flow. We want to analyse what information is stored in the variables split at early layers, and what information in the final variables. For this, we sample 8 images where each of them shares the same final latent variables, but differs in the other part of the latent variables. Below we visualize three examples of this:
```
pl.seed_everything(44)
for _ in range(3):
z_init = flow_dict["multiscale"]["model"].prior.sample(sample_shape=[1,8,7,7])
z_init = z_init.expand(8, -1, -1, -1)
samples = flow_dict["multiscale"]["model"].sample(img_shape=z_init.shape, z_init=z_init)
show_imgs(samples.cpu())
```
We see that the early split variables indeed have a smaller effect on the image. Still, small differences can be spotted when we look carefully at the borders of the digits. For instance, the hole at the top of the 8 changes between samples although all of them represent the same coarse structure. This shows that the flow indeed learns to separate the higher-level information into the final variables, while the early split ones contain local noise patterns.
### Visualizing Dequantization
As a final part of this notebook, we will look at the effect of variational dequantization. We motivated variational dequantization by the issue that sharp edges/borders are difficult to model, and that a flow would rather prefer smooth, prior-like distributions. To check what noise distribution $q(u|x)$ the flows in the variational dequantization module have learned, we can plot a histogram of output values from the dequantization and variational dequantization modules.
```
def visualize_dequant_distribution(model : ImageFlow, imgs : torch.Tensor, title:str=None):
"""
Inputs:
model - The flow of which we want to visualize the dequantization distribution
imgs - Example training images of which we want to visualize the dequantization distribution
"""
imgs = imgs.to(device)
ldj = torch.zeros(imgs.shape[0], dtype=torch.float32).to(device)
with torch.no_grad():
dequant_vals = []
for _ in tqdm(range(8), leave=False):
d, _ = model.flows[0](imgs, ldj, reverse=False)
dequant_vals.append(d)
dequant_vals = torch.cat(dequant_vals, dim=0)
dequant_vals = dequant_vals.view(-1).cpu().numpy()
sns.set()
plt.figure(figsize=(10,3))
plt.hist(dequant_vals, bins=256, color=to_rgb("C0")+(0.5,), edgecolor="C0", density=True)
if title is not None:
plt.title(title)
plt.show()
plt.close()
sample_imgs, _ = next(iter(train_loader))
visualize_dequant_distribution(flow_dict["simple"]["model"], sample_imgs, title="Dequantization")
visualize_dequant_distribution(flow_dict["vardeq"]["model"], sample_imgs, title="Variational dequantization")
```
The dequantization distribution in the first plot shows that the MNIST images have a strong bias towards 0 (black), and that their distribution has a sharp border as mentioned before. The variational dequantization module has indeed learned a much smoother distribution with a Gaussian-like curve which can be modeled much better. For the other values, we would need to visualize the distribution $q(u|x)$ on a deeper level, depending on $x$. However, as all $u$'s interact and depend on each other, we would need to visualize a distribution in 784 dimensions, which is not that intuitive anymore.
## Conclusion
In conclusion, we have seen how to implement our own normalizing flow, and what difficulties arise if we want to apply it to images. Dequantization is a crucial step in mapping the discrete images into continuous space to prevent undesirable delta-peak solutions. While dequantization creates hypercubes with hard borders, variational dequantization allows us to fit a flow much better to the data. This allows us to obtain a lower bits per dimension score, while not affecting the sampling speed. The most common flow element, the coupling layer, is simple to implement, and yet effective. Furthermore, multi-scale architectures help to capture the global image context while allowing us to efficiently scale up the flow. Normalizing flows are an interesting alternative to VAEs as they allow an exact likelihood estimate in continuous space, and we have the guarantee that every possible input $x$ has a corresponding latent vector $z$. However, even beyond continuous inputs and images, flows can be applied and allow us to exploit the data structure in latent space, as e.g. on graphs for the task of molecule generation [6]. Recent advances in [Neural ODEs](https://arxiv.org/pdf/1806.07366.pdf) allow a flow with an infinite number of layers, called Continuous Normalizing Flows, whose potential is yet to be fully explored. Overall, normalizing flows are an exciting research area which will continue to develop over the next couple of years.
## References
[1] Dinh, L., Sohl-Dickstein, J., and Bengio, S. (2017). “Density estimation using Real NVP,” In: 5th International Conference on Learning Representations, ICLR 2017. [Link](https://arxiv.org/abs/1605.08803)
[2] Kingma, D. P., and Dhariwal, P. (2018). “Glow: Generative Flow with Invertible 1x1 Convolutions,” In: Advances in Neural Information Processing Systems, vol. 31, pp. 10215--10224. [Link](http://papers.nips.cc/paper/8224-glow-generative-flow-with-invertible-1x1-convolutions.pdf)
[3] Ho, J., Chen, X., Srinivas, A., Duan, Y., and Abbeel, P. (2019). “Flow++: Improving Flow-Based Generative Models with Variational Dequantization and Architecture Design,” in Proceedings of the 36th International Conference on Machine Learning, vol. 97, pp. 2722–2730. [Link](https://arxiv.org/abs/1902.00275)
[4] Durkan, C., Bekasov, A., Murray, I., and Papamakarios, G. (2019). “Neural Spline Flows,” In: Advances in Neural Information Processing Systems, pp. 7509–7520. [Link](http://papers.neurips.cc/paper/8969-neural-spline-flows.pdf)
[5] Hoogeboom, E., Cohen, T. S., and Tomczak, J. M. (2020). “Learning Discrete Distributions by Dequantization,” arXiv preprint arXiv2001.11235v1. [Link](https://arxiv.org/abs/2001.11235)
[6] Lippe, P., and Gavves, E. (2021). “Categorical Normalizing Flows via Continuous Transformations,” In: International Conference on Learning Representations, ICLR 2021. [Link](https://openreview.net/pdf?id=-GLNZeVDuik)
---
[](https://github.com/phlippe/uvadlc_notebooks/) If you found this tutorial helpful, consider ⭐-ing our repository.
[](https://github.com/phlippe/uvadlc_notebooks/issues) For any questions, typos, or bugs that you found, please raise an issue on GitHub.
---
## Polygon Environment Building
Devising scenarios for the polygon-based environments.
```
%load_ext autoreload
%autoreload 2
from mpb import MPB, MultipleMPB
from plot_stats import plot_planner_stats, plot_smoother_stats
from utils import latexify
from table import latex_table
from definitions import *
import matplotlib as mpl
import sys, os
mpl.rcParams['mathtext.fontset'] = 'cm'
# make sure to not use Level-3 fonts
mpl.rcParams['pdf.fonttype'] = 42
import matplotlib.pyplot as plt
from copy import deepcopy
%config InlineBackend.figure_format='retina'
```
### Polygon Environments
```
def visualize(scenario: str, start: {str: float}, goal: {str: float}, robot_model: str = None):
m = MPB()
m["max_planning_time"] = 60
m["env.start"] = start
m["env.goal"] = goal
m["env.type"] = "polygon"
m["env.polygon.source"] = "polygon_mazes/%s.svg" % scenario
if robot_model:
print("Using robot model %s." % robot_model)
m["env.collision.robot_shape_source"] = robot_model
m.set_planners(['informed_rrt_star'])
m.set_planners(['bfmt'])
m["steer.car_turning_radius"] = 2
# m.set_planners(["sbpl_mha"])
m["sbpl.scaling"] = 1
if m.run(id="test_%s" % scenario, runs=1) == 0:
m.visualize_trajectories(draw_start_goal_thetas=True, plot_every_nth_polygon=10, silence=True, save_file="plots/%s.pdf" % scenario)
m.print_info()
# visualize("parking2",
# {"theta": -1.57, "x": 12.3, "y": -2.73},
# {"theta": 0, "x": 2.5, "y": -7.27})
# visualize("parking2",
# {"theta": -1.57, "x": 12.3, "y": -2.73},
# {"theta": 3.14, "x": 2.5, "y": -7.27})
scenarios = [
("parking1", {"theta": 0, "x": 2, "y": -7.27}, {"theta": -1.58, "x": 9, "y": -11.72}),
("parking2", {"theta": 0, "x": 2.5, "y": -7.27}, {"theta": -1.57, "x": 12, "y": -3}),
("parking3", {"theta": 0, "x": 3.82, "y": -13}, {"theta": 0, "x": 29, "y": -15.5}),
("warehouse", {"theta": -1.58, "x": 7.5, "y": -10}, {"theta": 1.58, "x": 76.5, "y": -10}, "polygon_mazes/warehouse_robot.svg"),
("warehouse2", {"theta": -1.58, "x": 7.5, "y": -10}, {"theta": -1.58, "x": 116, "y": -70}, "polygon_mazes/warehouse_robot.svg")
]
list(map(lambda x: visualize(*x), scenarios));
```
# Figurehead
Figure 1 to showcase Bench-MR.
```
m = MPB()
scenario = "warehouse"
m["max_planning_time"] = 30
m["env.start"] = {"theta": -1.58, "x": 7.5, "y": -10}
m["env.goal"] = {"theta": 1.58, "x": 76.5, "y": -10}
m["env.type"] = "polygon"
m["env.polygon.source"] = "polygon_mazes/%s.svg" % scenario
m["env.collision.robot_shape_source"] = "polygon_mazes/warehouse_robot.svg"
m.set_planners([])
m.set_planners(['bfmt', 'cforest', 'prm', 'prm_star', 'informed_rrt_star', 'sbpl_mha'])
m["steer.car_turning_radius"] = 2
m["sbpl.scaling"] = 1
m.run(id="test_%s" % scenario, runs=1)
m.print_info()
m.visualize_trajectories(ignore_planners='cforest, bfmt',
draw_start_goal_thetas=True,
plot_every_nth_polygon=8,
fig_width=8,
fig_height=8,
silence=True,
save_file="plots/%s.pdf" % scenario,
num_colors=10)
```
```
import os, sys, glob, scipy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
```
## Plan
1. Describe the task
2. Make the simplest visualization you can think of that contains:
- the Dependent Variable, i.e. the behavior of the participants that you're trying to model/predict/explain/account for/etc
- the Independent Variable(s), i.e. the features of the trial that you think might influence behavior
- draw each trial as a point on this graph
3. Think of possible models that would generate similar values for the DV given the observed values for the IV
## 2. Make a visualization
##### Load some data
```
base_dir = os.path.realpath('')
data_dir = base_dir + '/Data'
data = pd.read_csv(data_dir + '/Study1_UG.csv')
data = data[['sub','trial','unfairness','choice']]
data['offer'] = 100 - data['unfairness']
data.head()
```
##### Make a simple plot
```
sub = 2
sub_data = data.query('sub == @sub')
sub_data.head()
```
##### Problem 1. Plot each trial independently, use transparency to visualize overlap
##### Problem 2. Plot the average over trials with the same offer
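A minimal sketch of what such plots could look like (assuming `choice` is coded as 1 = accept and 0 = reject; that coding is an assumption):
```python
# Problem 1: every trial as a semi-transparent point
plt.scatter(sub_data['offer'], sub_data['choice'], alpha=0.1, label='single trials')
# Problem 2: mean acceptance rate for each offer level
sub_data.groupby('offer')['choice'].mean().plot(marker='o', label='mean per offer')
plt.xlabel('offer')
plt.ylabel('choice / P(accept)')
plt.legend()
plt.show()
```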
## 3. Think of a model that can recreate this plot
###### Problem 3. Define the following models
- Model 1: always accept.
- Model 2: always reject.
- Model 3: act randomly.
- Model 4: maximize payoff ('greed').
- Model 5: minimize payoff ('inverse greed').
- Model 6: unfairness punisher (reject with a probability P proportional to the unfairness of the offer).
- Model 7: inequity aversion.
```
# Always accept
def model_1(offer):
return choice
# Always reject
def model_2(offer):
return choice
# Act random
def model_3(offer):
return choice
# Maximize payoff
def model_4(offer):
return choice
# Minimize payoff
def model_5(offer):
return choice
# Unfairness punisher
def model_6(offer):
return choice
# Inequity aversion
def model_7(offer):
return choice
```
## 4. Simulating task data
```
simulated_sub_data = sub_data[['trial','offer','choice']].copy()
simulated_sub_data['choice'] = np.nan
simulated_sub_data.head()
```
##### Problem 4. Simulate task data using a model
Use one of the models you have defined above to simulate choices for the simulated_sub_data dataframe.
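For instance, a hypothetical threshold model could be plugged in as follows (the model, its threshold, and the accept/reject coding are illustrative assumptions, not the assignment's solution):
```python
# Hypothetical example model: accept (1) any offer of 30 or more, otherwise reject (0)
def example_threshold_model(offer):
    return 1 if offer >= 30 else 0

simulated_sub_data['choice'] = simulated_sub_data['offer'].apply(example_threshold_model)
simulated_sub_data.head()
```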
So here we have a dataset – basically a list of trials that together constitute an experiment – with simulated task data! We've basically generated a pseudo-subject based on one of the models we defined. In the next steps, we will compare such simulated datasets to our actually observed subject data. The more similar a model's simulation is to observed task data, the better the model 'fits' the data.
## For next time
- Get Joey's data from GitHub
- Try to code models 5, 6, and 7
- Simulate data from each model
<a href="https://colab.research.google.com/github/AnacletoLAB/grape/blob/main/tutorials/High_performance_graph_algorithms.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# High performance graph algorithms
A number of high performance algorithms have been implemented in Ensmallen, a considerable portion of which are implementations of algorithms described in the literature by [David Bader](https://davidbader.net/), whom we thank for his contribution to the field of graph algorithms.
See below for the algorithms available in Ensmallen.
Note that all of these algorithms are highly parallel implementations, and these benchmarks are being run on COLAB which typically provides virtual machines with a very small number of cores: on a machine with a reasonable number of cores they will execute much faster.
To install the GraPE library run:
```bash
pip install grape
```
To install only the Ensmallen module, which may be useful when the TensorFlow dependency causes problems, run:
```bash
pip install ensmallen
```
```
! pip install -q ensmallen
```
## Retrieving a graph to run the sampling on
In this tutorial we will run samples on one of the graphs available through Ensmallen's automatic graph retrieval, namely the [Homo Sapiens graph from STRING](https://string-db.org/cgi/organisms). If you want to load a graph from an edge list, just follow the examples provided in the [add reference to tutorial].
```
from ensmallen.datasets.string import HomoSapiens
```
Retrieving and loading the graph
```
graph = HomoSapiens()
```
We compute the graph report:
```
graph
```
Enable the speedups
```
graph.enable()
```
## Random Spanning arborescence
The spanning arborescence algorithm computes a set of edges, an [Arborescence](https://en.wikipedia.org/wiki/Arborescence_(graph_theory)), that is spanning, i.e. covers all the nodes in the graph.
This is an implementation of [A fast, parallel spanning tree algorithm for symmetric multiprocessors (SMPs)](https://davidbader.net/publication/2005-bc/2005-bc.pdf).
```
%%time
spanning_arborescence_edges = graph.spanning_arborescence()
```
## Connected components
The [connected components](https://en.wikipedia.org/wiki/Component_(graph_theory)) of a graph are the sets of nodes connected to one another by edges.
```
%%time
(
connected_component_ids,
number_of_connected_components,
minimum_component_size,
maximum_component_size
) = graph.connected_components()
```
## Diameter
The following is an implementation of [On computing the diameter of real-world undirected graphs](https://who.rocq.inria.fr/Laurent.Viennot/road/papers/ifub.pdf).
```
%%time
diameter = graph.get_diameter(ignore_infinity=True)
```
Note that most properties that boil down to a single value are stored in a cache structure once computed, so requesting the diameter again takes significantly less time.
```
%%time
diameter = graph.get_diameter(ignore_infinity=True)
```
## Clustering coefficient and triangles
This is an implementation of [Faster Clustering Coefficient Using Vertex Covers](https://davidbader.net/publication/2013-g-ba/2013-g-ba.pdf), providing the average clustering coefficient, the total number of triangles and the number of triangles per node.
```
%%time
graph.get_number_of_triangles()
%%time
graph.get_number_of_triangles_per_node()
%%time
graph.get_average_clustering_coefficient()
%%time
graph.get_clustering_coefficient_per_node()
```
# Jupyter Example 5 for HERMES: Neutrinos
```
from pyhermes import *
from pyhermes.units import PeV, TeV, GeV, mbarn, kpc, pc, deg, rad
import astropy.units as u
import numpy as np
import healpy
import matplotlib.pyplot as plt
```
HERMES provides two cross-section modules for $pp \rightarrow \nu$:
* one built on top of cparamlib: Kamae et al. 2006
* one based on Kelner-Aharonian parametrization
```
kamae06 = interactions.Kamae06Neutrino()
kelahar = interactions.KelnerAharonianNeutrino()
E_neutrino_range = np.logspace(0,6,100)*GeV
E_proton_list = [10*GeV, 100*GeV, 1*TeV, 100*TeV, 1*PeV]
diff_sigma = lambda model, E_proton: [
E_neutrino*model.getDiffCrossSection(E_proton, E_neutrino)/mbarn
for E_neutrino in E_neutrino_range
]
diff_sigma_kamae06 = lambda E_proton: diff_sigma(kamae06, E_proton)
diff_sigma_kelahar = lambda E_proton: diff_sigma(kelahar, E_proton)
colors = ['tab:brown', 'tab:red', 'tab:green', 'tab:blue', 'tab:orange']
for E_proton, c in zip(E_proton_list, colors):
plt.loglog(E_neutrino_range/GeV, diff_sigma_kamae06(E_proton),
ls='-', color=c, label="{}".format(E_proton.toAstroPy().to('TeV').round(2)))
plt.loglog(E_neutrino_range/GeV, diff_sigma_kelahar(E_proton),
ls='--', color=c)
plt.ylim(top=1e3, bottom=1e-2)
plt.title("Kamae06 (solid) and K&A (dashed) for a list of $E_p$")
plt.xlabel(r"$E_\nu$ / GeV")
plt.ylabel(r"$E_\nu\, \mathrm{d}\sigma_{pp \rightarrow \nu} / \mathrm{d} E_\nu$ [mbarn]")
_ = plt.legend(loc="upper right", frameon=False)
def integrate_template(integrator, nside):
integrator.setupCacheTable(60, 60, 12)
sun_pos = Vector3QLength(8.0*kpc, 0*pc, 0*pc)
integrator.setSunPosition(sun_pos)
mask_edges = ([5*deg, 0*deg], [-5*deg, 180*deg])
mask = RectangularWindow(*mask_edges)
skymap_range = GammaSkymapRange(nside, 0.05*TeV, 1e4*TeV, 20)
skymap_range.setIntegrator(integrator)
skymap_range.setMask(mask)
skymap_range.compute()
return skymap_range
def integrate_neutrino(cosmicrays, gas, crosssection):
nside = 256
integrator = PiZeroIntegrator(cosmicrays, gas, crosssection)
return integrate_template(integrator, nside)
neutral_gas_HI = neutralgas.RingModel(neutralgas.RingType.HI)
proton = cosmicrays.Dragon2D(Proton)
skymap_range_neutrino_HI_kamae06 = integrate_neutrino(proton, neutral_gas_HI, kamae06)
skymap_range_neutrino_HI_kelahar = integrate_neutrino(proton, neutral_gas_HI, kelahar)
#use_units = skymap_range_HI[0].getUnits() # default units for GammaSkymap (GeV^-1 m^-2 s^-1 sr^-1)
use_units = "GeV^-1 cm^-2 s^-1 sr^-1" # override default
skymap_units = u.Quantity(1, use_units)
base_units = skymap_units.unit.si.scale
def calc_mean_flux(skymap_range):
energies = np.array([float(s.getEnergy()/GeV) for s in skymap_range])
fluxes = np.array([s.getMean() for s in skymap_range]) / base_units
return energies, fluxes
def plot_spectrum(skymap_range, label, style):
energies, fluxes = calc_mean_flux(skymap_range)
plt.plot(energies, fluxes*energies**2, style, label=label)
def plot_total_spectrum(list_of_skymap_range, label, style):
fluxes = QDifferentialIntensity(0)
for skymap_range in list_of_skymap_range:
energies, fluxes_i = calc_mean_flux(skymap_range)
fluxes = fluxes + fluxes_i
plt.plot(energies, fluxes*energies**2, style, label=label)
fig, ax = plt.subplots()
plot_spectrum(skymap_range_neutrino_HI_kamae06, r'$\nu $ @ p + HI (Kamae06)', '-')
plot_spectrum(skymap_range_neutrino_HI_kelahar, r'$\nu $ @ p + HI (K&A)', '--')
plt.title("Neutrinos from diffuse emission (Fornieri20, Remy18)\n $|b| < 5^\degree$, $0^\degree \leq l \leq 180^\degree$")
plt.legend(loc="lower left")
plt.xlabel(r"$E_\nu$ / GeV")
plt.ylabel(r"$E_\nu\, \mathrm{d}\Phi_\nu / \mathrm{d} E_\nu$ / " + (skymap_units*u.GeV**2).unit.to_string(format='latex_inline'))
ax.tick_params(which='minor', direction='in', axis='both', bottom=True, top=True, left=True, right=True, length=3)
ax.tick_params(which='major', direction='in', axis='both', bottom=True, top=True, left=True, right=True, length=5)
plt.xscale("log")
plt.yscale("log")
plt.ylim(10**(-9), 10**(-6))
plt.xlim(10**(2), 10**(6))
#plt.savefig("img/neutrinos-from-diffuse-emission-spectrum-180.pdf", dpi=150)
```
# V0.1.6 - Simulate a Predefined Model
Example created by Wilson Rocha Lacerda Junior
```
pip install sysidentpy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sysidentpy.metrics import root_relative_squared_error
from sysidentpy.utils.generate_data import get_miso_data, get_siso_data
from sysidentpy.polynomial_basis.simulation import SimulatePolynomialNarmax
```
## Generating 1 input 1 output sample data
### The data is generated by simulating the following model:
$y_k = 0.2y_{k-1} + 0.1y_{k-1}x_{k-1} + 0.9x_{k-2} + e_{k}$
If *colored_noise* is set to True:
$e_{k} = 0.8\nu_{k-1} + \nu_{k}$
where $x$ is a uniformly distributed random variable and $\nu$ is a gaussian distributed variable with $\mu=0$ and $\sigma=0.1$
In the next example we will generate data with 1000 samples with white noise, selecting 90% of the data to train the model.
```
x_train, x_test, y_train, y_test = get_siso_data(n=1000,
colored_noise=False,
sigma=0.001,
train_percentage=90)
```
## Defining the model
We already know that the generated data is the result of the model $𝑦_𝑘=0.2𝑦_{𝑘−1}+0.1𝑦_{𝑘−1}𝑥_{𝑘−1}+0.9𝑥_{𝑘−2}+𝑒_𝑘$. Thus, we can create a model with those regressors following a codification pattern (a small worked example follows the list):
- $0$ is the constant term,
- $[1001] = y_{k-1}$
- $[100n] = y_{k-n}$
- $[200n] = x1_{k-n}$
- $[300n] = x2_{k-n}$
- $[1011, 1001] = y_{k-11} \times y_{k-1}$
- $[100n, 100m] = y_{k-n} \times y_{k-m}$
- $[12001, 1003, 1001] = x11_{k-1} \times y_{k-3} \times y_{k-1}$
- and so on
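For example, a hypothetical model $y_k = 0.5y_{k-2} + 0.3x1_{k-1}$ would be encoded as:
```python
import numpy as np

# Hypothetical model: y(k) = 0.5*y(k-2) + 0.3*x1(k-1)
example_model = np.array(
    [
        [1002, 0],  # y(k-2)
        [2001, 0],  # x1(k-1)
    ]
)
example_theta = np.array([[0.5, 0.3]]).T
```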
### Important Note
The order of the codes within each regressor array matters.
If you use [2001, 1001], it will work, but [1001, 2001] will not (the regressor will be ignored). Always put the highest value first:
- $[2003, 2001]$ **works**
- $[2001, 2003]$ **does not work**
We will handle this limitation in an upcoming update.
```
s = SimulatePolynomialNarmax()
# the model must be a numpy array
model = np.array(
[
[1001, 0], # y(k-1)
[2001, 1001], # x1(k-1)y(k-1)
[2002, 0], # x1(k-2)
]
)
# theta must be a numpy array of shape (n, 1) where n is the number of regressors
theta = np.array([[0.2, 0.9, 0.1]]).T
```
## Simulating the model
After defining the model and theta we just need to use the simulate method.
The simulate method returns the predicted values and the results where we can look at regressors,
parameters and ERR values.
```
yhat, results = s.simulate(
X_test=x_test,
y_test=y_test,
model_code=model,
theta=theta,
plot=True)
results = pd.DataFrame(results, columns=['Regressors', 'Parameters', 'ERR'])
results
```
### Options
You can set the `steps_ahead` to run the prediction/simulation:
```
yhat, results = s.simulate(
X_test=x_test,
y_test=y_test,
model_code=model,
theta=theta,
plot=False,
steps_ahead=1)
rrse = root_relative_squared_error(y_test, yhat)
rrse
yhat, results = s.simulate(
X_test=x_test,
y_test=y_test,
model_code=model,
theta=theta,
plot=False,
steps_ahead=21)
rrse = root_relative_squared_error(y_test, yhat)
rrse
```
### Estimating the parameters
If you have only the model structure, you can create an object with `estimate_parameter=True` and
choose the method for estimation using `estimator`. In this case, you have to pass the training data
for parameter estimation.
When `estimate_parameter=True`, we also compute the ERR considering only the regressors defined by the user.
```
s2 = SimulatePolynomialNarmax(estimate_parameter=True, estimator='recursive_least_squares')
yhat, results = s2.simulate(
X_train=x_train,
y_train=y_train,
X_test=x_test,
y_test=y_test,
model_code=model,
# theta will be estimated using the defined estimator
plot=True)
results = pd.DataFrame(results, columns=['Regressors', 'Parameters', 'ERR'])
results
yhat, results = s2.simulate(
X_train=x_train,
y_train=y_train,
X_test=x_test,
y_test=y_test,
model_code=model,
# theta will be estimated using the defined estimator
plot=True,
steps_ahead=8)
results = pd.DataFrame(results, columns=['Regressors', 'Parameters', 'ERR'])
results
yhat, results = s2.simulate(
X_train=x_train,
y_train=y_train,
X_test=x_test,
y_test=y_test,
model_code=model,
# theta will be estimated using the defined estimator
plot=True,
steps_ahead=8)
```
# Segmentation
<div class="alert alert-info">
This tutorial is available as an IPython notebook at [Malaya/example/segmentation](https://github.com/huseinzol05/Malaya/tree/master/example/segmentation).
</div>
<div class="alert alert-info">
This module is trained on both standard and local (including social media) language structures, so it is safe to use for both.
</div>
```
%%time
import malaya
```
A common problem with social media texts is missing spaces, so text segmentation can help you:
1. huseinsukamakan ayam,dia sgtrisaukan -> husein suka makan ayam, dia sgt risaukan.
2. drmahathir sangat menekankan budaya budakzamansekarang -> dr mahathir sangat menekankan budaya budak zaman sekarang.
3. ceritatunnajibrazak -> cerita tun najib razak.
4. TunM sukakan -> Tun M sukakan.
Segmentation only:
1. Solves spacing errors.
2. Does not correct any grammar.
```
string1 = 'huseinsukamakan ayam,dia sgtrisaukan'
string2 = 'drmahathir sangat menekankan budaya budakzamansekarang'
string3 = 'ceritatunnajibrazak'
string4 = 'TunM sukakan'
string_hard = 'IPOH-AhliDewanUndangan Negeri(ADUN) HuluKinta, MuhamadArafat Varisai Mahamadmenafikanmesejtularmendakwa beliau akan melompatparti menyokong UMNO membentuk kerajaannegeridiPerak.BeliauyangjugaKetua Penerangan Parti Keadilan Rakyat(PKR)Perak dalam satumesejringkaskepadaSinar Harian menjelaskan perkara itutidakbenarsama sekali.'
string_socialmedia = 'aqxsukalah apeyg tejadidekat mamattu'
```
### Viterbi algorithm
Commonly, people use the Viterbi algorithm to solve this problem; we also added a Viterbi implementation using n-grams from bahasa papers and Wikipedia.
```python
def viterbi(max_split_length: int = 20, **kwargs):
"""
Load Segmenter class using viterbi algorithm.
Parameters
----------
max_split_length: int, (default=20)
max length of words in a sentence to segment
validate: bool, optional (default=True)
if True, malaya will check model availability and download if not available.
Returns
-------
result : malaya.segmentation.SEGMENTER class
"""
```
```
viterbi = malaya.segmentation.viterbi()
```
#### Segmentize
```python
def segment(self, strings: List[str]):
"""
Segment strings.
Example, "sayasygkan negarasaya" -> "saya sygkan negara saya"
Parameters
----------
strings : List[str]
Returns
-------
result: List[str]
"""
```
```
%%time
viterbi.segment([string1, string2, string3, string4])
%%time
viterbi.segment([string_hard, string_socialmedia])
```
### List available Transformer model
```
malaya.segmentation.available_transformer()
```
### Load Transformer model
```python
def transformer(model: str = 'small', quantized: bool = False, **kwargs):
"""
Load transformer encoder-decoder model to Segmentize.
Parameters
----------
model : str, optional (default='base')
Model architecture supported. Allowed values:
* ``'small'`` - Transformer SMALL parameters.
* ``'base'`` - Transformer BASE parameters.
quantized : bool, optional (default=False)
if True, will load 8-bit quantized model.
Quantized model not necessary faster, totally depends on the machine.
Returns
-------
result: malaya.model.tf.Segmentation class
"""
```
```
model = malaya.segmentation.transformer(model = 'small')
quantized_model = malaya.segmentation.transformer(model = 'small', quantized = True)
model_base = malaya.segmentation.transformer(model = 'base')
quantized_model_base = malaya.segmentation.transformer(model = 'base', quantized = True)
```
#### Predict using greedy decoder
```python
def greedy_decoder(self, strings: List[str]):
"""
Segment strings using greedy decoder.
Example, "sayasygkan negarasaya" -> "saya sygkan negara saya"
Parameters
----------
strings : List[str]
Returns
-------
result: List[str]
"""
```
```
%%time
model.greedy_decoder([string1, string2, string3, string4])
%%time
quantized_model.greedy_decoder([string1, string2, string3, string4])
%%time
model_base.greedy_decoder([string1, string2, string3, string4])
%%time
quantized_model_base.greedy_decoder([string1, string2, string3, string4])
%%time
model.greedy_decoder([string_hard, string_socialmedia])
%%time
quantized_model.greedy_decoder([string_hard, string_socialmedia])
%%time
model_base.greedy_decoder([string_hard, string_socialmedia])
%%time
quantized_model_base.greedy_decoder([string_hard, string_socialmedia])
```
**A problem with batching strings: a short string might repeat itself. To solve this, pass a single string at a time**,
```
%%time
quantized_model_base.greedy_decoder([string_socialmedia])
%%time
quantized_model_base.greedy_decoder([string3])
%%time
quantized_model_base.greedy_decoder([string4])
```
#### Predict using beam decoder
```python
def beam_decoder(self, strings: List[str]):
"""
Segment strings using beam decoder, beam width size 3, alpha 0.5 .
Example, "sayasygkan negarasaya" -> "saya sygkan negara saya"
Parameters
----------
strings : List[str]
Returns
-------
result: List[str]
"""
```
```
%%time
quantized_model.beam_decoder([string_socialmedia])
%%time
quantized_model_base.beam_decoder([string_socialmedia])
```
**We can expect the beam decoder to be much slower than the greedy decoder**.
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import descartes
import geopandas as gpd
from shapely.geometry import Point, Polygon
from shapely.ops import nearest_points
import seaborn as sns
from mpl_toolkits.axes_grid1 import make_axes_locatable
import math
import time
from matplotlib import cm
import matplotlib.lines as mlines
%matplotlib inline
```
### AIR POLLUTION MONITORING DATA FROM EDF
```
df = pd.read_csv('EDF_Data.csv', header = 1)
df['TimePeriod'] = 'Jun2015-May2016'
df.tail()
df.shape
geometry = [Point(xy) for xy in zip(df['Longitude'], df['Latitude'])]
```
### Split the dataset into BC and NO2 since we are interested only in those two pollutants
```
BC_df = df[['Longitude', 'Latitude', 'BC Value', 'TimePeriod']]
NO2_df = df[['Longitude', 'Latitude', 'NO2 Value', 'TimePeriod']]
crs = {'init': 'epsg:4326'}
geo_df = gpd.GeoDataFrame(df, crs = crs, geometry = geometry)
```
## TRAFFIC DATA
```
### Load Annual Average Daily Traffic (AADT) file from Caltrans
traffic = pd.read_csv('Data/Traffic_Oakland_AADT.csv', header = 0)
# Drop columns that are unneccessary and choose only Ahead_AADT, along with N/E latitude and longitude
traffic.drop(columns = ['OBJECTID','District','Route','County', 'Postmile',
'Back_pk_h', 'Back_pk_m', 'Ahead_pk_h', 'Ahead_pk_m','Back_AADT','Lat_S_or_W', 'Lon_S_or_W'], inplace=True)
traffic.rename(columns={"Ahead_AADT":"AADT", "Lat_N_or_E":"Latitude", "Lon_N_or_E":"Longitude", "Descriptn":"Description"}, inplace=True)
traffic.head()
# Taking a closer look at the traffic data, there are some intersections where the AADT is zero, or the latitude and longitude are zero. We want to drop these rows
traffic = traffic[(traffic['Longitude']<-1) & (traffic['AADT']>1)]
traffic.shape
```
## Converting facility and traffic dataframe into a geopandas dataframe for plotting
```
# Create a geopandas dataframe with traffic data
geometry_traffic = [Point(xy) for xy in zip(traffic['Longitude'], traffic['Latitude'])]
geo_df_traffic = gpd.GeoDataFrame(traffic, crs = crs, geometry = geometry_traffic)
# Create a list of x and y coordinates for the Black Carbon concentration data using geopandas
geometry_df_BC = [Point(xy) for xy in zip(BC_df['Longitude'], BC_df['Latitude'])]
geo_df_BC = gpd.GeoDataFrame(BC_df, crs = crs, geometry = geometry_df_BC)
```
### Calculate distance between point of measurement and each facility and add it to the _dist column
```
### Define a function to calculate the great-circle (haversine) distance between two GPS coordinates (latitude and longitude)
def distance(origin, destination):
lat1, lon1 = origin
lat2, lon2 = destination
radius = 6371 # km
dlat = math.radians(lat2-lat1)
dlon = math.radians(lon2-lon1)
a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = radius * c
return d
```
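As a quick sanity check of this helper (the coordinates are arbitrary): two points that are one degree of latitude apart should come out at roughly 111 km.
```python
# Quick check of the haversine helper: ~111 km per degree of latitude
print(distance((37.80, -122.27), (38.80, -122.27)))  # approx. 111.2
```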
#### Loading Traffic Data
```
traffic.head()
## Assign an intersection number to each traffic intersection instead of using description
traffic.reset_index(inplace=True)
#Rename index as Intersection
traffic.rename(columns={"index":"Intersection"}, inplace=True)
#Drop the description column
traffic.drop(columns=['Description'], inplace=True)
### Add an empty column for distance
traffic['dist'] = 0
traffic['dist'] = traffic['dist'].astype(float)
traffic_lat = traffic[['Intersection', 'Latitude']].T
traffic_long = traffic[['Intersection', 'Longitude']].T
traffic_AADT = traffic[['Intersection', 'AADT']].T
traffic_dist = traffic[['Intersection', 'dist']].T
traffic_geo = traffic[['Intersection', 'geometry']].T
traffic_lat.head()
## Make the header as the first row in each transposed dataframe
traffic_lat = traffic_lat.rename(columns=traffic_lat.iloc[0].astype(int)).drop(traffic_lat.index[0])
traffic_long = traffic_long.rename(columns=traffic_long.iloc[0].astype(int)).drop(traffic_long.index[0])
traffic_AADT = traffic_AADT.rename(columns=traffic_AADT.iloc[0].astype(int)).drop(traffic_AADT.index[0])
traffic_dist = traffic_dist.rename(columns=traffic_dist.iloc[0].astype(int)).drop(traffic_dist.index[0])
traffic_geo = traffic_geo.rename(columns=traffic_geo.iloc[0].astype(int)).drop(traffic_geo.index[0])
## Add suffix to column header based on the dataframe type
traffic_lat.columns = [str(col) + '_latitude' for col in traffic_lat.columns]
traffic_long.columns = [str(col) + '_longitude' for col in traffic_long.columns]
traffic_AADT.columns = [str(col) + '_AADT' for col in traffic_AADT.columns]
traffic_dist.columns = [str(col) + '_traf_dist' for col in traffic_dist.columns]
traffic_geo.columns = [str(col) + '_geo' for col in traffic_geo.columns]
## Remove index for each dataframe
traffic_lat.reset_index(drop=True, inplace=True)
traffic_long.reset_index(drop=True, inplace=True)
traffic_AADT.reset_index(drop=True, inplace=True)
traffic_dist.reset_index(drop=True, inplace=True)
traffic_geo.reset_index(drop=True, inplace=True)
traffic_combined = traffic_lat.join(traffic_long).join(traffic_AADT).join(traffic_dist).join(traffic_geo)
traffic_combined
traffic_combined = traffic_combined.reindex(columns=sorted(traffic_combined.columns))
#Create a datafram where each row contains emissions of PM10 and PM2.5 for each facility
traffic_combined = traffic_combined.loc[traffic_combined.index.repeat(21488)].reset_index(drop=True)
BC_Traffic = BC_df.join(traffic_combined)
BC_Traffic.head()
# Convert distance column to float type
for idx, col in enumerate(BC_Traffic.columns):
if "_traf_dist" in col:
BC_Traffic[col] = pd.to_numeric(BC_Traffic[col], downcast="float")
```
#### Calculate distance between each traffic intersection and point of measurement and store this in the _dist column
```
for index, row in BC_Traffic.iterrows():
for idx, col in enumerate(BC_Traffic.columns):
if "_traf_dist" in col:
BC_Traffic.at[index,col] = float(distance((row.iloc[1], row.iloc[0]), (row.iloc[idx-2], row.iloc[idx-1])))*0.621
#BC_Facility_Traffic.at[index,col] = float(row.iloc[idx])
BC_Traffic.head()
#### Write this to a dataframe
BC_Traffic.to_csv("Data/BC_Traffic_ALL.csv")
```
#### Similar to the facility dataframe, drop latitude and longitude since they're captured in the distance column. Also drop AADT
```
BC_Traffic.drop(list(BC_Traffic.filter(regex = '_latitude')), axis = 1, inplace = True)
BC_Traffic.drop(list(BC_Traffic.filter(regex = '_longitude')), axis = 1, inplace = True)
BC_Traffic.drop(list(BC_Traffic.filter(regex = '_AADT')), axis = 1, inplace = True)
BC_Traffic.drop(list(BC_Traffic.filter(regex = '_geo')), axis = 1, inplace = True)
BC_Traffic.drop(list(BC_Traffic.filter(regex = 'Longitude')), axis = 1, inplace = True)
BC_Traffic.drop(list(BC_Traffic.filter(regex = 'Latitude')), axis = 1, inplace = True)
BC_Traffic.drop(list(BC_Traffic.filter(regex = 'TimePeriod')), axis = 1, inplace = True)
BC_Traffic.drop(list(BC_Traffic.filter(regex = 'geometry')), axis = 1, inplace = True)
BC_Traffic.head()
corr = BC_Traffic.corr()
arr_corr = corr.to_numpy()
arr_corr[0]
```
#### Plotting correlation between all features as a heatmap - but this visualization is not easy to follow...
```
fig, ax = plt.subplots(figsize=(100, 100))
ax = sns.heatmap(
    corr,
    vmin=-1, vmax=1, center=0,
    cmap=sns.diverging_palette(20, 220, n=500),
    square=False
)
ax.set_xticklabels(
    ax.get_xticklabels(),
    rotation=45,
    horizontalalignment='right'
);
plt.show()
```
```
print(plt.get_backend())
# close any existing plots
plt.close("all")
# mask out the top triangle
arr_corr[np.triu_indices_from(arr_corr)] = np.nan
fig, ax = plt.subplots(figsize=(50, 50))
hm = sns.heatmap(arr_corr, cbar=True, vmin = -1, vmax = 1, center = 0,
fmt='.2f', annot_kws={'size': 8}, annot=True,
square=False, cmap = 'coolwarm')
#cmap=plt.cm.Blues
ticks = np.arange(corr.shape[0]) + 0.5
ax.set_xticks(ticks)
ax.set_xticklabels(corr.columns, rotation=90, fontsize=8)
ax.set_yticks(ticks)
ax.set_yticklabels(corr.index, rotation=360, fontsize=8)
ax.set_title('correlation matrix')
plt.tight_layout()
#plt.savefig("corr_matrix_incl_anno_double.png", dpi=300)
```
#### Once again there doesn't seem to be much correlation between BC concentrations and the closest major traffic intersection. Next option is to identify all the traffic intersections in the area.
```
import chart_studio.plotly as py
import plotly.graph_objs as go
import chart_studio
chart_studio.tools.set_credentials_file(username='varsha2509', api_key='QLfBsWWLPKoLjY5hW0Fu')
heatmap = go.Heatmap(z=arr_corr, x=BC_Facility_Traffic_Met.columns, y=BC_Facility_Traffic_Met.index)
data = [heatmap]
py.iplot(data, filename='basic-heatmap')
```
Definition of **DTLZ2 problem** with 3 objective functions:
$f_1(X) = (1 + g(x_3)) \cdot cos(x_1 \cdot \frac{\pi}{2}) \cdot cos(x_2 \cdot \frac{\pi}{2})$
$f_2(X) = (1 + g(x_3)) \cdot cos(x_1 \cdot \frac{\pi}{2}) \cdot sin(x_2 \cdot \frac{\pi}{2})$
$f_3(x) = (1 + g(x_3)) \cdot sin(x_1 \cdot \frac{\pi}{2})$
with
$-10 \leq x_1 \leq 10$
$-10 \leq x_2 \leq 10$
$-10 \leq x_3 \leq 10$
$g(x_3) = \sum_{x_i \in X_M} (x_i - 0.5)^2$
adapted from "*Scalable Test Problems for Evolutionary Multi-Objective Optimization*" section 8.2.
```
import sys
sys.path.append('../..')
import numpy as np
import matplotlib.pyplot as plt
import beagle as be
np.random.seed(1997)
# Problem definition
def func_1(values):
const = np.pi / 2
return (1 + g(values)) * np.cos(values[0]*const) * np.cos(values[1]*const)
def func_2(values):
const = np.pi / 2
return (1 + g(values)) * np.cos(values[0]*const) * np.sin(values[1]*const)
def func_3(values):
const = np.pi / 2
return (1 + g(values)) * np.sin(values[0]*const)
def g(values):
result = 0.0
for val in values:
result += (val - 0.5)**2
return result
x_1 = x_2 = x_3 = (-10.0, 10.0)
representation = 'real'
# Algorithm definition
nsga2 = be.use_algorithm(
'experimental.NSGA2',
fitness=be.Fitness(func_1, func_2, func_3),
population_size=100,
individual_representation='real',
bounds = [x_1, x_2, x_3],
alg_id='nsga2',
evaluate_in_parallel=False
)
spea2 = be.use_algorithm(
'experimental.SPEA2',
fitness=be.Fitness(func_1, func_2, func_3),
population_size=50,
individual_representation='real',
bounds = [x_1, x_2, x_3],
spea2_archive=100,
alg_id='spea2',
evaluate_in_parallel=False
)
wrapper = be.parallel(nsga2, spea2, generations=50)
wrapper.algorithms
# NSGA2
be.display(wrapper.algorithms[0], only_show=True)
# SPEA2
be.display(wrapper.algorithms[1], only_show=True)
# Obtain the solutions that make up the non-dominated front of each algorithm
indices, values = be.pareto_front(wrapper.algorithms[0])
nsga2_sols = np.array([
wrapper.algorithms[0].population[idx].values for idx in indices['population']
])
indices, values = be.pareto_front(wrapper.algorithms[1])
spea2_sols = np.array([
wrapper.algorithms[1].population['archive'][idx].values for idx in indices['population']
])
fig = plt.figure(2, figsize=(15, 15))
ax1 = fig.add_subplot(221, projection='3d')
ax2 = fig.add_subplot(222, projection='3d')
ax3 = fig.add_subplot(223, projection='3d')
ax4 = fig.add_subplot(224, projection='3d')
# Problem definition
def f_1_vec(x, y, z):
const = np.pi / 2
values = np.array((x, y, z))
return (1 + g_vec(values)) * np.cos(x*const) * np.cos(y*const)
def f_2_vec(x, y, z):
const = np.pi / 2
values = np.array((x, y, z))
return (1 + g_vec(values)) * np.cos(x*const) * np.sin(y*const)
def f_3_vec(x, y, z):
const = np.pi / 2
values = np.array((x, y, z))
return (1 + g_vec(values)) * np.sin(x*const)
def g_vec(values):
result = np.power(values - 0.5, 2)
return np.sum(result, axis=0)
for ax in [ax1, ax2, ax3, ax4]:
# Plot the obtained Pareto's front
    ax.scatter(
        f_1_vec(nsga2_sols[:, 0], nsga2_sols[:, 1], nsga2_sols[:, 2]),
        f_2_vec(nsga2_sols[:, 0], nsga2_sols[:, 1], nsga2_sols[:, 2]),
        f_3_vec(nsga2_sols[:, 0], nsga2_sols[:, 1], nsga2_sols[:, 2]),
        color='red', alpha=0.7, linewidth=0, antialiased=False, label='NSGA2')
    ax.scatter(
        f_1_vec(spea2_sols[:, 0], spea2_sols[:, 1], spea2_sols[:, 2]),
        f_2_vec(spea2_sols[:, 0], spea2_sols[:, 1], spea2_sols[:, 2]),
        f_3_vec(spea2_sols[:, 0], spea2_sols[:, 1], spea2_sols[:, 2]),
        color='green', alpha=0.7, linewidth=0, antialiased=False, label='SPEA2')
ax.set_xlabel('f1(x)', size=15)
ax.set_ylabel('f2(x)', size=15)
ax.set_zlabel('f3(x)', size=15)
ax2.view_init(40, -20)
ax3.view_init(40, 0)
ax4.view_init(40, 30)
handles, labels = ax1.get_legend_handles_labels()
fig.legend(handles, labels, loc='lower center', fontsize=20, markerscale=2)
plt.show()
```
# Simple Image Classifier - Bring Your Own Data
## Neural Networks on https://bootcamp.codecentric.ai
Now it is time to experiment with a dataset of your own.
Note: If you train on a machine without a decent GPU, this can take a very long time. In that case you may want to pull the chapter on "Training in the Cloud" forward and run the experiment there.
Imports and settings
```
from fastai.basics import *
from fastai.vision import *
```
### Define the folder where the data is located
Think about which images you want to classify.
If you decide, for example, on bird ("vogel") vs. sneaker ("turnschuh"), create a folder structure like this:
- /data/byod/train/
- vogel/bild1.jpg
- vogel/bild2.jpg
- vogel/...
- turnschuh/bild1.jpg
- turnschuh/...
The folder names are important - they are your labels. The file names of the images do not matter (and they do not have to be jpg only).
The images are "labeled" based on the folder they are in.
How many images do you need? Just start with 10-20 images per category and try it out... Maybe you can also find a way to download a larger number of images in an "automated" fashion (a small sketch of one option follows the next code cell).
Getting enough data of sufficient quality is often a large effort in itself.
```
DATA = "/data/byod/"
TRAIN = DATA + "train/"
```
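Regarding the note above about downloading images automatically: one possible approach with the fastai v1 helpers imported earlier is `download_images`. The URL file in this sketch is a hypothetical text file with one image URL per line:
```python
from pathlib import Path

# Hypothetical: a text file with one image URL per line (e.g. exported from an image search)
url_file = Path(TRAIN) / 'vogel_urls.txt'
if url_file.exists():
    download_images(url_file, Path(TRAIN) / 'vogel', max_pics=100)
```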
The following command does the following:
* load the data from the folder (i.e., define a loader)
* assign labels based on the folder names (all images in the Kiwi folder are kiwis)
* split into train/valid (20%)
* resize the images (if you only train on a CPU, choose a small size, otherwise training takes very long)
* (and quite a bit more)
```
data = ImageDataBunch.from_folder(TRAIN, valid_pct=0.2, size=200, bs=20)
```
What does our data look like? Let's simply display a few examples from the training data:
```
data.show_batch(rows=3, figsize=(6, 6))
```
The following command does the following:
* create a CNN
* with a standard architecture (pretrained ResNet)
* the architecture is automatically adapted to the new data (image sizes, classes, etc.)
* report the "accuracy" metric in the training loop
* many default values are set under the hood (which optimizer, hyperparameters, best practices, ...)
```
learn = cnn_learner(data, models.resnet18, metrics=accuracy)
```
### Start Training
```
learn.fit(1)
```
### Now make a prediction with the trained model
If you want to test a few images, create a test folder under /data/byod/ and copy a few images into it (images that were not used for training). You do not need to create subfolders here (after all, the model is supposed to predict which class each image belongs to).
Now we pick a random image from the test folder:
```
TEST = DATA + "test/"
TEST_IMAGES = os.listdir(TEST)
TEST_IMAGES
test_img = open_image(TEST + random.choice(TEST_IMAGES))
test_img
```
and make a prediction with the model:
```
learn.predict(test_img)
```
## Credits
For this exercise we use the fast.ai library - see http://fast.ai
# Linear Regression
<img src="https://raw.githubusercontent.com/glazec/practicalAI/master/images/logo.png" width=150>
In this lesson we will learn about linear regression. We will first understand the basic math behind it and then implement it in Python. We will also look at ways of interpreting the linear model.
# Overview
<img src="https://raw.githubusercontent.com/glazec/practicalAI/master/images/linear.png" width=250>
$\hat{y} = XW$
*where*:
* $\hat{y}$ = prediction | $\in \mathbb{R}^{NX1}$ ($N$ is the number of samples)
* $X$ = inputs | $\in \mathbb{R}^{NXD}$ ($D$ is the number of features)
* $W$ = weights | $\in \mathbb{R}^{DX1}$
* **Objective:** Use inputs $X$ to predict the output $\hat{y}$ using a linear model. The model will be a line of best fit that minimizes the distance between the predicted and target outcomes. Training data $(X, y)$ is used to train the model and learn the weights $W$ using stochastic gradient descent (SGD).
* **Advantages:**
* Computationally simple.
* Highly interpretable.
* Can account for continuous and categorical features.
* **Disadvantages:**
* The model will perform well only when the data is linearly separable (for classification).
* Usually not used for classification and only for regression.
* **Miscellaneous:** You can also use linear regression for binary classification tasks where, if the predicted continuous value is above a threshold, it belongs to a certain class. But we will cover better techniques for classification in future lessons and will focus on linear regression for continuous regression tasks only.
# Training
*Steps*:
1. Randomly initialize the model's weights $W$.
2. Feed inputs $X$ into the model to receive the predictions $\hat{y}$.
3. Compare the predictions $\hat{y}$ with the actual target values $y$ using the objective (cost) function to determine the loss $J$. A common objective function for linear regression is mean squared error (MSE). This function calculates the difference between the predicted and target values and squares it. (The $\frac{1}{2}$ is just there to make the derivative more convenient.)
* $MSE = J(\theta) = \frac{1}{2}\sum_{i}(\hat{y}_i - y_i)^2$
4. Calculate the gradient of loss $J(\theta)$ w.r.t. the model weights.
* $J(\theta) = \frac{1}{2}\sum_{i}(\hat{y}_i - y_i)^2 = \frac{1}{2}\sum_{i}(X_iW - y_i)^2 $
* $\frac{\partial{J}}{\partial{W}} = X^T(\hat{y} - y)$
5. Apply backpropagation to update the weights $W$, using a learning rate $\alpha$ and an optimization technique (i.e. stochastic gradient descent). The simplified intuition is that the gradient tells you the direction in which to increase something, so subtracting it will help you go the other way, since we want to decrease the loss $J(\theta)$.
* $W = W - \alpha\frac{\partial{J}}{\partial{W}}$
6. Repeat steps 2 - 5 until the model performs well.
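To make these steps concrete, here is a minimal from-scratch sketch in NumPy (not part of the original lesson; it uses plain full-batch gradient descent and illustrative values for the learning rate and number of epochs):
```
import numpy as np

# Toy data: y = 3.65*x + 10 with a little noise
rng = np.random.RandomState(0)
X = rng.uniform(-1, 1, size=(100, 1))
y = 3.65 * X[:, 0] + 10 + 0.1 * rng.randn(100)

# Append a column of ones so the intercept is learned as a weight
Xb = np.hstack([X, np.ones((X.shape[0], 1))])   # shape (N, D+1)
W = rng.randn(Xb.shape[1])                      # step 1: random initialization

lr, n_epochs = 0.1, 200                         # illustrative hyperparameters
for epoch in range(n_epochs):
    y_pred = Xb @ W                             # step 2: predictions y_hat = XW
    loss = 0.5 * np.sum((y_pred - y) ** 2)      # step 3: MSE-style objective
    grad = Xb.T @ (y_pred - y) / len(y)         # step 4: dJ/dW = X^T(y_hat - y), averaged
    W -= lr * grad                              # step 5: gradient step

print(W)  # approximately [3.65, 10]
```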
# Data
We're going to create some simple dummy data to apply linear regression on.
```
from argparse import Namespace
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Arguments
args = Namespace(
seed=1234,
data_file="sample_data.csv",
num_samples=100,
train_size=0.75,
test_size=0.25,
num_epochs=100,
)
# Set seed for reproducibility
np.random.seed(args.seed)
# Generate synthetic data
def generate_data(num_samples):
X = np.array(range(num_samples))
y = 3.65*X + 10
return X, y
# Generate random (linear) data
X, y = generate_data(args.num_samples)
data = np.vstack([X, y]).T
df = pd.DataFrame(data, columns=['X', 'y'])
df.head()
# Scatter plot
plt.title("Generated data")
plt.scatter(x=df["X"], y=df["y"])
plt.show()
```
# Scikit-learn implementation
**Note**: The `LinearRegression` class in Scikit-learn uses the normal equation to solve the fit. However, we are going to use Scikit-learn's `SGDRegressor` class which uses stochastic gradient descent. We want to use this optimization approach because we will be using this for the models in subsequent lessons.
```
# Import packages
from sklearn.linear_model import SGDRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# Create data splits
X_train, X_test, y_train, y_test = train_test_split(
df["X"].values.reshape(-1, 1), df["y"], test_size=args.test_size,
random_state=args.seed)
print ("X_train:", X_train.shape)
print ("y_train:", y_train.shape)
print ("X_test:", X_test.shape)
print ("y_test:", y_test.shape)
```
We need to standardize our data (zero mean and unit variance) in order to properly use SGD and optimize quickly.
```
# Standardize the data (mean=0, std=1) using training data
X_scaler = StandardScaler().fit(X_train)
y_scaler = StandardScaler().fit(y_train.values.reshape(-1,1))
# Apply scaler on training and test data
standardized_X_train = X_scaler.transform(X_train)
standardized_y_train = y_scaler.transform(y_train.values.reshape(-1,1)).ravel()
standardized_X_test = X_scaler.transform(X_test)
standardized_y_test = y_scaler.transform(y_test.values.reshape(-1,1)).ravel()
# Check
print ("mean:", np.mean(standardized_X_train, axis=0),
np.mean(standardized_y_train, axis=0)) # mean should be ~0
print ("std:", np.std(standardized_X_train, axis=0),
np.std(standardized_y_train, axis=0)) # std should be 1
# Initialize the model
lm = SGDRegressor(loss="squared_loss", penalty="none", max_iter=args.num_epochs)
# Train
lm.fit(X=standardized_X_train, y=standardized_y_train)
# Predictions (unstandardize them)
pred_train = (lm.predict(standardized_X_train) * np.sqrt(y_scaler.var_)) + y_scaler.mean_
pred_test = (lm.predict(standardized_X_test) * np.sqrt(y_scaler.var_)) + y_scaler.mean_
```
# Evaluation
There are several evaluation techniques to see how well our model performed.
```
import matplotlib.pyplot as plt
# Train and test MSE
train_mse = np.mean((y_train - pred_train) ** 2)
test_mse = np.mean((y_test - pred_test) ** 2)
print ("train_MSE: {0:.2f}, test_MSE: {1:.2f}".format(train_mse, test_mse))
```
Besides MSE, when we only have one feature, we can visually inspect the model.
```
# Figure size
plt.figure(figsize=(15,5))
# Plot train data
plt.subplot(1, 2, 1)
plt.title("Train")
plt.scatter(X_train, y_train, label="y_train")
plt.plot(X_train, pred_train, color="red", linewidth=1, linestyle="-", label="lm")
plt.legend(loc='lower right')
# Plot test data
plt.subplot(1, 2, 2)
plt.title("Test")
plt.scatter(X_test, y_test, label="y_test")
plt.plot(X_test, pred_test, color="red", linewidth=1, linestyle="-", label="lm")
plt.legend(loc='lower right')
# Show plots
plt.show()
```
# Inference
```
# Feed in your own inputs
X_infer = np.array((0, 1, 2), dtype=np.float32)
standardized_X_infer = X_scaler.transform(X_infer.reshape(-1, 1))
pred_infer = (lm.predict(standardized_X_infer) * np.sqrt(y_scaler.var_)) + y_scaler.mean_
print (pred_infer)
df.head(3)
```
# Interpretability
Linear regression offers the great advantage of being highly interpretable. Each feature has a coefficient which signifies its importance/impact on the output variable y. We can interpret our coefficient as follows: by increasing X by 1 unit, we increase y by $W$ (~3.65) units.
**Note**: Since we standardized our inputs and outputs for gradient descent, we need to apply an operation to our coefficients and intercept to interpret them. See proof below.
```
# Unstandardize coefficients
coef = lm.coef_ * (y_scaler.scale_/X_scaler.scale_)
intercept = lm.intercept_ * y_scaler.scale_ + y_scaler.mean_ - np.sum(coef*X_scaler.mean_)
print (coef) # ~3.65
print (intercept) # ~10
```
### Proof for unstandardizing coefficients:
Note that both X and y were standardized.
$\frac{\hat{y} - \bar{y}}{\sigma_y} = \hat{W}_0 + \sum_{j=1}^{k}\hat{W}_jz_j$
$z_j = \frac{x_j - \bar{x}_j}{\sigma_j}$
$ \hat{y}_{scaled} = \frac{\hat{y}_{unscaled} - \bar{y}}{\sigma_y} = \hat{W_0} + \sum_{j=1}^{k} \hat{W}_j (\frac{x_j - \bar{x}_j}{\sigma_j}) $
$\hat{y}_{unscaled} = \hat{W}_0\sigma_y + \bar{y} - \sum_{j=1}^{k} \hat{W}_j(\frac{\sigma_y}{\sigma_j})\bar{x}_j + \sum_{j=1}^{k}\hat{W}_j(\frac{\sigma_y}{\sigma_j})x_j $
# Regularization
Regularization helps decrease overfitting. Below is L2 regularization (ridge regression). There are many forms of regularization, but they all work to reduce overfitting in our models. With L2 regularization, we penalize weights with large magnitudes by decaying them. Having certain weights with very high magnitudes leads to a preferential bias towards some inputs, and we want the model to work with all the inputs and not just a select few. There are also other types of regularization like L1 (lasso regression), which is useful for creating sparse models where some feature coefficients are zeroed out, or elastic net, which combines L1 and L2 penalties.
**Note**: Regularization is not just for linear regression. You can use it to regularize any model's weights, including the ones we will look at in future lessons.
* $ J(\theta) = \frac{1}{2}\sum_{i}(X_iW - y_i)^2 + \frac{\lambda}{2}\sum_{j} W_j^2$
* $ \frac{\partial{J}}{\partial{W}} = X^T (\hat{y} - y) + \lambda W $
* $W = W- \alpha\frac{\partial{J}}{\partial{W}}$
where:
* $\lambda$ is the regularization coefficient
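To see how this changes the from-scratch sketch from the Training section (again illustrative only, not part of the original lesson), the update simply gains the extra $\lambda W$ term:
```
import numpy as np

# Same toy data as in the earlier sketch
rng = np.random.RandomState(0)
X = rng.uniform(-1, 1, size=(100, 1))
y = 3.65 * X[:, 0] + 10 + 0.1 * rng.randn(100)
Xb = np.hstack([X, np.ones((100, 1))])
W = rng.randn(2)

lr, lam = 0.1, 1e-2                                 # illustrative learning rate and lambda
for _ in range(200):
    y_pred = Xb @ W
    grad = Xb.T @ (y_pred - y) / len(y) + lam * W   # the L2 penalty adds "+ lam * W"
    W -= lr * grad

print(W)  # close to [3.65, 10], shrunk slightly towards zero by the penalty
```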
```
# Initialize the model with L2 regularization
lm = SGDRegressor(loss="squared_loss", penalty='l2', alpha=1e-2,
max_iter=args.num_epochs)
# Train
lm.fit(X=standardized_X_train, y=standardized_y_train)
# Predictions (unstandardize them)
pred_train = (lm.predict(standardized_X_train) * np.sqrt(y_scaler.var_)) + y_scaler.mean_
pred_test = (lm.predict(standardized_X_test) * np.sqrt(y_scaler.var_)) + y_scaler.mean_
# Train and test MSE
train_mse = np.mean((y_train - pred_train) ** 2)
test_mse = np.mean((y_test - pred_test) ** 2)
print ("train_MSE: {0:.2f}, test_MSE: {1:.2f}".format(
train_mse, test_mse))
```
Regularization didn't help much with this specific example because our data is generated from a perfect linear equation, but for realistic data, regularization can help our model generalize well.
```
# Unstandardize coefficients
coef = lm.coef_ * (y_scaler.scale_/X_scaler.scale_)
intercept = lm.intercept_ * y_scaler.scale_ + y_scaler.mean_ - np.sum(coef*X_scaler.mean_)
print (coef) # ~3.65
print (intercept) # ~10
```
# Categorical variables
In our example, the feature was a continuous variable but what if we also have features that are categorical? One option is to treat the categorical variables as one-hot encoded variables. This is very easy to do with Pandas and once you create the dummy variables, you can use the same steps as above to train your linear model.
```
# Create data with categorical features
cat_data = pd.DataFrame(['a', 'b', 'c', 'a'], columns=['favorite_letter'])
cat_data.head()
dummy_cat_data = pd.get_dummies(cat_data)
dummy_cat_data.head()
```
Now you can concat this with your continuous features and train the linear model.
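For example (reusing `dummy_cat_data` from the cell above; the `height_cm` column is a made-up continuous feature, purely for illustration):
```
# Combine continuous and one-hot encoded categorical features
continuous_data = pd.DataFrame([170.0, 183.0, 160.0, 175.0], columns=['height_cm'])
features = pd.concat([continuous_data, dummy_cat_data], axis=1)
features.head()
```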
# TODO
- polynomial regression
- simple example with normal equation method (sklearn.linear_model.LinearRegression) with pros and cons vs. SGD linear regression
```
%matplotlib notebook
import control as c
import ipywidgets as w
import numpy as np
from IPython.display import display, HTML
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.animation as animation
display(HTML('<script> $(document).ready(function() { $("div.input").hide(); }); </script>'))
```
## Control design for a 1DoF mass-spring-damper system
The following example is a control design task for a mass-spring-damper system, a typical second-order model. The structure consists of a sliding mass (friction is ignored), connected to a reference point with an infinitely expandable spring-damper pair.<br><br>
<img src="Images/mbk.png" width="40%" />
<br>
Its equation of motion can be stated as:
<br>
$$m\cdot\ddot{x}+b\cdot\dot{x}+k\cdot{x}=F$$
<br>
After the Laplace transformation of the differential equation, the transfer function can be expressed as:
<br>
$$G(s)=\frac{1}{m\cdot s^2 +b\cdot s + k}$$
<br>
Your task is to choose a controller type, and tune it to acceptable levels of performance!
<b>First, choose a system model!</b><br>
Toggle between different realistic models with randomly preselected values (buttons *Model 1* - *Model 6*). By clicking the *Preset* button, default, valid, predetermined controller parameters are set and cannot be tuned further.
```
# Figure definition
fig1, ((f1_ax1), (f1_ax2)) = plt.subplots(2, 1)
fig1.set_size_inches((9.8, 5))
fig1.set_tight_layout(True)
f1_line1, = f1_ax1.plot([], [])
f1_line2, = f1_ax2.plot([], [])
f1_ax1.grid(which='both', axis='both', color='lightgray')
f1_ax2.grid(which='both', axis='both', color='lightgray')
f1_ax1.autoscale(enable=True, axis='both', tight=True)
f1_ax2.autoscale(enable=True, axis='both', tight=True)
f1_ax1.set_title('Bode magnitude plot', fontsize=11)
f1_ax1.set_xscale('log')
f1_ax1.set_xlabel(r'$f\/$[Hz]', labelpad=0, fontsize=10)
f1_ax1.set_ylabel(r'$A\/$[dB]', labelpad=0, fontsize=10)
f1_ax1.tick_params(axis='both', which='both', pad=0, labelsize=8)
f1_ax2.set_title('Bode phase plot', fontsize=11)
f1_ax2.set_xscale('log')
f1_ax2.set_xlabel(r'$f\/$[Hz]', labelpad=0, fontsize=10)
f1_ax2.set_ylabel(r'$\phi\/$[°]', labelpad=0, fontsize=10)
f1_ax2.tick_params(axis='both', which='both', pad=0, labelsize=8)
# System parameters
def build_base_model(m, k, b):
W_sys = c.tf([1], [m, b, k])
print('System transfer function:')
print(W_sys)
# System analysis
poles = c.pole(W_sys) # Poles
print('System poles:\n')
print(poles)
global f1_line1, f1_line2
f1_ax1.lines.remove(f1_line1)
f1_ax2.lines.remove(f1_line2)
mag, phase, omega = c.bode_plot(W_sys, Plot=False) # Bode-plot
f1_line1, = f1_ax1.plot(omega/2/np.pi, 20*np.log10(mag), lw=1, color='blue')
f1_line2, = f1_ax2.plot(omega/2/np.pi, phase*180/np.pi, lw=1, color='blue')
f1_ax1.relim()
f1_ax2.relim()
f1_ax1.autoscale_view()
f1_ax2.autoscale_view()
# GUI widgets
typeSelect = w.ToggleButtons(
options=[('Model 1', 0), ('Model 2', 1), ('Model 3', 2), ('Model 4', 3), ('Model 5', 4), ('Model 6', 5), ('Preset', -1)],
value =-1, description='System: ', layout=w.Layout(width='60%'))
m_slider = w.FloatLogSlider(value=0.5, base=10, min=-3, max=3, description='m [kg] :', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'))
k_slider = w.FloatLogSlider(value=100, base=10, min=-2, max=4, description='k [N/m] :', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'))
b_slider = w.FloatLogSlider(value=50, base=10, min=-2, max=4, description='b [Ns/m] :', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'))
input_data = w.interactive_output(build_base_model, {'m':m_slider, 'k':k_slider, 'b':b_slider})
def update_sliders(index):
global m_slider, k_slider, b_slider
mval = [0.05, 0.1, 0.25, 0.5, 1, 5, 0.25]
kval = [1.25, 10, 100, 10, 50, 1000, 50]
bval = [1, 0.5, 2, 10, 10, 20, 1]
m_slider.value = mval[index]
k_slider.value = kval[index]
b_slider.value = bval[index]
if index == -1:
m_slider.disabled = True
k_slider.disabled = True
b_slider.disabled = True
else:
m_slider.disabled = False
k_slider.disabled = False
b_slider.disabled = False
input_data2 = w.interactive_output(update_sliders, {'index':typeSelect})
display(typeSelect, input_data2)
display(w.HBox([m_slider, k_slider, b_slider]), input_data)
```
Depending on your selection, the system is either under- or overdamped.
<br>
<b>Select an appropriate controller configuration! Which one is the best for your system? Why?<br>
Set up your controller for the fastest settling time with at most 25% overshoot!</b>
You can turn on/off each of the I and D components, and if D is active, you can apply the first-order filter as well, based on the derivative time constant.
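With every component enabled, the controller assembled in the next cell (via `c.parallel`) has the time-constant form
$$C(s)=K_p\left(1+\frac{1}{T_i\,s}+\frac{T_d\,s}{\frac{T_d}{F_d}\,s+1}\right),$$
where $K_p$ is the proportional gain, $T_i$ the integral time constant, $T_d$ the derivative time constant and $F_d$ the derivative filter divisor; toggling I or D off removes the corresponding term, and disabling the filter leaves a pure $T_d\,s$ derivative.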
```
# PID position control
fig2, ((f2_ax1, f2_ax2, f2_ax3), (f2_ax4, f2_ax5, f2_ax6)) = plt.subplots(2, 3)
fig2.set_size_inches((9.8, 5))
fig2.set_tight_layout(True)
f2_line1, = f2_ax1.plot([], [])
f2_line2, = f2_ax2.plot([], [])
f2_line3, = f2_ax3.plot([], [])
f2_line4, = f2_ax4.plot([], [])
f2_line5, = f2_ax5.plot([], [])
f2_line6, = f2_ax6.plot([], [])
f2_ax1.grid(which='both', axis='both', color='lightgray')
f2_ax2.grid(which='both', axis='both', color='lightgray')
f2_ax3.grid(which='both', axis='both', color='lightgray')
f2_ax4.grid(which='both', axis='both', color='lightgray')
f2_ax5.grid(which='both', axis='both', color='lightgray')
f2_ax6.grid(which='both', axis='both', color='lightgray')
f2_ax1.autoscale(enable=True, axis='both', tight=True)
f2_ax2.autoscale(enable=True, axis='both', tight=True)
f2_ax3.autoscale(enable=True, axis='both', tight=True)
f2_ax4.autoscale(enable=True, axis='both', tight=True)
f2_ax5.autoscale(enable=True, axis='both', tight=True)
f2_ax6.autoscale(enable=True, axis='both', tight=True)
f2_ax1.set_title('Closed loop step response', fontsize=9)
f2_ax1.set_xlabel(r'$t\/$[s]', labelpad=0, fontsize=8)
f2_ax1.set_ylabel(r'$x\/$[m]', labelpad=0, fontsize=8)
f2_ax1.tick_params(axis='both', which='both', pad=0, labelsize=6)
f2_ax2.set_title('Nyquist diagram', fontsize=9)
f2_ax2.set_xlabel(r'Re', labelpad=0, fontsize=8)
f2_ax2.set_ylabel(r'Im', labelpad=0, fontsize=8)
f2_ax2.tick_params(axis='both', which='both', pad=0, labelsize=6)
f2_ax3.set_title('Bode magnitude plot', fontsize=9)
f2_ax3.set_xscale('log')
f2_ax3.set_xlabel(r'$f\/$[Hz]', labelpad=0, fontsize=8)
f2_ax3.set_ylabel(r'$A\/$[dB]', labelpad=0, fontsize=8)
f2_ax3.tick_params(axis='both', which='both', pad=0, labelsize=6)
f2_ax4.set_title('Closed loop impulse response', fontsize=9)
f2_ax4.set_xlabel(r'$t\/$[s]', labelpad=0, fontsize=8)
f2_ax4.set_ylabel(r'$x\/$[m]', labelpad=0, fontsize=8)
f2_ax4.tick_params(axis='both', which='both', pad=0, labelsize=6)
f2_ax5.set_title('Load transfer step response', fontsize=9)
f2_ax5.set_xlabel(r'$t\/$[s]', labelpad=0, fontsize=8)
f2_ax5.set_ylabel(r'$x\/$[m]', labelpad=0, fontsize=8)
f2_ax5.tick_params(axis='both', which='both', pad=0, labelsize=6)
f2_ax6.set_title('Bode phase plot', fontsize=9)
f2_ax6.set_xscale('log')
f2_ax6.set_xlabel(r'$f\/$[Hz]', labelpad=0, fontsize=8)
f2_ax6.set_ylabel(r'$\phi\/$[°]', labelpad=0, fontsize=8)
f2_ax6.tick_params(axis='both', which='both', pad=0, labelsize=6)
def position_control(Kp, Ti, Td, Fd, Ti0, Td0, Fd0, m, k, b):
W_sys = c.tf([1], [m, b, k])
# PID Controller
P = Kp # Proportional term
I = Kp / Ti # Integral term
D = Kp * Td # Derivative term
Td_f = Td / Fd # Derivative term filter
W_PID = c.parallel(c.tf([P], [1]),
c.tf([I * Ti0], [1 * Ti0, 1 * (not Ti0)]),
c.tf([D * Td0, 0], [Td_f * Td0 * Fd0, 1])) # PID controller in time constant format
W_open = c.series(W_PID, W_sys) # Open-loop transfer function (PID controller in series with the plant)
W_closed = c.feedback(W_open, 1, -1) # Closed loop with negative feedback
W_load = c.feedback(W_sys, W_PID, -1) # Transfer function of the load based errors
# Display
global f2_line1, f2_line2, f2_line3, f2_line4, f2_line5, f2_line6
f2_ax1.lines.remove(f2_line1)
f2_ax2.lines.remove(f2_line2)
f2_ax3.lines.remove(f2_line3)
f2_ax4.lines.remove(f2_line4)
f2_ax5.lines.remove(f2_line5)
f2_ax6.lines.remove(f2_line6)
tout, yout = c.step_response(W_closed)
f2_line1, = f2_ax1.plot(tout, yout, lw=1, color='blue')
_, _, ob = c.nyquist_plot(W_open, Plot=False) # Small resolution plot to determine bounds
real, imag, freq = c.nyquist_plot(W_open, omega=np.logspace(np.log10(ob[0]), np.log10(ob[-1]), 1000), Plot=False)
f2_line2, = f2_ax2.plot(real, imag, lw=1, color='blue')
mag, phase, omega = c.bode_plot(W_open, Plot=False)
f2_line3, = f2_ax3.plot(omega/2/np.pi, 20*np.log10(mag), lw=1, color='blue')
f2_line6, = f2_ax6.plot(omega/2/np.pi, phase*180/np.pi, lw=1, color='blue')
tout, yout = c.impulse_response(W_closed)
f2_line4, = f2_ax4.plot(tout, yout, lw=1, color='blue')
tout, yout = c.step_response(W_load)
f2_line5, = f2_ax5.plot(tout, yout, lw=1, color='blue')
f2_ax1.relim()
f2_ax2.relim()
f2_ax3.relim()
f2_ax4.relim()
f2_ax5.relim()
f2_ax6.relim()
f2_ax1.autoscale_view()
f2_ax2.autoscale_view()
f2_ax3.autoscale_view()
f2_ax4.autoscale_view()
f2_ax5.autoscale_view()
f2_ax6.autoscale_view()
def update_controller(index):
global Kp_slider, Ti_slider, Td_slider, Fd_slider, Ti_button, Td_button, Fd_button
if index == -1:
Kp_slider.value = 100
Td_slider.value = 0.05
Fd_slider.value = 10
Ti_button.value = False
Td_button.value = True
Fd_button.value = True
Kp_slider.disabled = True
Ti_slider.disabled = True
Td_slider.disabled = True
Fd_slider.disabled = True
Ti_button.disabled = True
Td_button.disabled = True
Fd_button.disabled = True
else:
Kp_slider.disabled = False
Ti_slider.disabled = False
Td_slider.disabled = False
Fd_slider.disabled = False
Ti_button.disabled = False
Td_button.disabled = False
Fd_button.disabled = False
# GUI widgets
Kp_slider = w.FloatLogSlider(value=0.5, base=10, min=-1, max=4, description='Kp:', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'))
Ti_slider = w.FloatLogSlider(value=0.0035, base=10, min=-4, max=1, description='', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'))
Td_slider = w.FloatLogSlider(value=1, base=10, min=-4, max=1, description='', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'))
Fd_slider = w.FloatLogSlider(value=1, base=10, min=0, max=3, description='', continuous_update=False,
layout=w.Layout(width='auto', flex='5 5 auto'))
Ti_button = w.ToggleButton(value=True, description='Ti',
layout=w.Layout(width='auto', flex='1 1 0%'))
Td_button = w.ToggleButton(value=False, description='Td',
layout=w.Layout(width='auto', flex='1 1 0%'))
Fd_button = w.ToggleButton(value=False, description='Fd',
layout=w.Layout(width='auto', flex='1 1 0%'))
input_data = w.interactive_output(position_control, {'Kp': Kp_slider, 'Ti': Ti_slider, 'Td': Td_slider,
'Fd': Fd_slider, 'Ti0' : Ti_button, 'Td0': Td_button,
'Fd0': Fd_button, 'm':m_slider, 'k':k_slider, 'b':b_slider})
w.interactive_output(update_controller, {'index': typeSelect})
display(w.HBox([Kp_slider, Ti_button, Ti_slider, Td_button, Td_slider, Fd_button, Fd_slider]), input_data)
```
In the following simulation, you can observe the movement of your system based on your controller setup. You can create reference signals and even apply some disturbance and see how the system reacts.
<b>Is your configuration suitable for signal-following? Readjust your controller so that it can follow a sine wave acceptably!</b>
<br><br>
<i>(The animations are scaled to fit the frame through the whole simulation. Because of this, unstable solutions might not seem to move until the very last second.)</i>
```
# Simulation data
anim_fig = plt.figure()
anim_fig.set_size_inches((9.8, 6))
anim_fig.set_tight_layout(True)
anim_ax1 = anim_fig.add_subplot(211)
anim_ax2 = anim_ax1.twinx()
frame_count=1000
l1 = anim_ax1.plot([], [], lw=1, color='blue')
l2 = anim_ax1.plot([], [], lw=2, color='red')
l3 = anim_ax2.plot([], [], lw=1, color='grey')
line1 = l1[0]
line2 = l2[0]
line3 = l3[0]
anim_ax1.legend(l1+l2+l3, ['Reference [m]', 'Output [m]', 'Load [N]'], loc=1)
anim_ax1.set_title('Time response simulation', fontsize=12)
anim_ax1.set_xlabel(r'$t\/$[s]', labelpad=0, fontsize=10)
anim_ax1.set_ylabel(r'$x\/$[m]', labelpad=0, fontsize=10)
anim_ax1.tick_params(axis='both', which='both', pad=0, labelsize=8)
anim_ax2.set_ylabel(r'$F\/$[N]', labelpad=0, fontsize=10)
anim_ax2.tick_params(axis='both', which='both', pad=0, labelsize=8)
anim_ax1.grid(which='both', axis='both', color='lightgray')
T_plot = []
X_plot = []
L_plot = []
R_plot = []
# Scene data
scene_ax = anim_fig.add_subplot(212)
scene_ax.set_xlim((-3, 4))
scene_ax.set_ylim((-0.5, 1.5))
scene_ax.axis('off')
scene_ax.plot([-2.5, -2.3, -2.3, -0.3, -2.3, -2.3, -0.3], [0.75, 0.75, 0.9, 0.9, 0.9, 0.6, 0.6], lw=2, color='blue', zorder=0)
scene_ax.plot([-2.5, -2.3], [0.25, 0.25], lw=2, color='red', zorder=0)
scene_ax.plot([-2.5, -2.5], [1.25, -0.25], lw=4, color='gray', zorder=2)
scene_ax.text(-1.3, 1, 'b', fontsize=14, color='blue', va='bottom', zorder=5)
scene_ax.text(-1.3, 0, 'k', fontsize=14, color='red', va='top', zorder=5)
b_line, = scene_ax.plot([], [], lw=2, color='blue')
k_line, = scene_ax.plot([], [], lw=2, color='red')
m_text = scene_ax.text(1.75, 0.5, 'm', fontsize=14, color='green', va='center', ha='center', zorder=5)
m_box = patches.Rectangle((1, 0), 1.5, 1, lw=2, color='green', fill=False, zorder=10)
scene_ax.add_patch(m_box)
x_arrow = scene_ax.arrow(1.75, -0.5, 0, 0.25, color='blue', head_width=0.1,
length_includes_head=True, lw=1, fill=False, zorder=5)
r_arrow = scene_ax.arrow(1.75, -0.5, 0, 0.25, color='red', head_width=0.1,
length_includes_head=True, lw=1, fill=False, zorder=5)
base_arrow = x_arrow.xy
pos_var = []
ref_var = []
#Simulation function
def simulation(Kp, Ti, Td, Fd, Ti0, Td0, Fd0, m, k, b, T, dt, X, Xf, Xa, Xo, L, Lf, La, Lo):
# Controller
P = Kp # Proportional term
I = Kp / Ti # Integral term
D = Kp * Td # Derivative term
Td_f = Td / Fd # Derivative term filter
W_PID = c.parallel(c.tf([P], [1]),
c.tf([I * Ti0], [1 * Ti0, 1 * (not Ti0)]),
c.tf([D * Td0, 0], [Td_f * Td0 * Fd0, 1])) # PID controller
# System
W_sys = c.tf([1], [m, b, k])
# Model
W_open = c.series(W_PID, W_sys) # Open-loop transfer function (PID controller in series with the plant)
W_closed = c.feedback(W_open, 1, -1) # Closed loop with negative feedback
W_load = c.feedback(W_sys, W_PID, -1) # Transfer function of the load based errors
# Reference and disturbance signals
T_sim = np.arange(0, T, dt, dtype=np.float64)
if X == 0: # Constant reference
X_sim = np.full_like(T_sim, Xa * Xo)
elif X == 1: # Sine wave reference
X_sim = (np.sin(2 * np.pi * Xf * T_sim) + Xo) * Xa
elif X == 2: # Square wave reference
X_sim = (np.sign(np.sin(2 * np.pi * Xf * T_sim)) + Xo) * Xa
if L == 0: # Constant load
L_sim = np.full_like(T_sim, La * Lo)
elif L == 1: # Sine wave load
L_sim = (np.sin(2 * np.pi * Lf * T_sim) + Lo) * La
elif L == 2: # Square wave load
L_sim = (np.sign(np.sin(2 * np.pi * Lf * T_sim)) + Lo) * La
elif L == 3: # Noise-type load
L_sim = np.interp(T_sim, np.linspace(0, T, int(T * Lf) + 2),
np.random.normal(loc=(Lo * La), scale=La, size=int(T * Lf) + 2))
# System response
Tx, youtx, xoutx = c.forced_response(W_closed, T_sim, X_sim)
Tl, youtl, xoutl = c.forced_response(W_load, T_sim, L_sim)
R_sim = np.nan_to_num(youtx + youtl)
# Display
XR_max = max(np.amax(np.absolute(np.concatenate((X_sim, R_sim)))), Xa)
L_max = max(np.amax(np.absolute(L_sim)), La)
anim_ax1.set_xlim((0, T))
anim_ax1.set_ylim((-1.2 * XR_max, 1.2 * XR_max))
anim_ax2.set_ylim((-1.5 * L_max, 1.5 * L_max))
global T_plot, X_plot, L_plot, R_plot, pos_var, ref_var
T_plot = np.linspace(0, T, frame_count, dtype=np.float32)
X_plot = np.interp(T_plot, T_sim, X_sim)
L_plot = np.interp(T_plot, T_sim, L_sim)
R_plot = np.interp(T_plot, T_sim, R_sim)
pos_var = R_plot/XR_max
ref_var = X_plot/XR_max
def anim_init():
line1.set_data([], [])
line2.set_data([], [])
line3.set_data([], [])
b_line.set_data([], [])
k_line.set_data([], [])
x_arrow.set_xy(base_arrow)
r_arrow.set_xy(base_arrow)
m_text.set_position((1.75, 0.5))
m_box.set_xy((1, 0))
return (line1, line2, line3, m_text, m_box, b_line, k_line,)
def animate(i):
line1.set_data(T_plot[0:i], X_plot[0:i])
line2.set_data(T_plot[0:i], R_plot[0:i])
line3.set_data(T_plot[0:i], L_plot[0:i])
b_line.set_data([-1.3, -1.3, -1.3, 1]+pos_var[i], [0.66, 0.84, 0.75, 0.75])
k_line.set_data(np.append(np.array([0, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 22])*(pos_var[i]+2)/20-2.3, pos_var[i]+1),
[0.25, 0.34, 0.16, 0.34, 0.16, 0.34, 0.16, 0.34, 0.16, 0.34, 0.16, 0.34, 0.25, 0.25])
x_arrow.set_xy(base_arrow+[ref_var[i], 0])
r_arrow.set_xy(base_arrow+[pos_var[i], 0])
m_text.set_position((pos_var[i]+1.75, 0.5))
m_box.set_x(pos_var[i]+1)
return (line1, line2, line3, m_text, m_box, b_line, k_line,)
anim = animation.FuncAnimation(anim_fig, animate, init_func=anim_init,
frames=frame_count, interval=10, blit=True,
repeat=True)
# Controllers
T_slider = w.FloatLogSlider(value=10, base=10, min=-0.7, max=1, step=0.01,
description='Duration [s]:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
dt_slider = w.FloatLogSlider(value=0.1, base=10, min=-3, max=-1, step=0.01,
description='Timestep [s]:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
X_type = w.Dropdown(options=[('Constant', 0), ('Sine', 1), ('Square', 2)], value=1,
description='Reference: ', continuous_update=False, layout=w.Layout(width='auto', flex='3 3 auto'))
Xf_slider = w.FloatLogSlider(value=0.5, base=10, min=-2, max=2, step=0.01,
description='Frequency [Hz]:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
Xa_slider = w.FloatLogSlider(value=1, base=10, min=-2, max=2, step=0.01,
description='Amplitude [m]:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
Xo_slider = w.FloatSlider(value=0, min=-10, max=10, description='Offset/Ampl:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
L_type = w.Dropdown(options=[('Constant', 0), ('Sine', 1), ('Square', 2), ('Noise', 3)], value=2,
description='Load: ', continuous_update=False, layout=w.Layout(width='auto', flex='3 3 auto'))
Lf_slider = w.FloatLogSlider(value=1, base=10, min=-2, max=2, step=0.01,
description='Frequency [Hz]:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
La_slider = w.FloatLogSlider(value=0.1, base=10, min=-2, max=2, step=0.01,
description='Amplitude [N]:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
Lo_slider = w.FloatSlider(value=0, min=-10, max=10, description='Offset/Ampl:', continuous_update=False,
orientation='vertical', layout=w.Layout(width='auto', height='auto', flex='1 1 auto'))
input_data = w.interactive_output(simulation, {'Kp': Kp_slider, 'Ti': Ti_slider, 'Td': Td_slider,
'Fd': Fd_slider, 'Ti0' : Ti_button, 'Td0': Td_button,
'Fd0': Fd_button,
'm':m_slider, 'k':k_slider, 'b':b_slider,
'T': T_slider, 'dt': dt_slider,
'X': X_type, 'Xf': Xf_slider, 'Xa': Xa_slider, 'Xo': Xo_slider,
'L': L_type, 'Lf': Lf_slider, 'La': La_slider, 'Lo': Lo_slider})
display(w.HBox([w.HBox([T_slider, dt_slider], layout=w.Layout(width='25%')),
w.Box([], layout=w.Layout(width='5%')),
w.VBox([X_type, w.HBox([Xf_slider, Xa_slider, Xo_slider])], layout=w.Layout(width='30%')),
w.Box([], layout=w.Layout(width='5%')),
w.VBox([L_type, w.HBox([Lf_slider, La_slider, Lo_slider])], layout=w.Layout(width='30%'))],
layout=w.Layout(width='100%', justify_content='center')), input_data)
```
The duration parameter controls the simulated timeframe and does not affect the runtime of the animation. In contrast, the timestep controls the model sampling and can refine the results in exchange for higher computational resources.
<a href="https://colab.research.google.com/github/kartikgill/The-GAN-Book/blob/main/Skill-07/W-GAN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Importing useful Libraries
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm_notebook
%matplotlib inline
import tensorflow
print (tensorflow.__version__)
```
# Download and show data
```
from tensorflow.keras.datasets import fashion_mnist, mnist
(trainX, trainY), (testX, testY) = mnist.load_data()
print('Training data shapes: X=%s, y=%s' % (trainX.shape, trainY.shape))
print('Testing data shapes: X=%s, y=%s' % (testX.shape, testY.shape))
for k in range(9):
plt.figure(figsize=(9, 6))
for j in range(9):
i = np.random.randint(0, 10000)
plt.subplot(990 + 1 + j)
plt.imshow(trainX[i], cmap='gray_r')
#plt.title(trainY[i])
plt.axis('off')
plt.show()
#Ten classes
set(trainY)
```
# Data Normalization
```
trainX = [(image-127.5)/127.5 for image in trainX]
testX = [(image-127.5)/127.5 for image in testX]
trainX = np.reshape(trainX, (60000, 28, 28, 1))
testX = np.reshape(testX, (10000, 28, 28, 1))
print (trainX.shape, testX.shape, trainY.shape, testY.shape)
```
# Define Generator Model
```
random_input = tensorflow.keras.layers.Input(shape = 100)
x = tensorflow.keras.layers.Dense(7*7*128)(random_input)
x = tensorflow.keras.layers.Reshape((7, 7, 128))(x)
x = tensorflow.keras.layers.Conv2DTranspose(filters=128, kernel_size=(3,3), strides=2, padding='same')(x)
x = tensorflow.keras.layers.BatchNormalization(momentum=0.8)(x)
x = tensorflow.keras.layers.Activation('relu')(x)
x = tensorflow.keras.layers.Conv2DTranspose(filters=128, kernel_size=(3,3), strides=2, padding='same')(x)
x = tensorflow.keras.layers.BatchNormalization(momentum=0.8)(x)
x = tensorflow.keras.layers.Activation('relu')(x)
x = tensorflow.keras.layers.Conv2DTranspose(filters=128, kernel_size=(3,3), padding='same')(x)
x = tensorflow.keras.layers.Activation('relu')(x)
x = tensorflow.keras.layers.Conv2DTranspose(filters=1, kernel_size=(4,4), padding='same')(x)
generated_image = tensorflow.keras.layers.Activation('tanh')(x)
generator_network = tensorflow.keras.models.Model(inputs=random_input, outputs=generated_image)
generator_network.summary()
```
# Define Critic
```
image_input = tensorflow.keras.layers.Input(shape=(28, 28, 1))
x = tensorflow.keras.layers.Conv2D(filters=64, kernel_size=(3,3), strides=2, padding='same')(image_input)
x = tensorflow.keras.layers.LeakyReLU(alpha=0.2)(x)
x = tensorflow.keras.layers.Dropout(0.25)(x)
x = tensorflow.keras.layers.Conv2D(filters=128, kernel_size=(3,3), strides=2, padding='same')(x)
x = tensorflow.keras.layers.BatchNormalization(momentum=0.8)(x)
x = tensorflow.keras.layers.LeakyReLU(alpha=0.2)(x)
x = tensorflow.keras.layers.Dropout(0.25)(x)
x = tensorflow.keras.layers.Conv2D(filters=128, kernel_size=(3,3), strides=2, padding='same')(x)
x = tensorflow.keras.layers.BatchNormalization(momentum=0.8)(x)
x = tensorflow.keras.layers.LeakyReLU(alpha=0.2)(x)
x = tensorflow.keras.layers.Dropout(0.25)(x)
x = tensorflow.keras.layers.Conv2D(filters=256, kernel_size=(3,3), padding='same')(x)
x = tensorflow.keras.layers.BatchNormalization(momentum=0.8)(x)
x = tensorflow.keras.layers.LeakyReLU(alpha=0.2)(x)
x = tensorflow.keras.layers.Dropout(0.25)(x)
x = tensorflow.keras.layers.Flatten()(x)
# No activation in final layer
c_out = tensorflow.keras.layers.Dense(1)(x)
critic_network = tensorflow.keras.models.Model(inputs=image_input, outputs=c_out)
print (critic_network.summary())
```
# Define Wasserstein Loss
```
# custom loss function
def wasserstein_loss(y_true, y_pred):
return tensorflow.keras.backend.mean(y_true * y_pred)
```
# Compiling Critic Network
```
RMSprop_optimizer = tensorflow.keras.optimizers.RMSprop(lr=0.00005)
critic_network.compile(loss=wasserstein_loss, optimizer=RMSprop_optimizer, metrics=['accuracy'])
```
# Define Wasserstein GAN (W-GAN)
```
critic_network.trainable=False
g_output = generator_network(random_input)
c_output = critic_network(g_output)
wgan_model = tensorflow.keras.models.Model(inputs = random_input, outputs = c_output)
wgan_model.summary()
```
# Compiling WGAN
```
wgan_model.compile(loss=wasserstein_loss, optimizer=RMSprop_optimizer)
```
# Define Data Generators
```
indices = [i for i in range(0, len(trainX))]
def get_random_noise(batch_size, noise_size):
random_values = np.random.randn(batch_size*noise_size)
random_noise_batches = np.reshape(random_values, (batch_size, noise_size))
return random_noise_batches
def get_fake_samples(generator_network, batch_size, noise_size):
random_noise_batches = get_random_noise(batch_size, noise_size)
fake_samples = generator_network.predict_on_batch(random_noise_batches)
return fake_samples
def get_real_samples(batch_size):
random_indices = np.random.choice(indices, size=batch_size)
real_images = trainX[np.array(random_indices),:]
return real_images
def show_generator_results(generator_network):
for k in range(7):
plt.figure(figsize=(9, 6))
random_noise_batches = get_random_noise(7, noise_size)
fake_samples = generator_network.predict_on_batch(random_noise_batches)
for j in range(7):
i = j
plt.subplot(770 + 1 + j)
plt.imshow(((fake_samples[i,:,:,-1])/2.0)+0.5, cmap='gray_r')
plt.axis('off')
plt.show()
return
```
# Training W-GAN
```
epochs = 500
batch_size = 64
steps = 500
noise_size = 100
for i in range(0, epochs):
if (i%1 == 0):
op = show_generator_results(generator_network)
#print (op)
for j in range(steps):
# Train the critic multiple times per generator update (number of critic steps = 5)
for _ in range(5):
fake_samples = get_fake_samples(generator_network, batch_size//2, noise_size)
real_samples = get_real_samples(batch_size=batch_size//2)
fake_y = np.ones((batch_size//2, 1))
real_y = -1 * np.ones((batch_size//2, 1))
# Updating Critic weights
critic_network.trainable=True
loss_c_real = critic_network.train_on_batch(real_samples, real_y)
loss_c_fake = critic_network.train_on_batch(fake_samples, fake_y)
loss_c = np.add(loss_c_real, loss_c_fake)/2.0
# Clip critic weights
for l in critic_network.layers:
weights = l.get_weights()
weights = [np.clip(w, -0.01, 0.01) for w in weights]
l.set_weights(weights)
if False:
print ("C_real_loss: %.3f, C_fake_loss: %.3f, C_loss: %.3f"%(loss_c_real[0], loss_c_fake[0], loss_c[0]))
noise_batches = get_random_noise(batch_size, noise_size)
wgan_input = noise_batches
# Make the critic believe that these are real samples and calculate the loss to train the generator
wgan_output = -1 * np.ones((batch_size, 1))
# Updating Generator weights
critic_network.trainable=False
loss_g = wgan_model.train_on_batch(wgan_input, wgan_output)
if j%50 == 0:
print ("Epoch:%.0f, Step:%.0f, C-Loss:%.6f, G-Loss:%.6f"%(i,j,loss_c[0] ,loss_g))
```
# Results
```
for i in range(2):
show_generator_results(generator_network)
print("-"*100)
```
# Day 13 - Prime number factors
* https://adventofcode.com/2020/day/13
For part 1, we need to find the next multiple of a bus ID that's equal to or greater than our earliest departure time. The bus IDs, which determine their frequency, are all prime numbers, of course.
We can calculate the next bus departure $t$ for a given ID $b$ on or after earliest departure time $T$ as $t = b * \lceil T / b \rceil$ ($b$ multiplied by the ceiling of the division of $T$ by $b$).
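For the example in the puzzle (earliest departure $T = 939$ and bus ID $59$): $59 \cdot \lceil 939 / 59 \rceil = 59 \cdot 16 = 944$, i.e. a 5 minute wait, which is exactly what the test below asserts.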
```
import math
def parse_bus_ids(line: str) -> list[int]:
return [int(b) for b in line.split(",") if b[0] != "x"]
def parse_input(lines: list[str]) -> tuple[int, list[int]]:
return int(lines[0]), parse_bus_ids(lines[1])
def earliest_departure(earliest: int, bus_ids: list[int]) -> tuple[int, int]:
t, bid = min((bid * math.ceil(earliest / bid), bid) for bid in bus_ids)
return t - earliest, bid
test_earliest, test_bus_ids = parse_input(["939", "7,13,x,x,59,x,31,19"])
assert earliest_departure(test_earliest, test_bus_ids) == (5, 59)
import aocd
data = aocd.get_data(day=13, year=2020).splitlines()
earliest, bus_ids = parse_input(data)
wait_time, bus_id = earliest_departure(earliest, bus_ids)
print("Part 1:", wait_time * bus_id)
```
## Part 2: Chinese remainder theorem.
For part 2, we need to use the [Chinese remainder theorem](https://en.wikipedia.org/wiki/Chinese_remainder_theorem); this theorem was first introduced by the Chinese mathematician Sun-tzu (quote from the Wikipedia article):
> There are certain things whose number is unknown. If we count them by threes, we have two left over; by fives, we have three left over; and by sevens, two are left over. How many things are there?
We need to find a number that if counted in prime number steps, have an offset left over, where the offset is the prime number minus the index in the bus ids list, modulo the bus id (the matching time stamp lies X minutes *before* the next bus departs).
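For example, for the schedule `17,x,13,19` we need a timestamp $t$ with $t \equiv 0 \pmod{17}$, $t \equiv 11 \pmod{13}$ (since $(13 - 2) \bmod 13 = 11$) and $t \equiv 16 \pmod{19}$ (since $(19 - 3) \bmod 19 = 16$); the smallest such $t$ is $3417$, which matches one of the test cases below.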
I only remembered about the theorem as it was also applicable to [Advent of Code 2017, day 13](../2017/Day%2013.ipynb) (although I didn't know it at the time).
I adapted the [Rossetta Stone Python implementation](https://rosettacode.org/wiki/Chinese_remainder_theorem#Python) for this:
```
from functools import reduce
from operator import mul
from typing import Optional
def solve_chinese_remainder(bus_times: list[Optional[int]]) -> int:
product = reduce(mul, (bid for bid in filter(None, bus_times)))
summed = sum(
((bid - i) % bid) * mul_inv((factor := product // bid), bid) * factor
for i, bid in enumerate(bus_times)
if bid is not None
)
return summed % product
def mul_inv(a: int, b: int) -> int:
if b == 1: return 1
b0, x0, x1 = b, 0, 1
while a > 1:
q = a // b
a, b = b, a % b
x0, x1 = x1 - q * x0, x0
if x1 < 0:
x1 += b0
return x1
def parse_bus_times(line: str) -> list[Optional[int]]:
return [None if bus_id == "x" else int(bus_id) for bus_id in line.split(",")]
tests = {
"7,13,x,x,59,x,31,19": 1068781,
"17,x,13,19": 3417,
"67,7,59,61": 754018,
"67,x,7,59,61": 779210,
"67,7,x,59,61": 1261476,
"1789,37,47,1889": 1202161486,
}
for times, expected in tests.items():
assert solve_chinese_remainder(parse_bus_times(times)) == expected
print("Part 2:", solve_chinese_remainder(parse_bus_times(data[1])))
```
```
import glob, sys
from IPython.display import HTML
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from astropy.io import fits
from pyflowmaps.flow import flowLCT
import warnings
warnings.filterwarnings("ignore")
```
# Load the data
The folder *data/* includes a FITS data cube, coaligned and centered on active region NOAA 1757.
```
cube = fits.getdata('data/cube_sunspot.fits')
print(cube.shape)
```
Look into one of the frames in the cube.
```
fig, ax = plt.subplots(figsize=(10,10))
im=ax.imshow(cube[15],origin='lower',cmap='gray')
ax.set_title('NOAA 1757 frame no. 15')
ax.set_xlabel('X-axis [pix]')
ax.set_ylabel('Y-axis [pix]')
fig.colorbar(im,ax=ax,label='Intensity',shrink=0.82,aspect=15)
```
The shape of the data corresponds to 30 images of 128x128 pix each. The frames are cutouts from HMI/SDO intensity data from 2013-01-05, with a cadence of $720\,s$ and a pixel size of around $\sim 0.504$ arcsec. Another parameter we need is the size of the apodization window ($FWHM$), which for this example will be $3\,arcsec$; this size depends on the size of the features you want to study as well as the resolution of your instrument. The last required parameter is the time over which the velocities are averaged, but this is set implicitly by the length of the input cube. For this example the averaging time is 6 hours ($30\times720\,s=21600\,s=6\,h$).
```
flows = flowLCT(cube, 3, 0.504, 720,method='square',interpolation='fivepoint',window='boxcar')
```
We extract the velocities
```
vx = flows.vx
vy = flows.vy
vz = flows.vz
```
Velocities are returned in $\mathrm{km\,s^{-1}}$. The velocity $v_z$ comes from
$$
v_z = h_m\nabla\cdot v_h(v_x,v_y)
$$
where $v_h$ is the horizontal velocity field, which depends on $v_x$ and $v_y$, and $h_m=150\,km$ is the mass-flux scale-height [(November 1989, ApJ, 344, 494)](https://ui.adsabs.harvard.edu/abs/1989ApJ...344..494N/abstract). Some authors prefer to show the divergence instead of $v_z$; in that case simply divide: $v_z/h_m$.
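As a rough illustration of what the divergence term means (this is an assumption for demonstration, not necessarily how `pyflowmaps` computes it internally), the horizontal divergence can be approximated with finite differences:
```
import numpy as np

def horizontal_divergence(vx, vy, pixel_km):
    """Approximate div(v_h) = d(vx)/dx + d(vy)/dy with centred differences.

    vx, vy   : 2D horizontal velocity maps in km/s
    pixel_km : pixel size converted to km (e.g. 0.504 arcsec * ~725 km/arcsec)
    """
    dvx_dx = np.gradient(vx, pixel_km, axis=1)   # x varies along columns
    dvy_dy = np.gradient(vy, pixel_km, axis=0)   # y varies along rows
    return dvx_dx + dvy_dy                       # units: 1/s

# div_approx = horizontal_divergence(vx, vy, pixel_km=0.504 * 725)  # compare with vz/150
```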
Next, users can also create their own colormaps and personalize them.
```
from matplotlib import cm
from matplotlib.colors import ListedColormap
top = cm.get_cmap('Reds_r', 128)
bottom = cm.get_cmap('YlGn', 128)
newcolors = np.vstack((top(np.linspace(0.3, 1, 128)),
bottom(np.linspace(0, 0.75, 128))))
newcmp = ListedColormap(newcolors, name='RdYlGn')
```
Now, we will plot the flows in each horizontal direction, and the divergence.
```
fig, ax = plt.subplots(1,3,figsize=(15,8),sharey=True)
plt.subplots_adjust(wspace=0.03)
flowx=ax[0].imshow(vx,origin='lower',cmap=newcmp,vmin = vx.mean()-3*vx.std(),vmax=vx.mean()+3*vx.std())  # use the custom colormap defined above
ax[0].set_title('Horizontal flowmap vx')
ax[0].set_xlabel('X-axis [pix]')
ax[0].set_ylabel('Y-axis [pix]')
flowy=ax[1].imshow(vy,origin='lower',cmap=newcmp,vmin = vy.mean()-3*vy.std(),vmax=vy.mean()+3*vy.std())
ax[1].set_title('Horizontal flowmap vy')
ax[1].set_xlabel('X-axis [pix]')
div = vz/150
flowz=ax[2].imshow(div,origin='lower',cmap=newcmp,vmin = div.mean()-3*div.std(),vmax=div.mean()+3*div.std())
ax[2].set_title('Horizontal flowmap divergence')
ax[2].set_xlabel('X-axis [pix]')
fig.colorbar(flowx,ax=ax[0],orientation='horizontal',shrink=1,label='vx [km/s]')
fig.colorbar(flowy,ax=ax[1],orientation='horizontal',shrink=1,label='vy [km/s]')
fig.colorbar(flowz,ax=ax[2],orientation='horizontal',shrink=1,label='divergence')
fig.savefig('/Users/joseivan/pyflowmaps/images/flowmaps.jpg',format='jpeg',bbox_inches='tight')
```
Finally, we can also plot the arrows associated with the horizontal velocities
```
xx,yy = np.meshgrid(np.arange(128),np.arange(128)) # we create a grid
dense = 2 # plot an arrow every `dense` pixels
fig,ax = plt.subplots(figsize=(10,10))
Q = ax.quiver(xx[::dense,::dense],yy[::dense,::dense],vx[::dense,::dense],vy[::dense,::dense],
color='k', scale=8, headwidth= 4, headlength=4, width=0.0012)
im = ax.imshow(cube[15],cmap='gray',origin='lower')
ax.set_title('Flowmap horizontal velocities overplotted')
ax.set_xlabel('X-axis [pix]')
ax.set_ylabel('Y-axis [pix]')
fig.colorbar(im,ax=ax,label='Intensity',shrink=0.82,aspect=15)
fig.savefig('/Users/joseivan/pyflowmaps/images/flowmaps_arrows.jpg',format='jpeg',bbox_inches='tight')
```
# Video Super Resolution with OpenVINO
Super Resolution is the process of enhancing the quality of an image by increasing the pixel count using deep learning. This notebook applies Single Image Super Resolution (SISR) to frames of a 360p (640×360) video. We use a model called [single-image-super-resolution-1032](https://github.com/openvinotoolkit/open_model_zoo/tree/develop/models/intel/single-image-super-resolution-1032) which is available from the Open Model Zoo. It is based on the research paper cited below.
Y. Liu et al., ["An Attention-Based Approach for Single Image Super Resolution,"](https://arxiv.org/abs/1807.06779) 2018 24th International Conference on Pattern Recognition (ICPR), 2018, pp. 2777-2784, doi: 10.1109/ICPR.2018.8545760.
**NOTE:** The Single Image Super Resolution (SISR) model used in this demo is not optimized for video. Results may vary depending on the video. We are looking for a more suitable Multi Image Super Resolution (MISR) model, so if you know of a great open source model, please let us know! You can start a [discussion](https://github.com/openvinotoolkit/openvino_notebooks/discussions) or create an [issue](https://github.com/openvinotoolkit/openvino_notebooks/issues) on GitHub.
## Preparation
### Imports
```
import os
import time
import urllib
from pathlib import Path
import cv2
import numpy as np
from IPython.display import (
HTML,
FileLink,
Pretty,
ProgressBar,
Video,
clear_output,
display,
)
from openvino.inference_engine import IECore
from pytube import YouTube
```
### Settings
```
# Device to use for inference. For example, "CPU", or "GPU"
DEVICE = "CPU"
# 1032: 4x superresolution, 1033: 3x superresolution
MODEL_FILE = "model/single-image-super-resolution-1032.xml"
model_name = os.path.basename(MODEL_FILE)
model_xml_path = Path(MODEL_FILE).with_suffix(".xml")
```
### Functions
```
def write_text_on_image(image: np.ndarray, text: str) -> np.ndarray:
"""
Write the specified text in the top left corner of the image
as white text with a black border.
:param image: image as numpy array with HWC shape, RGB or BGR
:param text: text to write
:return: image with written text, as numpy array
"""
font = cv2.FONT_HERSHEY_PLAIN
org = (20, 20)
font_scale = 4
font_color = (255, 255, 255)
line_type = 1
font_thickness = 2
text_color_bg = (0, 0, 0)
x, y = org
image = cv2.UMat(image)
(text_w, text_h), _ = cv2.getTextSize(
text=text, fontFace=font, fontScale=font_scale, thickness=font_thickness
)
result_im = cv2.rectangle(
img=image, pt1=org, pt2=(x + text_w, y + text_h), color=text_color_bg, thickness=-1
)
textim = cv2.putText(
img=result_im,
text=text,
org=(x, y + text_h + font_scale - 1),
fontFace=font,
fontScale=font_scale,
color=font_color,
thickness=font_thickness,
lineType=line_type,
)
return textim.get()
def load_image(path: str) -> np.ndarray:
"""
Loads an image from `path` and returns it as BGR numpy array.
:param path: path to an image filename or url
:return: image as numpy array, with BGR channel order
"""
if path.startswith("http"):
# Set User-Agent to Mozilla because some websites block requests
# with User-Agent Python
request = urllib.request.Request(url=path, headers={"User-Agent": "Mozilla/5.0"})
response = urllib.request.urlopen(url=request)
array = np.asarray(bytearray(response.read()), dtype="uint8")
image = cv2.imdecode(buf=array, flags=-1) # Loads the image as BGR
else:
image = cv2.imread(filename=path)
return image
def convert_result_to_image(result) -> np.ndarray:
"""
Convert network result of floating point numbers to image with integer
values from 0-255. Values outside this range are clipped to 0 and 255.
:param result: a single superresolution network result in N,C,H,W shape
"""
result = result.squeeze(0).transpose(1, 2, 0)
result *= 255
result[result < 0] = 0
result[result > 255] = 255
result = result.astype(np.uint8)
return result
```
## Load the Superresolution Model
Load the model in Inference Engine with `ie.read_network` and load it to the specified device with `ie.load_network`
```
ie = IECore()
net = ie.read_network(model=model_xml_path)
exec_net = ie.load_network(network=net, device_name=DEVICE)
```
Get information about network inputs and outputs. The Super Resolution model expects two inputs: 1) the input image, 2) a bicubic interpolation of the input image to the target size 1920x1080. It returns the super resolution version of the image in 1920x1080.
```
# Network inputs and outputs are dictionaries. Get the keys for the
# dictionaries.
original_image_key = list(exec_net.input_info)[0]
bicubic_image_key = list(exec_net.input_info)[1]
output_key = list(exec_net.outputs.keys())[0]
# Get the expected input and target shape. `.dims[2:]` returns the height
# and width. Note that OpenCV's resize function expects the size as
# (width, height), so width and height are swapped when calling cv2.resize later
input_height, input_width = tuple(exec_net.input_info[original_image_key].tensor_desc.dims[2:])
target_height, target_width = tuple(exec_net.input_info[bicubic_image_key].tensor_desc.dims[2:])
upsample_factor = int(target_height / input_height)
print(f"The network expects inputs with a width of {input_width}, " f"height of {input_height}")
print(f"The network returns images with a width of {target_width}, " f"height of {target_height}")
print(
f"The image sides are upsampled by a factor {upsample_factor}. "
f"The new image is {upsample_factor**2} times as large as the "
"original image"
)
```
## Superresolution on Video
Download a YouTube\* video with PyTube and enhance the video quality with superresolution.
By default only the first 100 frames of the video are processed. Change NUM_FRAMES in the cell below to modify this.
**Note:**
- The resulting video does not contain audio.
- The input video should be a landscape video and have an input resolution of 360p (640x360) for the 1032 model, or 480p (720x480) for the 1033 model.
### Settings
```
VIDEO_DIR = "data"
OUTPUT_DIR = "output"
os.makedirs(name=str(OUTPUT_DIR), exist_ok=True)
# Number of frames to read from the input video. Set to 0 to read all frames.
NUM_FRAMES = 100
# The format (FOURCC codec) for saving the result videos
# vp09 is slow, but widely available. If you have FFMPEG installed, you can
# change the FOURCC to `*"THEO"` to improve video writing speed
FOURCC = cv2.VideoWriter_fourcc(*"vp09")
```
### Download and Prepare Video
```
# Use pytube to download a video. It downloads to the videos subdirectory.
# You can also place a local video there and comment out the following lines
VIDEO_URL = "https://www.youtube.com/watch?v=V8yS3WIkOrA"
yt = YouTube(VIDEO_URL)
# Use `yt.streams` to see all available streams. See the PyTube documentation
# https://python-pytube.readthedocs.io/en/latest/api.html for advanced
# filtering options
try:
os.makedirs(name=VIDEO_DIR, exist_ok=True)
stream = yt.streams.filter(resolution="360p").first()
filename = Path(stream.default_filename.encode("ascii", "ignore").decode("ascii")).stem
stream.download(output_path=OUTPUT_DIR, filename=filename)
print(f"Video {filename} downloaded to {OUTPUT_DIR}")
# Create Path objects for the input video and the resulting videos
video_path = Path(stream.get_file_path(filename, OUTPUT_DIR))
except Exception:
# If PyTube fails, use a local video stored in the VIDEO_DIR directory
video_path = Path(rf"{VIDEO_DIR}/CEO Pat Gelsinger on Leading Intel.mp4")
# Path names for the result videos
superres_video_path = Path(f"{OUTPUT_DIR}/{video_path.stem}_superres.mp4")
bicubic_video_path = Path(f"{OUTPUT_DIR}/{video_path.stem}_bicubic.mp4")
comparison_video_path = Path(f"{OUTPUT_DIR}/{video_path.stem}_superres_comparison.mp4")
# Open the video and get the dimensions and the FPS
cap = cv2.VideoCapture(filename=str(video_path))
ret, image = cap.read()
if not ret:
raise ValueError(f"The video at '{video_path}' cannot be read.")
fps = cap.get(cv2.CAP_PROP_FPS)
original_frame_height, original_frame_width = image.shape[:2]
cap.release()
print(
f"The input video has a frame width of {original_frame_width}, "
f"frame height of {original_frame_height} and runs at {fps:.2f} fps"
)
```
Create the superresolution video, the bicubic video and the comparison video. The superresolution video contains the enhanced video, upsampled with superresolution; the bicubic video is the input video upsampled with bicubic interpolation; the comparison video places the bicubic video and the superresolution video side by side.
```
superres_video = cv2.VideoWriter(
filename=str(superres_video_path),
fourcc=FOURCC,
fps=fps,
frameSize=(target_width, target_height),
)
bicubic_video = cv2.VideoWriter(
filename=str(bicubic_video_path),
fourcc=FOURCC,
fps=fps,
frameSize=(target_width, target_height),
)
comparison_video = cv2.VideoWriter(
filename=str(comparison_video_path),
fourcc=FOURCC,
fps=fps,
frameSize=(target_width * 2, target_height),
)
```
### Do Inference
Read video frames and enhance them with superresolution. Save the superresolution video, the bicubic video and the comparison video to file.
The code in this cell reads the video frame by frame. Each frame is resized and reshaped to the network input shape and upsampled with bicubic interpolation to the target shape. Both the original and the bicubic image are propagated through the network. The network result is a numpy array with floating point values, with a shape of (1,3,1080,1920). This array is converted to an 8-bit image with shape (1080,1920,3) and written to `superres_video`. The bicubic image is written to `bicubic_video` for comparison. Lastly, the bicubic and result frames are combined side by side and written to `comparison_video`. A progress bar shows the progress of the process. Inference time is measured, as well as the total time to process each frame, which includes the inference time plus the time it takes to process and write the video.
```
start_time = time.perf_counter()
frame_nr = 1
total_inference_duration = 0
cap = cv2.VideoCapture(filename=str(video_path))
# Open the capture before querying the frame count, so CAP_PROP_FRAME_COUNT is valid
total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT) if NUM_FRAMES == 0 else NUM_FRAMES
progress_bar = ProgressBar(total=total_frames)
progress_bar.display()
try:
while cap.isOpened():
ret, image = cap.read()
if not ret:
cap.release()
break
if NUM_FRAMES > 0 and frame_nr == NUM_FRAMES:
break
# Resize the input image to network shape and convert from (H,W,C) to
# (N,C,H,W)
resized_image = cv2.resize(src=image, dsize=(input_width, input_height))
input_image_original = np.expand_dims(resized_image.transpose(2, 0, 1), axis=0)
# Resize and reshape the image to the target shape with bicubic
# interpolation
bicubic_image = cv2.resize(
src=image, dsize=(target_width, target_height), interpolation=cv2.INTER_CUBIC
)
input_image_bicubic = np.expand_dims(bicubic_image.transpose(2, 0, 1), axis=0)
# Do inference
inference_start_time = time.perf_counter()
result = exec_net.infer(
inputs={
original_image_key: input_image_original,
bicubic_image_key: input_image_bicubic,
}
)[output_key]
inference_stop_time = time.perf_counter()
inference_duration = inference_stop_time - inference_start_time
total_inference_duration += inference_duration
# Transform inference result into an image
result_frame = convert_result_to_image(result=result)
# Write resulting image and bicubic image to video
superres_video.write(image=result_frame)
bicubic_video.write(image=bicubic_image)
stacked_frame = np.hstack((bicubic_image, result_frame))
comparison_video.write(image=stacked_frame)
frame_nr = frame_nr + 1
# Update progress bar and status message
progress_bar.progress = frame_nr
progress_bar.update()
if frame_nr % 10 == 0:
clear_output(wait=True)
progress_bar.display()
display(
Pretty(
f"Processed frame {frame_nr}. Inference time: "
f"{inference_duration:.2f} seconds "
f"({1/inference_duration:.2f} FPS)"
)
)
except KeyboardInterrupt:
print("Processing interrupted.")
finally:
superres_video.release()
bicubic_video.release()
comparison_video.release()
end_time = time.perf_counter()
duration = end_time - start_time
print(f"Video's saved to {comparison_video_path.parent} directory.")
print(
f"Processed {frame_nr} frames in {duration:.2f} seconds. Total FPS "
f"(including video processing): {frame_nr/duration:.2f}. "
f"Inference FPS: {frame_nr/total_inference_duration:.2f}."
)
```
### Show Side-by-Side Video of Bicubic and Superresolution Version
```
if not comparison_video_path.exists():
raise ValueError("The comparison video does not exist.")
else:
video_link = FileLink(comparison_video_path)
video_link.html_link_str = "<a href='%s' download>%s</a>"
display(
HTML(
f"Showing side by side comparison. If you cannot see the video in "
"your browser, please click on the following link to download "
f"the video<br>{video_link._repr_html_()}"
)
)
display(Video(comparison_video_path, width=800, embed=True))
```
**Reinforcement Learning with TensorFlow & TRFL: Q Learning**
* This notebook shows how to apply the classic Reinforcement Learning (RL) idea of Q learning with TRFL.
* In TD learning we estimated state values, V(s). In Q learning we estimate action values, Q(s,a). Here we'll go over Q learning in the simple tabular case. In the next section we will use this same Q-learning function in powerful deep learning algorithms such as the Deep Q-Network (DQN).
* A key concept in RL is exploration. We'll introduce and use epsilon greedy exploration, which is often used with Q learning.
Outline:
1. Install TRFL
2. Define the GridWorld environment
3. Discuss Epsilon-Greedy Exploration
4. Find the value of each state-action value in the environment using Q learning
```
#TRFL has issues on Colab with TensorFlow version tensorflow-1.13.0rc1
#install TensorFlow 1.12 and restart run time
!pip install tensorflow==1.12
import os
os.kill(os.getpid(), 9)
#install TRFL
!pip install trfl==1.0
#install Tensorflow Probability
!pip install tensorflow-probability==0.5.0
```
**GridWorld**
The GridWorld environment is a four-by-four grid. The agent starts at a random cell and can move up, right, down, or left. If the agent reaches the upper-left or lower-right corner, the episode is over. Every action the agent takes receives a reward of -1 until it reaches one of those two terminal corners.
```
#Environment from: https://github.com/dennybritz/reinforcement-learning/blob/cee9e78652f8ce98d6079282daf20680e5e17c6a/lib/envs/gridworld.py
#define the environment
import io
import numpy as np
import sys
from gym.envs.toy_text import discrete
import pprint
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
class GridworldEnv(discrete.DiscreteEnv):
"""
Grid World environment from Sutton's Reinforcement Learning book chapter 4.
You are an agent on an MxN grid and your goal is to reach the terminal
state at the top left or the bottom right corner.
For example, a 4x4 grid looks as follows:
T o o o
o x o o
o o o o
o o o T
x is your position and T are the two terminal states.
You can take actions in each direction (UP=0, RIGHT=1, DOWN=2, LEFT=3).
Actions going off the edge leave you in your current state.
You receive a reward of -1 at each step until you reach a terminal state.
"""
metadata = {'render.modes': ['human', 'ansi']}
def __init__(self, shape=[4,4]):
if not isinstance(shape, (list, tuple)) or not len(shape) == 2:
raise ValueError('shape argument must be a list/tuple of length 2')
self.shape = shape
nS = np.prod(shape)
nA = 4
MAX_Y = shape[0]
MAX_X = shape[1]
P = {}
grid = np.arange(nS).reshape(shape)
it = np.nditer(grid, flags=['multi_index'])
while not it.finished:
s = it.iterindex
y, x = it.multi_index
# P[s][a] = (prob, next_state, reward, is_done)
P[s] = {a : [] for a in range(nA)}
is_done = lambda s: s == 0 or s == (nS - 1)
reward = 0.0 if is_done(s) else -1.0
#reward = 1.0 if is_done(s) else 0.0
# We're stuck in a terminal state
if is_done(s):
P[s][UP] = [(1.0, s, reward, True)]
P[s][RIGHT] = [(1.0, s, reward, True)]
P[s][DOWN] = [(1.0, s, reward, True)]
P[s][LEFT] = [(1.0, s, reward, True)]
# Not a terminal state
else:
ns_up = s if y == 0 else s - MAX_X
ns_right = s if x == (MAX_X - 1) else s + 1
ns_down = s if y == (MAX_Y - 1) else s + MAX_X
ns_left = s if x == 0 else s - 1
P[s][UP] = [(1.0, ns_up, reward, is_done(ns_up))]
P[s][RIGHT] = [(1.0, ns_right, reward, is_done(ns_right))]
P[s][DOWN] = [(1.0, ns_down, reward, is_done(ns_down))]
P[s][LEFT] = [(1.0, ns_left, reward, is_done(ns_left))]
it.iternext()
# Initial state distribution is uniform
isd = np.ones(nS) / nS
# We expose the model of the environment for educational purposes
# This should not be used in any model-free learning algorithm
self.P = P
super(GridworldEnv, self).__init__(nS, nA, P, isd)
def _render(self, mode='human', close=False):
""" Renders the current gridworld layout
For example, a 4x4 grid with the mode="human" looks like:
T o o o
o x o o
o o o o
o o o T
where x is your position and T are the two terminal states.
"""
if close:
return
outfile = io.StringIO() if mode == 'ansi' else sys.stdout
grid = np.arange(self.nS).reshape(self.shape)
it = np.nditer(grid, flags=['multi_index'])
while not it.finished:
s = it.iterindex
y, x = it.multi_index
if self.s == s:
output = " x "
elif s == 0 or s == self.nS - 1:
output = " T "
else:
output = " o "
if x == 0:
output = output.lstrip()
if x == self.shape[1] - 1:
output = output.rstrip()
outfile.write(output)
if x == self.shape[1] - 1:
outfile.write("\n")
it.iternext()
pp = pprint.PrettyPrinter(indent=2)
```
**An Introduction to Exploration: Epsilon-Greedy Exploration**
Exploration is a key concept in RL. In order to find the best policies, an agent needs to explore the environment. By exploring, the agent can experience new states and rewards. In the last notebook, the agent explored GridWorld by taking a random action at every step. While purely random exploration can work in some environments, the downside is that the agent can spend too much time in bad states, or in states that have already been explored fully, and not enough time in promising states. A simple--yet surprisingly effective--approach is epsilon-greedy exploration: an epsilon fraction of the time the agent chooses a random action, and the remaining (1-epsilon) fraction of the time the agent chooses the best estimated action, a.k.a. the *greedy action*. Epsilon can be a fixed value between 0 and 1, or it can start at a high value and gradually decay over time (e.g. start at 0.99 and decay to 0.01). In this notebook we will use a fixed epsilon value of 0.1. Below is a simple example of epsilon-greedy exploration.
```
#declare the environment
env = GridworldEnv()
#reset the environment and get the agent's current position (observation)
current_state = env.reset()
env._render()
print("")
action_dict = {0:"UP",1:"RIGHT", 2:"DOWN",3:"LEFT"}
greedy_dict = {0:3,1:3,2:3,3:3,
4:0,5:0,6:0,7:0,
8:2,9:2,10:2,11:2,
12:1,13:1,14:1,15:1}
epsilon = 0.1
for i in range(10):
#choose random action epsilon amount of the time
if np.random.rand() < epsilon:
action = env.action_space.sample()
action_type = "random"
else:
#Choose a greedy action. We will learn greedy actions with Q learning in the following cells.
action = greedy_dict[current_state]
action_type = "greedy"
current_state,reward,done,info = env.step(action)
print("Agent took {} action {} and is now in state {} ".format(action_type, action_dict[action], current_state))
env._render()
print("")
if done:
print("Agent reached end of episode, resetting the env")
print(env.reset())
print("")
env._render()
print("")
```
**TRFL Usage**
Once again, the three main TRFL steps are:
1. In the TensorFlow graph, define the necessary TensorFlow tensors
2. In the graph, feed the tensors into the trfl method
3. In the TensorFlow session, run the graph operation
We saw this in the last notebook. Here in Q learning there are some slight differences. We use the trfl.qlearning() method and we input the action and action values (instead of state values) into the method. Note for the action values q_t and q_next_t the shape is batch size X number of actions.
```
#set up TRFL graph
import tensorflow as tf
import trfl
#https://github.com/deepmind/trfl/blob/master/docs/trfl.md#qlearningq_tm1-a_tm1-r_t-pcont_t-q_t-nameqlearning
# Args:
# q_tm1: Tensor holding Q-values for first timestep in a batch of transitions, shape [B x num_actions].
# a_tm1: Tensor holding action indices, shape [B].
# r_t: Tensor holding rewards, shape [B].
# pcont_t: Tensor holding pcontinue values, shape [B].
# q_t: Tensor holding Q-values for second timestep in a batch of transitions, shape [B x num_actions].
# name: name to prefix ops created within this op.
num_actions = env.action_space.n
batch_size = 1
q_t = tf.placeholder(dtype=tf.float32,shape=[batch_size,num_actions],name="q_value")
action_t = tf.placeholder(dtype=tf.int32,shape=[batch_size],name="action")
reward_t = tf.placeholder(dtype=tf.float32,shape=[batch_size],name='reward')
gamma_t = tf.placeholder(dtype=tf.float32,shape=[batch_size],name='discount_factor')
q_next_t= tf.placeholder(dtype=tf.float32,shape=[batch_size,num_actions],name='q_next_value')
qloss_t, q_extra_t = trfl.qlearning(q_t,action_t,reward_t,gamma_t,q_next_t)
```
**The RL Training Loop**
In the next cell we are going to define the training loop and then run it in the following cell. The goal is to estimate the action value of each state (i.e. the value of each state-action combination) using Q learning. action_value_array holds the estimated values. After each step the agent takes in the env, we update the action_value_array with the Q-learning formula.
**TRFL Usage**
The TRFL usage here is to run the TRFL output tensor (q_extra_t) in sess.run(). We then take the output (q_learning_output) and extract its td_error field. Using the td_error, we update the action_value_array. For reference, the code below documents the full output of trfl.qlearning and also shows (commented out) the classic tabular Q-learning update.
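As a concrete illustration of the update the td_error drives, here is a tiny hand-computed example (illustrative only; the values and gamma below are made up, not taken from the environment):
```
import numpy as np

# One transition: in state s the agent took action a=1, got reward -1,
# and landed in state s' whose current action-value estimates are q_next.
q_s    = np.array([0.0, -0.5, -1.0, 0.0])   # Q(s, .)
q_next = np.array([-1.0, 0.0, -2.0, -1.5])  # Q(s', .)
a, reward, gamma, alpha = 1, -1.0, 1.0, 0.1

target   = reward + gamma * q_next.max()     # -1 + 1.0 * 0.0 = -1.0
td_error = target - q_s[a]                   # -1.0 - (-0.5) = -0.5
q_s[a]   = q_s[a] + alpha * td_error         # -0.5 + 0.1 * (-0.5) = -0.55
```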
```
def q_learning_action_value_estimate(env,episodes=1000,alpha=0.05,discount_factor=1.0,epsilon=0.1):
"""
Args:
env: OpenAI env. env.P represents the transition probabilities of the environment.
env.P[s][a] is a list of transition tuples (prob, next_state, reward, done).
env.nS is a number of states in the environment.
env.nA is a number of actions in the environment.
episodes: number of episodes to run
alpha: learning rate for state value updates
discount_factor: Gamma discount factor. pcont_t TRFL argument
Returns:
    the estimated value of each state-action pair, learned with Q learning under the epsilon-greedy policy
"""
with tf.Session() as sess:
#initialize the estimated state values to zero
action_value_array = np.zeros((env.nS,env.nA))
#reset the env
current_state = env.reset()
#env._render()
#run through each episode taking a random action each time
#upgrade estimated state value after each action
current_episode = 0
while current_episode < episodes:
#choose action based on epsilon-greedy policy
if np.random.rand() < epsilon:
eg_action = env.action_space.sample()
else:
#Choose a greedy action. We will learn greedy actions with Q learning in the following cells.
eg_action = np.argmax(action_value_array[current_state])
#take a step using epsilon-greedy action
next_state, rew, done, info = env.step(eg_action)
#run TRFL operation in the session
q_learning_output = sess.run([q_extra_t],feed_dict={q_t:np.expand_dims(action_value_array[current_state],axis=0),
action_t:np.expand_dims(eg_action,axis=0),
reward_t:np.expand_dims(rew,axis=0),
gamma_t:np.expand_dims(discount_factor,axis=0),
q_next_t:np.expand_dims(action_value_array[next_state],axis=0)})
# trfl.qlearning() returns:
# A namedtuple with fields:
# loss: a tensor containing the batch of losses, shape [B].
# extra: a namedtuple with fields:
# target: batch of target values for q_tm1[a_tm1], shape [B].
# td_error: batch of temporal difference errors, shape [B].
# Here we are using the td_error to update our action values. We will use the loss with a gradient descent optimizer in Deep Q Network session.
#Use the Q learning TD error to update estimated state-action values
action_value_array[current_state,eg_action] = action_value_array[current_state,eg_action] + alpha * q_learning_output[0].td_error
#For reference, here is the tabular Q learning update method
# max_q_value = np.max(action_value_array[next_state])
# action_value_array[current_state,eg_action] = action_value_array[current_state,eg_action] + \
# alpha * (rew + discount_factor*max_q_value - action_value_array[current_state,eg_action])
#if the epsiode is done, reset the env, if not the next state becomes the current state and the loop repeats
if done:
current_state = env.reset()
current_episode += 1
else:
current_state = next_state
return action_value_array
#run episodes with Q learning and get the state value estimates
action_values = q_learning_action_value_estimate(env,episodes=2000,alpha=0.1)
print("All Action Value Estimates:")
print(np.round(action_values.reshape((16,4)),1))
print("each row is a state, each column is an action")
print("")
optimal_action_estimates = np.max(action_values,axis=1)
print("Optimal Action Value Estimates:")
print(np.round(optimal_action_estimates.reshape(env.shape),1))
print("estimate of the optimal State value at each state")
print("")
```
The first output shows the estimated value of each action in each state. For example, row 4, column 4 is the value if the agent were in the upper-right grid cell (state 3) and took the LEFT action. In the second output, we take the best action for each of the 16 states and show the agent's estimate of the state value assuming the agent always acts greedily.
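If you also want to see the greedy policy implied by these estimates (not printed above), a small sketch like the following would show the best action per state; `action_values`, `action_dict`, and `env` are the objects already defined in this notebook:
```
import numpy as np

greedy_actions = np.argmax(action_values, axis=1)  # best action index per state
policy_grid = np.array([action_dict[a] for a in greedy_actions]).reshape(env.shape)
print(policy_grid)  # 'UP'/'RIGHT'/'DOWN'/'LEFT' arranged on the 4x4 grid
```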
```
```
# Tensorboard example
```
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
with open('anna.txt', 'r') as f:
text=f.read()
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
encoded = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
text[:100]
encoded[:100]
```
Since the network is working with individual characters, it's similar to a classification problem in which we are trying to predict the next character from the previous text. Here's how many 'classes' our network has to pick from.
```
len(vocab)
def get_batches(arr, n_seqs, n_steps_per_seq):
'''Create a generator that returns batches of size
n_seqs x n_steps from arr.
Arguments
---------
arr: Array you want to make batches from
n_seqs: Batch size, the number of sequences per batch
n_steps: Number of sequence steps per batch
'''
    # Get the batch size and number of batches we can make
    # e.g. n_seqs = 10, n_steps_per_seq = 2  ->  batch_size = 20 characters per batch
    batch_size = n_seqs * n_steps_per_seq
    # e.g. len(arr) = 45 and batch_size = 20  ->  2 full batches
    n_batches = len(arr) // batch_size
    # Keep only enough characters to make full batches; the remainder is dropped
    # so that the array can be reshaped into (n_seqs, -1) evenly
    arr = arr[ : n_batches * batch_size]
# Reshape into n_seqs rows
arr = arr.reshape((n_seqs, -1))
for n in range(0, arr.shape[1], n_steps_per_seq):
# The features
x = arr[ :, n: n + n_steps_per_seq]
# The targets, shifted by one
y = np.zeros_like(x)
y[ :, : -1], y[ : , -1] = x[ :, 1: ], x[ :, 0]
yield x, y
batches = get_batches(encoded, 10, 50)
x, y = next(batches)
def build_inputs(batch_size, num_steps):
''' Define placeholders for inputs, targets, and dropout
Arguments
---------
batch_size: Batch size, number of sequences per batch
num_steps: Number of sequence steps in a batch
'''
with tf.name_scope('inputs'):
# Declare placeholders we'll feed into the graph
inputs = tf.placeholder(tf.int32, (batch_size, num_steps), name="inputs")
targets = tf.placeholder(tf.int32, (batch_size, num_steps), name="targets")
# Keep probability placeholder for drop out layers
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
return inputs, targets, keep_prob
def single_lstm_cell(lstm_size, keep_prob):
with tf.name_scope("RNN_layers"):
lstm = tf.contrib.rnn.NASCell(lstm_size, reuse = tf.get_variable_scope().reuse)
# Add dropout to the cell outputs
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob = keep_prob)
return drop
def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
''' Build LSTM cell.
Arguments
---------
keep_prob: Scalar tensor (tf.placeholder) for the dropout keep probability
lstm_size: Size of the hidden layers in the LSTM cells
num_layers: Number of LSTM layers
batch_size: Batch size
'''
### Build the LSTM Cell
# Stack up multiple LSTM layers, for deep learning
with tf.name_scope("RNN_layers"):
rnn_cells = tf.contrib.rnn.MultiRNNCell([single_lstm_cell(lstm_size, keep_prob) for _ in range(num_layers)],
state_is_tuple = True)
with tf.name_scope("RNN_init_state"):
initial_state = rnn_cells.zero_state(batch_size, tf.float32)
return rnn_cells, initial_state
def build_output(lstm_output, in_size, out_size):
''' Build a softmax layer, return the softmax output and logits.
Arguments
---------
lstm_output: List of output tensors from the LSTM layer
in_size: Size of the input tensor, for example, size of the LSTM cells
out_size: Size of this softmax layer
'''
# Reshape output so it's a bunch of rows, one row for each step for each sequence.
# Concatenate lstm_output over axis 1 (the columns)
    # e.g. t1 = [[1, 2, 3], [4, 5, 6]]
# t2 = [[7, 8, 9], [10, 11, 12]]
# tf.concat([t1, t2], 1) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
seq_output = tf.concat(lstm_output, axis=1)
# Reshape seq_output to a 2D tensor with lstm_size columns
    x = tf.reshape(seq_output, [-1, in_size])
# Connect the RNN outputs to a softmax layer
with tf.variable_scope('softmax'):
# Create the weight and bias variables here
softmax_w = tf.Variable(tf.truncated_normal( (in_size, out_size), stddev=0.1))
softmax_b = tf.Variable(tf.zeros( out_size ))
# tensorboard
tf.summary.histogram("softmax_w", softmax_w)
# Since output is a bunch of rows of RNN cell outputs, logits will be a bunch
# of rows of logit outputs, one for each step and sequence
logits = tf.matmul(x, softmax_w) + softmax_b
# Use softmax to get the probabilities for predicted characters
out = tf.nn.softmax(logits, name="predictions")
tf.summary.histogram("predictions", out)
return out, logits
def build_loss(logits, targets, lstm_size, num_classes):
''' Calculate the loss from the logits and the targets.
Arguments
---------
logits: Logits from final fully connected layer
targets: Targets for supervised learning
lstm_size: Number of LSTM hidden units
num_classes: Number of classes in targets
'''
# One-hot encode targets and reshape to match logits, one row per sequence per step
y_one_hot = tf.one_hot(targets, num_classes)
y_reshaped = tf.reshape( y_one_hot, logits.get_shape() )
# Softmax cross entropy loss
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped)
loss = tf.reduce_mean(loss)
# tensorboard
tf.summary.scalar('loss', loss)
return loss
def build_optimizer(loss, learning_rate, grad_clip):
    ''' Build optimizer for training, using gradient clipping.
Arguments:
loss: Network loss
learning_rate: Learning rate for optimizer
'''
# Optimizer for training, using gradient clipping to control exploding gradients
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
return optimizer
class CharRNN:
def __init__(self, num_classes, batch_size=64, num_steps=50,
lstm_size=128, num_layers=2, learning_rate=0.001,
grad_clip=5, sampling=False):
# When we're using this network for sampling later, we'll be passing in
# one character at a time, so providing an option for that
if sampling == True:
batch_size, num_steps = 1, 1
else:
batch_size, num_steps = batch_size, num_steps
tf.reset_default_graph()
# Build the input placeholder tensors
self.inputs, self.targets, self.keep_prob = build_inputs(batch_size, num_steps)
x_one_hot = tf.one_hot(self.inputs, num_classes, name="x_one_hot")
with tf.name_scope("RNN_layers"):
# Build the LSTM cell
cells, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)
### Run the data through the RNN layers
with tf.name_scope("RNN_forward"):
# Run each sequence step through the RNN with tf.nn.dynamic_rnn
outputs, state = tf.nn.dynamic_rnn(cells, x_one_hot, initial_state=self.initial_state)
self.final_state = state
# Get softmax predictions and logits
self.prediction, self.logits = build_output(outputs, lstm_size, num_classes)
# Loss and optimizer (with gradient clipping)
self.loss = build_loss(self.logits, self.targets, lstm_size, num_classes)
self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)
batch_size = 64 # Sequences per batch
num_steps = 128 # Number of sequence steps per batch
lstm_size = 512 # Size of hidden layers in LSTMs
num_layers = 2 # Number of LSTM layers
learning_rate = 0.001 # Learning rate
keep_prob = 0.5 # Dropout keep probability
model = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps,
lstm_size=lstm_size, num_layers=num_layers,
learning_rate=learning_rate)
epochs = 3
# Save every N iterations
save_every_n = 200
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
    # Tensorboard
train_writer = tf.summary.FileWriter('./logs/1/train', sess.graph)
test_writer = tf.summary.FileWriter('./logs/1/test')
# Use the line below to load a checkpoint and resume training
#saver.restore(sess, 'checkpoints/______.ckpt')
counter = 0
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for x, y in get_batches(encoded, batch_size, num_steps):
counter += 1
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: keep_prob,
model.initial_state: new_state}
merged = tf.summary.merge_all() # Tensorboard
summary, batch_loss, new_state, _ = sess.run([merged, model.loss,
model.final_state,
model.optimizer],
feed_dict=feed)
train_writer.add_summary(summary, counter)
end = time.time()
print('Epoch: {}/{}... '.format(e+1, epochs),
'Training Step: {}... '.format(counter),
'Training loss: {:.4f}... '.format(batch_loss),
'{:.4f} sec/batch'.format((end-start)))
if (counter % save_every_n == 0):
saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
```
#### Saved checkpoints
Read up on saving and loading checkpoints here: https://www.tensorflow.org/programmers_guide/variables
```
tf.train.get_checkpoint_state('checkpoints')
```
## Sampling
Now that the network is trained, we can use it to generate new text. The idea is that we pass in a character, and the network predicts the next character. We can then feed that new character back in to predict the one after it, and keep doing this to generate all-new text. I also included some functionality to prime the network with some text, by passing in a string and building up a state from that.
The network gives us predictions for each character. To reduce noise and make things a little less random, I'm going to only choose a new character from the top N most likely characters.
```
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
samples = [c for c in prime]
model = CharRNN(len(vocab), lstm_size=lstm_size, sampling=True)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
return ''.join(samples)
```
Here, pass in the path to a checkpoint and sample from the network.
```
tf.train.latest_checkpoint('checkpoints')
checkpoint = tf.train.latest_checkpoint('checkpoints')
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i600_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i1200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
```
[View in Colaboratory](https://colab.research.google.com/github/nishi1612/SC374-Computational-and-Numerical-Methods/blob/master/Set_3.ipynb)
Set 3
---
**Finding roots of polynomial by bisection method**
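Before the code, here is a minimal sketch of a single bisection step (illustrative only), matching the `f(a)*f(c) < 0` test and the `b - c > epsilon` stopping criterion used in the cells below:
```
def bisection_step(f, a, b):
    """One bisection step: return the half of [a, b] that still brackets a root."""
    c = (a + b) / 2
    if f(a) * f(c) < 0:   # sign change in [a, c] -> the root lies there
        return a, c
    return c, b           # otherwise the root lies in [c, b]
```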
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
from google.colab import files
def iterations(n, arr , i):
plt.plot(range(n),arr)
plt.xlabel('No. of iterations')
plt.ylabel('Value of c')
plt.grid(True)
plt.savefig("Iterations" + str(i) + ".png")
files.download("Iterations" + str(i) + ".png")
plt.show()
def graph(i):
plt.xlabel('x')
plt.ylabel('y')
plt.grid(True)
plt.legend(loc='upper right')
plt.savefig("Graph" + str(i) + ".png")
files.download("Graph" + str(i) + ".png")
plt.show()
def bissection( a,b,epsilon,k):
table = pd.DataFrame(columns=['a','b','c','b-c','f(a)*f(c)','Assign'])
c = (a+b)/2;
dist = b-c;
i = 0
arr = []
while(dist>epsilon):
ans_a = func(a,k);
ans_b = func(b,k);
ans_c = func(c,k);
ans = ""
if(ans_a*ans_c < 0):
b=c;
ans = "b=c"
else:
a=c;
ans = "a=c";
table.loc[i] = [a,b,c,dist,ans_a*ans_c,ans]
arr.append(c)
i = i+1
c = (a+b) / 2
dist = b-c
return (a+b)/2 ,i , arr , table;
def func(x,k):
if k==1:
return x**6 - x - 1;
elif k==2:
return x**3 - x**2 - x - 1;
elif k==3:
return x - 1 - 0.3*math.cos(x);
elif k==4:
return 0.5 + math.sin(x) - math.cos(x);
elif k==5:
return x - math.e**(-x);
elif k==6:
return math.e**(-x) - math.sin(x);
elif k==7:
return x**3 - 2*x - 2;
elif k==8:
return x**4 - x - 1;
elif k==9:
return math.e**(x) - x - 2;
elif k==10:
return 1- x + math.sin(x);
elif k==11:
return x - math.tan(x);
x = np.arange(-2,3,0.001)
plt.plot(x,x**6,label='$x^6$')
plt.plot(x,x+1,label="x+1")
graph(1)
plt.plot(x, x**6 - x - 1, label='$x^6 - x - 1$')
graph(1)
a , n , arr , table = bissection(1,2,0.001,1)
iterations(n,arr,1)
print(str(a) + "\n" + str(func(a,1)))
table
b , n , arr , table = bissection(-1,0,0.001,1)
iterations(n,arr,1)
print(str(b) + "\n" + str(func(b,1)))
table
x = np.arange(-2,3,0.001)
plt.plot(x,x**3,label='$x^3$')
plt.plot(x,x**2 + x + 1,label='$x^2 + x + 1$')
graph(2)
plt.plot(x, x**3 - (x**2 + x + 1), label='$x^3 - x^2 - x - 1$')
graph(2)
a , n , arr, table = bissection(1,2,0.0001,2)
iterations(n,arr,2)
print(str(a) + "\n" + str(func(a,2)))
table
x = np.arange(-3,5,0.001)
plt.plot(x,x-1,label='$x-1$')
plt.plot(x,0.3*np.cos(x),label='$0.3cos(x)$')
graph(3)
plt.plot(x,x-1-0.3*np.cos(x) , label='$x - 1 - 0.3cos(x)$')
graph(3)
a , n , arr , table = bissection(0,2,0.0001,3)
iterations(n,arr,3)
print(str(a) + "\n" + str(func(a,3)))
table
x = np.arange(-10,10,0.001)
plt.plot(x,0.5 + np.sin(x),label='$0.5 + sin(x)$')
plt.plot(x,np.cos(x),label='$cos(x)$')
graph(4)
plt.plot(x,0.5 + np.sin(x) - np.cos(x),label='$0.5 + sin(x) - cos(x)$')
graph(4)
a , n , arr , table = bissection(0,2,0.0001,4)
iterations(n,arr,4)
print(str(a) + "\n" + str(func(a,4)))
table
x = np.arange(-0,5,0.001)
plt.plot(x,x,label='$x$')
plt.plot(x,np.e**(-x),label='$e^{-x}$')
graph(5)
plt.plot(x,x - np.e**(-x),label='$x - e^{-x}$')
graph(5)
a , n , arr , table = bissection(0,1,0.0001,5)
iterations(n,arr,5)
print(str(a) + "\n" + str(func(a,5)))
table
x = np.arange(0,5,0.001)
plt.plot(x,np.sin(x),label='$sin(x)$')
plt.plot(x,np.e**(-x),label='$e^{-x}$')
graph(6)
plt.plot(x,np.sin(x) - np.e**(-x),label='$sin(x) - e^{-x}$')
graph(6)
a , n , arr , table = bissection(0,1,0.0001,6)
iterations(n,arr,6)
print(str(a) + "\n" + str(func(a,6)))
table
a , n , arr , table = bissection(3,4,0.0001,6)
iterations(n,arr,6)
print(str(a) + "\n" + str(func(a,6)))
table
x = np.arange(-2,4,0.001)
plt.plot(x,x**3,label='$x^3$')
plt.plot(x,2*x+2,label='$2x + 2$')
graph(7)
plt.plot(x,x**3 - 2*x - 2,label='$x^3 - 2x - 2$')
graph(7)
a , n , arr , table = bissection(1,2,0.0001,7)
iterations(n,arr,7)
print(str(a) + "\n" + str(func(a,7)))
table
x = np.arange(-2,4,0.001)
plt.plot(x,x**4,label='$x^4$')
plt.plot(x,x+1,label='$x+1$')
graph(8)
plt.plot(x,x**4 - x - 1,label='$x^4 - x - 1$')
graph(8)
a , n , arr , table = bissection(-1,0,0.0001,8)
iterations(n,arr,8)
print(str(a) + "\n" + str(func(a,8)))
table
a , n , arr , table = bissection(1,2,0.0001,8)
iterations(n,arr,8)
print(str(a) + "\n" + str(func(a,8)))
table
x = np.arange(-5,4,0.001)
plt.plot(x,np.e**(x),label='$e^x$')
plt.plot(x,x+2,label='$x+2$')
graph(9)
plt.plot(x, np.e**(x) - x - 2, label='$e^x - x - 2$')
graph(9)
a , n , arr , table = bissection(1,2,0.0001,9)
iterations(n,arr,9)
print(str(a) + "\n" + str(func(a,9)))
table
x = np.arange(-5,4,0.001)
plt.plot(x,-np.sin(x),label='$-sin(x)$')
plt.plot(x,1-x,label='$1 - x$')
graph(10)
plt.plot(x,-np.sin(x) - 1 + x,label='$-sin(x) - 1 + x$')
graph(10)
a , n , arr , table = bissection(0,2,0.0001,10)
iterations(n,arr,10)
print(str(a) + "\n" + str(func(a,10)))
table
x = np.arange(-10,10,.001)
plt.plot(x, np.tan(x), label='$tan(x)$')
plt.plot(x, x, label='$x$')
graph(11)
plt.plot(x, x - np.tan(x), label='$x - tan(x)$')
graph(11)
a , n , arr , table = bissection(4,5,0.0001,11)
iterations(n,arr,11)
print(str(a) + "\n" + str(func(a,11)))
table
a , n , arr , table = bissection(80,120,0.0001,11)
iterations(n,arr,11)
print(str(a) + "\n" + str(func(a,11)))
table
```
# Compiling and running C programs
As in [the example](https://github.com/tweag/funflow/tree/v1.5.0/funflow-examples/compile-and-run-c-files) in funflow version 1, we can construct a `Flow` which compiles and executes a C program. As in the older versions of this example, we will use the `gcc` Docker image to run our compilation step.
```
:opt no-lint
{-# LANGUAGE Arrows #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE QuasiQuotes #-}
-- Funflow libraries
import qualified Data.CAS.ContentStore as CS
import Funflow
( Flow,
dockerFlow,
ioFlow,
getDirFlow,
pureFlow,
putDirFlow,
runFlow,
)
import qualified Funflow.Tasks.Docker as DE
-- Other libraries
import Path (toFilePath, Abs, Dir, Path, File, absdir, parseAbsDir, relfile, reldir, (</>))
import System.Directory (getCurrentDirectory)
import System.Process (runCommand, ProcessHandle)
```
As in Funflow version 1.x, inputs to Docker tasks are mounted in from the content store. This means that we need to copy our example C files to the content store before we can compile them:
```
-- | Helper for getting the absolute path to the src directory
srcDir :: () -> IO (Path Abs Dir)
srcDir _ = do
cwd <- getCurrentDirectory
cwdAbs <- parseAbsDir cwd
return $ cwdAbs </> [reldir|./src|]
-- | A `Flow` which copies the c sources to the content store
copyExampleToStore :: Flow () CS.Item
copyExampleToStore = proc _ -> do
exampleDir <- ioFlow srcDir -< ()
putDirFlow -< exampleDir
```
Now we can define a task which compiles the example C files using `gcc`:
```
config :: DE.DockerTaskConfig
config =
DE.DockerTaskConfig
{ DE.image = "gcc:9.3.0",
DE.command = "gcc",
DE.args = [ "/example/double.c", "/example/square.c", "/example/main.c"]
}
-- | Compile our C program and get the path to the output executable
compile :: Flow CS.Item CS.Item
compile = proc exampleItem -> do
-- Define a volume for the example directory
let exampleVolume = DE.VolumeBinding {DE.item = exampleItem, DE.mount = [absdir|/example/|]}
dockerFlow config -< DE.DockerTaskInput {DE.inputBindings = [exampleVolume], DE.argsVals = mempty}
```
And finally, we can construct our full Flow graph and execute it!
```
flow :: Flow Integer ProcessHandle
flow = proc input -> do
-- 1. Add the example to the content store
example <- copyExampleToStore -< ()
-- 2. Compile the C sources and get the path to the new executable
output <- compile -< example
outputDir <- getDirFlow -< output
exe <- pureFlow (\x -> toFilePath (x </> [relfile|a.out|])) -< outputDir
-- 3. Call the executable
command <- pureFlow (\(c, n) -> c <> " " <> show n) -< (exe, input)
ioFlow runCommand -< command
-- Our C program defined in `src/main.c` defines a function f(x) = 2*x + x^2
-- For input 3 this should output 15.
runFlow flow 3 :: IO ProcessHandle
```
# 1millionwomentotech SummerOfCode
## Intro to AI: Week 4 Day 3
```
print(baby_train[50000]['reviewText'])
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sia = SentimentIntensityAnalyzer()
text = baby_train[50000]['reviewText']
for s in sent_tokenize(text):
print(s)
print(sia.polarity_scores(s))
def sia_features(dataset):
"""For each review text in the dataset, extract:
(1) the mean positive sentiment over all sentences
(2) the mean neutral sentiment over all sentences
(3) the mean negative sentiment over all sentences
(4) the maximum positive sentiment over all sentences
(5) the maximum neutral sentiment over all sentences
(6) the maximum negative sentiment over all sentences"""
feat_matrix = numpy.empty((len(dataset), 6))
for i in range(len(dataset)):
sentences = sent_tokenize(dataset[i]['reviewText'])
nsent = len(sentences)
if nsent:
sentence_polarities = numpy.empty((nsent, 3))
for j in range(nsent):
polarity = sia.polarity_scores(sentences[j])
sentence_polarities[j, 0] = polarity['pos']
sentence_polarities[j, 1] = polarity['neu']
sentence_polarities[j, 2] = polarity['neg']
feat_matrix[i, 0:3] = numpy.mean(sentence_polarities, axis=0) # mean over the columns
feat_matrix[i, 3:6] = numpy.max(sentence_polarities, axis=0) # maximum over the columns
else:
feat_matrix[i, 0:6] = 0.0
return feat_matrix
sia_tr = sia_features(baby_train)
testmat = numpy.arange(12.).reshape((3, 4))
print(testmat)
print(numpy.max(testmat, axis=0))
print(numpy.mean(testmat, axis=1))
def len_features(dataset):
"""Add two features:
(1) length of review (in thousands of characters) - truncate at 2,500
(2) percentage of exclamation marks (in %)"""
feat_matrix = numpy.empty((len(dataset), 2))
for i in range(len(dataset)):
text = dataset[i]['reviewText']
feat_matrix[i, 0] = len(text) / 1000.
if text:
feat_matrix[i, 1] = 100. * text.count('!') / len(text)
else:
feat_matrix[i, 1] = 0.0
feat_matrix[feat_matrix>2.5] = 2.5
return feat_matrix
len_tr = len_features(baby_train)
print(X_train_neg.shape, sia_tr.shape, len_tr.shape)
X_train_augmented = numpy.concatenate((X_train_neg, sia_tr, len_tr), axis=1) # stack horizontally
lreg_augmented = LinearRegression().fit(X_train_augmented, Y_train)
pred_train_augmented = lreg_augmented.predict(X_train_augmented)
mae_train_augmented = mean_absolute_error(pred_train_augmented, Y_train)
print("Now the mean absolute error on the training data is %f stars" % mae_train_augmented)
rf_augmented = RandomForestRegressor().fit(X_train_augmented, Y_train)
rfpred_train_augmented = rf_augmented.predict(X_train_augmented)
mae_train_rf_augmented = mean_absolute_error(rfpred_train_augmented, Y_train)
print("For the RF, it is %f stars" % mae_train_rf_augmented)
X_valid_neg = dataset_to_matrix_with_neg(baby_valid)
sia_valid = sia_features(baby_valid)
len_valid = len_features(baby_valid)
X_valid_augmented = numpy.concatenate((X_valid_neg, sia_valid, len_valid), axis=1)
pred_valid_augmented = lreg_augmented.predict(X_valid_augmented)
pred_valid_rf_augmented = rf_augmented.predict(X_valid_augmented)
mae_valid_augmented = mean_absolute_error(pred_valid_augmented, Y_valid)
print("On the validation set, we get %f error for the linear regression" % mae_valid_augmented)
mae_valid_rf_augmented = mean_absolute_error(pred_valid_rf_augmented, Y_valid)
print("And %f for the random forest regression" % mae_valid_rf_augmented)
print(baby_train[50000]['reviewText'])
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sia = SentimentIntensityAnalyzer()
text = baby_train[50000]['reviewText']
for s in sent_tokenize(text):
print(s)
print(sia.polarity_scores(s))
def sia_features(dataset):
"""For each review text in the dataset, extract:
(1) mean positive sentiment over all sentences
(2) mean neutral sentiment over all sentences
(3) mean negative sentiment over all sentences
(4) maximum positive sentiment over all sentences
(5) maximum neutral sentiment over all sentences
(6) maximum negative sentiment over all sentences
"""
feat_matrix = numpy.empty((len(dataset), 6))
for i in range(len(dataset)):
sentences = sent_tokenize(dataset[i]['reviewText'])
nsent = len(sentences)
if nsent:
sentence_polarities = numpy.empty((nsent, 3))
for j in range(nsent):
polarity = sia.polarity_scores(sentences[j])
sentence_polarities[j, 0] = polarity['pos']
sentence_polarities[j, 1] = polarity['neu']
sentence_polarities[j, 2] = polarity['neg']
feat_matrix[i, 0:3] = numpy.mean(sentence_polarities, axis = 0) # mean over the columns
feat_matrix[i, 3:6] = numpy.max(sentence_polarities, axis = 0) # maximum over the columns
else:
feat_matrix[i, 0:6] = 0.0
return feat_matrix
sia_tr = sia_features(baby_train)
print(sia_tr[:10])
testmat = numpy.arange(12.).reshape((3,4))
print(testmat)
print(numpy.max(testmat, axis = 0))
print(numpy.mean(testmat, axis = 1))
# Homework - required for Certification
def len_features(dataset):
"""Add two features:
    (1) length of review (in thousands of characters) - truncate at 2,500
(2) percentage of exclamation marks (in %)
"""
len_tr = len_features(baby_train)
print(X_train_neg.shape, sia_tr.shape)
# stack horizontally
X_train_augmented = numpy.concatenate( (X_train_neg, sia_tr), axis = 1)
lreg_augmented = LinearRegression().fit(X_train_augmented, Y_train)
pred_train_augmented = lreg_augmented.predict(X_train_augmented)
mae_train_augmented = mean_absolute_error(pred_train_augmented, Y_train)
print("Now the mean absolute error on the training data is %f starts" % mae_train_augmented)
# random forest
rf_augmented = RandomForestRegressor().fit(X_train_augmented, Y_train)
rfpred_train_augmented = rf_augmented.predict(X_train_augmented)
mae_train_rf_augmented = mean_absolute_error(rfpred_train_augmented, Y_train)
print("For the RF, MAE is %f stars" % mae_train_rf_augmented)
X_valid_neg = dataset_to_matrix_with_neg(baby_valid)
sia_valid = sia_features(baby_valid)
# len_valid =
X_valid_augmented = numpy.concatenate((X_valid_neg, sia_valid), axis = 1)
pred_valid_augmented = lreg_augmented.predict(X_valid_augmented)
pred_valid_rfaugmented = rf_augmented.predict(X_valid_augmented)
mae_valid_augmented = mean_absolute_error(pred_valid_augmented, Y_valid)
mae_valid_rfaugmented = mean_absolute_error(pred_valid_rfaugmented, Y_valid)
```
# Homework for certification
Refactor the code above:
- "Be lazy. Not just lazy but proactively, agressively lazy." Remove duplication.
- create a single function that takes in data and spits out all success metrics across all of your algos (one possible shape for such a helper is sketched below).
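A minimal, hedged sketch of what that helper could look like (the names and structure are just one possibility, not the required solution):
```
from sklearn.metrics import mean_absolute_error

def evaluate_models(models, feature_sets, Y):
    """Print the MAE of every fitted model on every feature matrix.

    models: dict of name -> fitted regressor, e.g. {"linear": lreg_augmented}
    feature_sets: dict of name -> feature matrix aligned with Y
    """
    for feat_name, X in feature_sets.items():
        for model_name, model in models.items():
            mae = mean_absolute_error(model.predict(X), Y)
            print(f"{model_name} on {feat_name}: {mae:.3f} stars MAE")
```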
# Where to go from here?
- unigrams (NLTK)
- word vector (gensim, [glove](https://nlp.stanford.edu/projects/glove/), word2vec)
- recurrent neural net
- convolutional neural net
https://www.oreilly.com/learning/perform-sentiment-analysis-with-lstms-using-tensorflow
http://www.wildml.com/2015/11/understanding-convolutional-neural-networks-for-nlp/
https://machinelearningmastery.com/develop-n-gram-multichannel-convolutional-neural-network-sentiment-analysis/
# Settings related to automatic differentiation
- Tensor attributes:
    - requires_grad=True
        - whether the tensor takes part in differentiation
    - is_leaf:
        - a leaf node is a tensor that is not the result of a computation;
        - Tensors created by the user have is_leaf=True (even if requires_grad=True, is_leaf is still True);
        - Tensors with requires_grad=False have is_leaf=True;
    - grad_fn:
        - records the function used to compute the gradient;
    - grad
        - holds the computed gradient;
    - dtype
        - only torch.float tensors can be differentiated;
1. A differentiation example
```
import torch
# x自变量
x = torch.Tensor([5])
x.requires_grad=True
# y因变量
y = x ** 2
# 求导
y.backward()
# 导数的结果
print(x.grad)
```
2. Visualizing the derivative (the curve of the derivative function)
```
%matplotlib inline
import matplotlib.pyplot as plt
import torch
# x自变量
x = torch.linspace(0, 10, 100)
x.requires_grad=True
# y因变量
y = (x - 5) ** 2 + 3
z = y.sum()
# 求导
z.backward()
print()
# 可视化
plt.plot(x.detach(), y.detach(), color=(1, 0, 0, 1), label='$y=(x-5)^2 + 3$')
plt.plot(x.detach(), x.grad.detach(), color=(1, 0, 1, 1), label='$y=2(x-5)$')
plt.legend()
plt.show()
# print(x.grad)
# print(x)
```
3. Attribute values related to differentiation
```
import torch
# x自变量
x = torch.Tensor([5])
x.requires_grad=True
# 求导前的属性
print("-------------求导前x")
print("leaf:", x.is_leaf)
print("grad_fn:", x.grad_fn)
print("grad:", x.grad)
# y因变量
y = x ** 2
print("-------------求导前y")
print("requires_grad:", y.requires_grad)
print("leaf:", y.is_leaf)
print("grad_fn:", y.grad_fn)
print("grad:", y.grad)
# 求导
y.backward() # 只对标量运算
print("-------------求导后x")
# 求导后的属性
print("leaf:", x.is_leaf)
print("grad_fn:", x.grad_fn)
print("grad:", x.grad)
print("-------------求导后y")
print("requires_grad:", y.requires_grad)
print("leaf:", y.is_leaf)
print("grad_fn:", y.grad_fn)
print("grad:", y.grad)
```
# The Tensor backward function
## Definition of backward
- Function signature:
```python
backward(self, gradient=None, retain_graph=None, create_graph=False)
```
- Parameters:
    - gradient=None: the gradient tensor used to weight the differentiation;
    - retain_graph=None: keep the computation graph; otherwise the graph built for the computation is freed after each backward pass.
    - create_graph=False: build a graph of the derivative itself, mainly used for higher-order derivatives;
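A small illustrative sketch of why retain_graph matters when backward is called more than once on the same graph:
```
import torch

x = torch.tensor([2.0], requires_grad=True)
y = x ** 2

y.backward(retain_graph=True)  # keep the graph so we can backpropagate again
y.backward()                   # second pass works; gradients accumulate in x.grad
print(x.grad)                  # tensor([8.]) = 4 + 4
```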
## The general pattern for differentiation
- Function expression:
    - $z = 2x + 3y$
- Derivative by hand:
    - $\dfrac{\partial{z}}{\partial{x}} = 2$
```
import torch
x = torch.Tensor([1, 2, 3])
x.requires_grad=True # 这个属性必须在 z = 2*x + 3*y 表达式构建图的时候设置
y = torch.Tensor([4, 5, 6])
z = 2*x + 3*y
z.backward(x) # 对x求导,得到的结果,自然是 2,但是x的grad是 2 * x
print(x.grad, y.grad, z.grad) # 没有对y求导,所以对y没有要求
```
## Understanding the derivative
- Function expression:
    - $z = x^2$
- Derivative by hand:
    - $\dfrac{\partial{z}}{\partial{x}} = 2x$
- $\color{red}{\text{How is this actually computed?}}$
### When the result tensor is a scalar
- If z is a scalar, the derivative is computed directly: $\dfrac{\partial{z}}{\partial{x}} = 2x$
```
import torch
x = torch.Tensor([2])
x.requires_grad=True
z = x**2 # 求导函数
z.backward() # 对x求导,2 * x ,导数为2x=4
print(x.grad, z.grad)
```
### When the result tensor is a vector
- If z is a vector, the inner product of z with the gradient tensor (here x) is taken first, giving a scalar, and that scalar is then differentiated.
    - $z = x^2$
    - $l = z \cdot x$
    - $\dfrac{\partial{l}}{\partial{x}} = \dfrac{\partial{l}}{\partial{z}} \dfrac{\partial{z}}{\partial{x}} = x \dfrac{\partial{z}}{\partial{x}} = x \cdot 2x$
```
import torch
x = torch.Tensor([2])
x.requires_grad=True
y = x**2 # 求导函数
y.backward(x) # 2 x x = 8
print(x.grad, y.grad)
print(x.grad/x) # 正宗结果
```
### Using a ones tensor as the gradient argument
- From the derivation above, automatic differentiation has a few default behaviors:
    - 1. With z.backward() and no gradient argument, derivatives are in fact computed for all leaf tensors of the graph marked requires_grad=True;
        - if all leaf nodes have requires_grad=False, an exception is raised:
        - `RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn`
    - 2. With z.backward(x), a gradient tensor is passed explicitly;
        - this does not mean "differentiate with respect to x": derivatives are still computed for all leaf nodes with requires_grad=True.
- The example below illustrates automatic differentiation with multiple leaf nodes;
    - even though only x is passed to backward, y also receives a gradient;
```
import torch
x = torch.Tensor([1, 2, 3])
y = torch.Tensor([4, 5, 6])
x.requires_grad=True
y.requires_grad=True
z = 3*x + 2*y # 求导函数
z.backward(x) # 对x求导
print(x.grad, y.grad) # [3., 6., 9.] :导数是3 与 [2., 4., 6.]:导数是2
print(x.grad/x, y.grad/x) # [3., 6., 9.] :导数是3 与 [2., 4., 6.]:导数是2
```
- The example above shows that the tensor passed to backward only turns the vector-valued function into a scalar before differentiating; it does not select which variable (tensor) to differentiate with respect to.
- Since the backward argument is only a vector-to-scalar conversion, we can simply take it to be a tensor of ones. The reasoning is:
    - $z = x^2$
    - $l = z \cdot 1$
    - $\dfrac{\partial{l}}{\partial{x}} = \dfrac{\partial{l}}{\partial{z}} \dfrac{\partial{z}}{\partial{x}} = \dfrac{\partial{z \cdot 1 }}{\partial{z}} \dfrac{\partial{z}}{\partial{x}} = \dfrac{\partial{z}}{\partial{x}} = 2x$
- Using a ones tensor as the gradient argument
```
import torch
x = torch.Tensor([1, 2, 3])
x.requires_grad=True
z = x**2 # 求导函数
z.backward(torch.ones_like(x))
print(x.grad, z.grad)
```
- The operation below follows exactly the same principle as passing a ones tensor
    - the only difference is that the user performs the inner product (the sum) manually.
```
import torch
x = torch.Tensor([1, 2, 3])
x.requires_grad=True
z = (x**2).sum() # 直接求和
z.backward()
print(x.grad, z.grad)
```
## A more complex differentiation example
- The computation graph is illustrated below:
    - 
```
import torch
# 叶子节点
x = torch.Tensor([1, 2, 3])
y = torch.Tensor([3, 4, 5])
z = torch.Tensor([1, 2, 3])
x.requires_grad=True
y.requires_grad=True
z.requires_grad=True
# 中间节点
xy = x + y
xy2 = xy ** 2
z3 = z ** 3
xy2z3=xy2 * z3
# 求导数
xy2z3.backward(torch.Tensor([1.0, 1.0, 1.0]))
print(x.grad, y.grad, z.grad)
print(xy.grad, xy2.grad, z3.grad, xy2z3.grad) # 没有梯度,因为不是叶子节点
print(xy.grad_fn, xy2.grad_fn, z3.grad_fn, xy2z3.grad_fn)
print(xy.requires_grad, xy2.requires_grad, z3.requires_grad, xy2z3.requires_grad)
```
## Gradients of intermediate tensors
- With the pattern above, only the gradients of the input (leaf) variables are produced; the gradients of intermediate variables are not retained. To obtain the gradient of an intermediate variable, register a callback hook and read the gradient inside that hook.
- Example of retrieving the gradient of an intermediate variable
```
import torch
# 叶子节点
x = torch.Tensor([1, 2, 3])
y = torch.Tensor([3, 4, 5])
z = torch.Tensor([1, 2, 3])
x.requires_grad=True
y.requires_grad=True
z.requires_grad=True
# 中间节点
xy = x + y
# xyz = xy * z
# xyz.backward(torch.Tensor([1, 1, 1]))
xyz = torch.dot(xy, z)
# ====================
def get_xy_grad(grad):
print(F"xy的导数:{ grad }") # 可以保存到全局变量使用。
xy.register_hook(get_xy_grad)
# ====================
xyz.backward()
print(x.grad, y.grad, z.grad)
print(xy.grad, y.grad, z.grad)
```
## Higher-order derivatives
1. The create_graph parameter keeps the graph of the derivative, which makes higher-order derivatives possible.
2. Since higher-order derivatives are not leaf nodes, they have to be retrieved through a callback hook.
```
import torch
x = torch.Tensor([1])
x.requires_grad=True
z = x**6 # 求导函数
z.backward(create_graph=True) # retain_graph保留的是本身的运算图,create_graph是保留微分图
print(x.grad) # 导数3
# ====================
def get_xy_grad(grad):
print(F"x.grad的高阶导数:{ grad }") # 可以保存到全局变量使用。
x.register_hook(get_xy_grad)
# ====================
x.grad.backward(create_graph=True)
```
# Automatic differentiation with torch.autograd
- With the background above, the automatic differentiation utilities in torch.autograd are essentially straightforward.
- Torch provides the torch.autograd module for automatic differentiation; the calls it exposes include:
    - `['Variable', 'Function', 'backward', 'grad_mode']`
## Using backward
- The backward provided by autograd is a free-function version of Tensor.backward; it is not more convenient, but it is another option:
```python
torch.autograd.backward(
    tensors,
    grad_tensors=None,
    retain_graph=None,
    create_graph=False,
    grad_variables=None)
```
- Parameters:
    - tensors: the tensors to differentiate (they must have a grad_fn);
    - grad_tensors=None: the gradient tensors;
    - retain_graph=None: keep the computation graph;
    - create_graph=False: build a graph of the derivative for higher-order differentiation (you can compute higher-order derivatives by hand, or use the grad convenience function below);
    - grad_variables=None: kept for compatibility with the old Variable API; no longer used in newer versions.
- Usage example of torch.autograd.backward
    - the grad_variables parameter can no longer be used in the version installed here.
```
import torch
# 叶子节点
x = torch.Tensor([1, 2, 3])
y = torch.Tensor([3, 4, 5])
z = torch.Tensor([1, 2, 3])
x.requires_grad=True
y.requires_grad=True
z.requires_grad=True
# 中间节点
xy = x + y
# xyz = xy * z
# xyz.backward(torch.Tensor([1, 1, 1]))
xyz = torch.dot(xy, z)
# ====================
def get_xy_grad(grad):
print(F"xy的导数:{ grad }") # 可以保存到全局变量使用。
xy.register_hook(get_xy_grad)
# ====================
torch.autograd.backward(xyz)
print(x.grad, y.grad, z.grad)
print(xy.grad, y.grad, z.grad)
```
## Using grad
- grad computes the sum of gradients of the outputs with respect to specific inputs; instead of returning all gradients, it returns the derivative with respect to the chosen inputs: $\dfrac{\partial{z}}{\partial{x}}$
- Its behavior is therefore similar to what a hook gives you.
- Definition of grad:
```python
torch.autograd.grad(
    outputs,
    inputs,
    grad_outputs=None,
    retain_graph=None,
    create_graph=False,
    only_inputs=True,
    allow_unused=False)
```
- Parameters:
    - outputs: list of output tensors, same role as tensors in the backward function;
    - inputs: list of input tensors, the tensors you would otherwise call register_hook on;
    - grad_outputs: list of gradient tensors, same role as grad_tensors in the backward function;
    - retain_graph: boolean, whether to keep the computation graph after the call;
    - create_graph: boolean, build the computation graph of the gradient (the gradient of the gradient is the higher-order derivative);
    - only_inputs: boolean, deprecated and no longer has any effect; to compute derivatives of all leaf nodes, use the backward function instead. Default True.
    - allow_unused: boolean, controls whether every input must be used in computing the outputs; with the default False an error is raised if some input was not used, while with True unused inputs are allowed. If every input is used, True and False give the same result. Default False.
- Usage example of grad
```
import torch
# 叶子节点
x = torch.Tensor([1, 2, 3])
y = torch.Tensor([3, 4, 5])
z = torch.Tensor([1, 2, 3])
x.requires_grad=True
y.requires_grad=True
z.requires_grad=True
# 中间节点
xy = x + y
xyz = torch.dot(xy, z)
# ====================
gd = torch.autograd.grad(xyz, x, retain_graph=True)
print(x.grad, y.grad, z.grad)
print(xy.grad, y.grad, z.grad)
print(gd)
print(torch.autograd.grad(xyz, xy,retain_graph=True))
print(torch.autograd.grad(xyz, y,retain_graph=True))
print(torch.autograd.grad(xyz, z,retain_graph=True, allow_unused=True))
# ====================
```
### Higher-order derivatives with grad
- Use create_graph to build the graph of the derivative, then differentiate the derivative again to obtain higher-order derivatives.
```
import torch
x = torch.Tensor([1])
x.requires_grad=True
z = x**6 # 求导函数
gd_1 = torch.autograd.grad(z, x, create_graph=True)
gd_2 = torch.autograd.grad(gd_1, x)
print(F"一阶导数:{gd_1},\n二阶导数: {gd_2}")
```
# Controlling gradient computation
## The set_grad_enabled class
- set_grad_enabled turns gradient computation on and off
    - it is also a context-manager object
- Declaration:
```python
torch.autograd.set_grad_enabled(mode)
```
- Parameter:
    - mode: boolean, True enables gradient computation, False disables it
### Basic usage example
```
import torch
x = torch.Tensor([1, 2, 3])
y = torch.Tensor([3, 4, 5])
z = torch.Tensor([1, 2, 3])
x.requires_grad=True
y.requires_grad=True
z.requires_grad=True
torch.autograd.set_grad_enabled(False) # 全局上下文
xy = x + y
xyz = torch.dot(xy, z)
torch.autograd.set_grad_enabled(True)
print(xy.requires_grad, xyz.requires_grad, z.requires_grad)
```
### Context-manager usage example
```
import torch
x = torch.Tensor([1, 2, 3])
y = torch.Tensor([3, 4, 5])
z = torch.Tensor([1, 2, 3])
x.requires_grad=True
y.requires_grad=True
z.requires_grad=True
with torch.autograd.set_grad_enabled(False) as grad_ctx: # 局部上下文
xy = x + y # 块结束,作用范围自动结束
xyz = torch.dot(xy, z)
print(xy.requires_grad, xyz.requires_grad, z.requires_grad)
```
## The enable_grad class
- This class works as a decorator, giving a more concise way to enable gradients.
    - It is also a context manager;
    - as a decorator it is applied to functions (see the note under no_grad below);
```python
torch.autograd.enable_grad()
```
### Decorator usage example
```
import torch
x = torch.Tensor([1, 2, 3])
y = torch.Tensor([3, 4, 5])
z = torch.Tensor([1, 2, 3])
x.requires_grad=True
y.requires_grad=True
z.requires_grad=True
@ torch.autograd.enable_grad()
def func_xy(x, y):
return x + y # 块结束,作用范围自动结束
xy = func_xy(x, y)
xyz = torch.dot(xy, z)
print(xy.requires_grad, xyz.requires_grad, z.requires_grad)
```
### Context-manager usage example
```
import torch
x = torch.Tensor([1, 2, 3])
y = torch.Tensor([3, 4, 5])
z = torch.Tensor([1, 2, 3])
x.requires_grad=True
y.requires_grad=True
z.requires_grad=True
with torch.autograd.enable_grad():
xy = x + y
xyz = torch.dot(xy, z)
print(xy.requires_grad, xyz.requires_grad, z.requires_grad)
```
## The no_grad class
- Used the same way as enable_grad, but with the opposite effect.
- Note:
    - no_grad and enable_grad are function decorators, not class decorators;
### Decorator usage
- It applies to the whole function, which suits a function-based style; for special cases inside the function, the two can be nested.
```
import torch
x = torch.Tensor([1, 2, 3])
y = torch.Tensor([3, 4, 5])
z = torch.Tensor([1, 2, 3])
x.requires_grad=True
y.requires_grad=True
z.requires_grad=True
@ torch.autograd.no_grad()
def func_xy(x, y):
return x + y # 块结束,作用范围自动结束
xy = func_xy(x, y)
xyz = torch.dot(xy, z)
print(xy.requires_grad, xyz.requires_grad, z.requires_grad)
```
### Context-manager usage
- Suitable when the code is not wrapped in a function
```
import torch
x = torch.Tensor([1, 2, 3])
y = torch.Tensor([3, 4, 5])
z = torch.Tensor([1, 2, 3])
x.requires_grad=True
y.requires_grad=True
z.requires_grad=True
with torch.autograd.no_grad():
xy = x + y
xyz = torch.dot(xy, z)
print(xy.requires_grad, xyz.requires_grad, z.requires_grad)
```
### Mixing no_grad and enable_grad
- Combining the two covers essentially any situation that comes up in development;
```
import torch
x = torch.Tensor([1, 2, 3])
y = torch.Tensor([3, 4, 5])
z = torch.Tensor([1, 2, 3])
x.requires_grad=True
y.requires_grad=True
z.requires_grad=True
with torch.autograd.no_grad():
xy = x + y
with torch.autograd.enable_grad():
z3 = z **3
xy2 = xy ** 2 # 因为xy的requires_grad=False,整个运算也是False
print(xy.requires_grad, z3.requires_grad, xy2.requires_grad)
```
----
# lab2 Logisitic Regression
```
%matplotlib inline
import numpy as np
import matplotlib
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize as op
```
## 1. Load Data
```
data = pd.read_csv('ex2data1.txt')
X = np.array(data.iloc[:,0:2])
y = np.array(data.iloc[:,2])
print('X.shape = ' + str(X.shape))
print('y.shape = ' + str(y.shape))
def plotData(X, y):
k1 = (y==1)
k2 = (y==0)
plt.scatter(X[k1,0], X[k1,1], c='r',marker='+')
plt.scatter(X[k2,0], X[k2,1], c='b',marker='o')
plt.xlabel('Exam 1 score')
plt.ylabel('Exam 2 score')
plt.legend(['Admitted', 'Not admitted'])
plotData(X, y)
plt.show()
# Add a column of ones on the left of X
m = X.shape[0]
n = X.shape[1]
X = np.hstack((np.ones((m,1)), X))
print('X.shape = ' + str(X.shape))
ini_theta = np.zeros((n+1, 1))
```
## 2. Cost and Gradient
$$
g(z)=\frac{1}{1+e^{-z}}
$$
$$
J(\theta)=\frac{1}{m}\sum_{i=1}^{m}[-y^{(i)}log(h_\theta(x^{(i)}))-(1-y^{(i)})log(1-h_\theta(x^{(i)}))]
$$
$$
\frac{\partial J(\theta)}{\partial\theta_j}=\frac{1}{m}\sum_{i=1}^{m} [(h_\theta(x^{(i)})-y^{(i)})x^{(i)}_j]
$$
```
def sigmoid(z):
return 1 / (1+np.exp(-z))
def gradient(theta, X, y):
'''compute gradient
args:
X - X.shape = (m,n)
theta - theta.shape = (n,1)
y - y.shape = (m,1)
return:
grade - the gradient
'''
m = X.shape[0]
n = X.shape[1]
theta = theta.reshape((n,1))
y = y.reshape((m,1))
h = sigmoid(np.dot(X, theta))
tmp = np.sum((h-y)*X, axis=0) / m
grade = tmp.reshape(theta.shape)
return grade
def costFunction(theta, X, y):
'''compute cost
args:
X - X.shape = (m,n)
theta - theta.shape = (n,1)
y - y.shape = (m,1)
return:
J - the cost
'''
m = X.shape[0]
n = X.shape[1]
theta = theta.reshape((n,1))
y = y.reshape((m,1))
h = sigmoid(np.dot(X, theta))
term1 = y * np.log(h)
term2 = (1-y) * np.log(1-h)
J = sum(- term1 - term2) / m
return J
grade = gradient(ini_theta, X, y)
cost= costFunction(ini_theta, X, y)
print('cost = ' + str(cost))
grade
test_theta = [[-24], [0.2], [0.2]]
test_theta = np.array(test_theta)
grade = gradient(test_theta, X, y)
cost = costFunction(test_theta, X, y)
print('cost = ' + str(cost))
grade
```
## 3. predict
Here we use scipy's optimizer instead of writing our own gradient descent.
```
result = op.minimize(fun=costFunction, x0=ini_theta, args=(X, y), method='TNC', jac=gradient)
optimal_theta = result.x
optimal_theta
def plotDecisionBoundary(theta, X, y):
    '''Plot the decision boundary line
    '''
    plotData(X[:,1:3], y)
    plot_x = np.array([np.min(X[:,1])-2, np.max(X[:,1])+2])
    # theta0 + theta1 * x1 + theta2 * x2 == 0
    # plug into the sigmoid function:
    # g(z) = 1/2 is the threshold between class 1 and class 0
plot_y = -1 / theta[2] * (theta[1]*plot_x + theta[0])
plt.plot(plot_x, plot_y)
plotDecisionBoundary(optimal_theta, X, y)
plt.show()
def predict(theta, X):
m = X.shape[0]
pred = np.zeros((m,1))
h = sigmoid(np.dot(X, theta))
pred[h>=0.5] = 1
return pred.flatten()
prob = np.array([1, 45, 85])
prob = sigmoid(np.dot(prob, optimal_theta))
prob
# Compute the accuracy; note the neat use of np.mean on the boolean array
p = predict(optimal_theta, X)
print('Train accuracy = {}%'.format(100 * np.mean(p==y)) )
```
# Inference
This notebook is dedicated to testing and visualizing results for both the wiki and podcast datasets
Note:
Apologies for the gratuitous warnings. Tensorflow is aware of these issues and has rectified them in later versions of TensorFlow. Unfortunately, they persist for version 1.13.
```
from src.SliceNet import SliceNet
from src.netUtils import getSingleExample
import matplotlib.pyplot as plt
from pathlib import Path
import numpy as np
import pandas as pd
import seaborn as sns
import random
import math
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
if type(tf.contrib) != type(tf): tf.contrib._warning = None
%load_ext autoreload
%autoreload 2
# Choose whether to use the base network or the network with self-attention
attention = True
# Current best networks
best_base_wiki = '/home/bmmidei/SliceCast/models/04_20_2019_2300_final.h5'
best_base_podcast = '/home/bmmidei/SliceCast/models/04_26_2019_1000_podcast.h5'
best_attn_wiki = '/home/bmmidei/SliceCast/models/05_03_2019_0800_attn.h5'
best_attn_podcast = '/home/bmmidei/SliceCast/models/05_02_2019_2200_attn_podcast.h5'
if attention:
weights_wiki = best_attn_wiki
weights_podcast = best_attn_podcast
else:
weights_wiki = best_base_wiki
weights_podcast = best_base_podcast
net = SliceNet(classification=True,
class_weights=[1.0,10,0.2],
attention=attention)
```
## Sample predictions on unseen wiki articles
Note that this section relies on the preprocessed Wikipedia sample data (the hdf5 files under `data/wiki-sample`).
```
dataPath = Path('/home/bmmidei/SliceCast/data/wiki-sample/')
files = [str(x) for x in dataPath.glob('**/*') if x.suffix=='.hdf5']
mask = random.sample(range(0,len(files)), 1)
# randomly select a file to test
test_file = [x for (i,x) in enumerate(files) if i in mask][0]
k = 4
num_samples = 16
preds, labels, pk = net.predict(test_file=test_file,
num_samples=num_samples,
weights_path=weights_wiki,
k=k)
print('Average PK score with k={} on {} examples is: {:0.3f}'.format(k, num_samples, pk))
np.set_printoptions(suppress=True)
preds = np.argmax(preds, axis=2)
labels = np.argmax(labels, axis=2)
# Choose the index of the document you want to examine
idx = 2
# You can keep running this cell with different indices to visualize different
# documents within this batch of testing
# Note: The graph displays n sentences where n is the length of the longest
# document in the batch. As such, there may be padding sections at the beginning
# of the document with label and prediction of value 2
df = pd.DataFrame()
df['preds'] = preds[idx,:]
df['labels'] = labels[idx,:]
df['sent_number'] = df.index
fig, axes = plt.subplots(nrows=2, ncols=1)
df.plot(x='sent_number', y='preds', figsize=(10,5), grid=True, ax=axes[0])
df.plot(x='sent_number', y='labels', figsize=(10,5), grid=True, ax=axes[1], color='green')
```
## Sample predictions on unseen podcast data
```
test_file = '/home/bmmidei/SliceCast/data/podcasts/hdf5/batch0_0.hdf5'
k = 33
num_samples = 2
preds, labels, pk = net.predict(test_file=test_file,
num_samples=num_samples,
weights_path=weights_podcast,
k=k)
print('Average PK score with k={} on {} examples is: {:0.3f}'.format(k, num_samples, pk))
np.set_printoptions(suppress=True)
preds = np.argmax(preds, axis=2)
labels = np.argmax(labels, axis=2)
# Choose the document you want to examine
idx = 1
df = pd.DataFrame()
df['preds'] = preds[idx,:]
df['labels'] = labels[idx,:]
df['sent_number'] = df.index
fig, axes = plt.subplots(nrows=2, ncols=1)
df.plot(x='sent_number', y='preds', figsize=(10,5), grid=True, ax=axes[0])
df.plot(x='sent_number', y='labels', figsize=(10,5), grid=True, ax=axes[1], color='green')
```
## Predictions on a single text file
```
text_file = '/home/bmmidei/SliceCast/data/podcasts/with_timestamps/joe1254.txt'
is_labeled = True
weights_path = weights_podcast # transfer learning
sents, labels = getSingleExample(fname=text_file, is_labeled=is_labeled)
sents = np.expand_dims(sents, axis=0)
preds = net.singlePredict(sents, weights_path=weights_path)
# Place data into a pandas dataframe for analysis
df = pd.DataFrame()
preds = np.argmax(np.squeeze(preds), axis=-1)
df['raw_sentences'] = sents[0]
if is_labeled:
df['labels'] = labels
df['preds'] = preds
df['sent_number'] = df.index
fig, axes = plt.subplots(nrows=2, ncols=1)
df.plot(x='sent_number', y='preds', figsize=(10,5), grid=True, ax=axes[0])
df.plot(x='sent_number', y='labels', figsize=(10,5), grid=True, ax=axes[1], color='green')
```
## Keyword Extraction
The following cells are experimental code to extract keywords for each segment in order to provide context for each segment.
```
from src.postprocess import getSummaries, getTimeStamps
import nltk
nltk.download('stopwords')
keywords = getSummaries(sents[0], preds)
stamps = getTimeStamps(sents[0], '/home/bmmidei/SliceCast/data/podcasts/with_timestamps/joe1254.json', preds)
seconds = [x%60 for x in stamps]
minutes = [math.floor(x/60) for x in stamps]
for i, (x, y) in enumerate(zip(minutes, seconds)):
print("{}:{}".format(x, y), end="")
print([x[0] for x in keywords[i]])
```
```
from plot_helpers import *
from source_files_extended import load_sfm_depth, load_aso_depth, load_classifier_data
figure_style= dict(figsize=(8, 6))
aso_snow_depth_values = load_aso_depth()
sfm_snow_depth_values = load_sfm_depth(aso_snow_depth_values.mask)
```
## SfM snow depth distribution
```
data = [
{
'data': sfm_snow_depth_values,
'label': 'SfM',
'color': 'brown',
}
]
with Histogram.plot(data, (-5, 5), **figure_style) as ax:
ax
```
## Positive snow depth comparison
```
data = [
{
'data': aso_snow_depth_values,
'label': 'ASO',
'color': 'dodgerblue',
},
{
'data': np.ma.masked_where(sfm_snow_depth_values <= 0.0, sfm_snow_depth_values, copy=True),
'label': 'SfM',
'color': 'brown',
}
]
with Histogram.plot(data, (0, 5), **figure_style) as ax:
ax
```
## Pixel Classification
```
casi_classification = load_classifier_data(aso_snow_depth_values.mask)
casi_classes, classes_count = np.unique(casi_classification, return_counts=True)
non_snow_casi = np.ma.masked_where(casi_classification == 1, casi_classification, copy=True)
assert classes_count[1:4].sum() == np.count_nonzero(~non_snow_casi.mask)
```
## ASO non-snow pixels depth values
```
data = [
{
'data': np.ma.masked_where(non_snow_casi.mask, aso_snow_depth_values, copy=True),
'label': 'ASO',
'color': 'dodgerblue',
}
]
with Histogram.plot(data, (0, 5), **figure_style) as ax:
ax
```
## CASI snow pixels snow depth values
```
data = [
{
'data': np.ma.masked_where(~non_snow_casi.mask, aso_snow_depth_values, copy=True),
'label': 'ASO',
'color': 'steelblue',
},
{
'data': np.ma.masked_where(~non_snow_casi.mask, sfm_snow_depth_values, copy=True),
'label': 'SfM',
'color': 'beige',
'alpha': 0.7,
}
]
with Histogram.plot(data, (0, 5), **figure_style) as ax:
ax.axvline(x=0.08, linestyle='dotted', color='dimgrey', label='ASO Precision')
```
## SfM positive values
```
data = [
{
'data': np.ma.masked_where(sfm_snow_depth_values < 0, aso_snow_depth_values, copy=True),
'label': 'ASO',
'color': 'steelblue',
},
{
'data': np.ma.masked_where(sfm_snow_depth_values < 0, sfm_snow_depth_values, copy=True),
'label': 'SfM',
'color': 'beige',
'alpha': 0.7,
}
]
with Histogram.plot(data, (0, 5), **figure_style) as ax:
ax.axvline(x=0.08, linestyle='dotted', color='dimgrey', label='ASO Precision')
ax.set_title('SfM positive area snow depth values');
```
# **Numba**
### Numba is a JIT compiler that uses LLVM internally, so no separate ahead-of-time compilation step is required!

```
import time
def get_time_taken(func, *args):
res = func(*args)
start = time.time()
func(*args)
end = time.time()
time_taken = end - start
print(f"Total time - {time_taken:.5f} seconds")
print(res)
from numba import jit
from math import tan, atan
@jit
def slow_function(n):
result = 0
for x in range(n ** 7):
result += tan(x) * atan(x)
return result
get_time_taken(slow_function, 10)
```
### The speed up is obvious but there are a lot of caveats
### For example, in `nopython` mode any function called from jitted code must also be "decorated"
```
from numba import jit, int32
@jit(int32(int32), nopython=True)
def func(x):
return tan(x) * atan(x)
@jit(int32(int32), nopython=True)
def slow_function(n):
result = 0
for x in range(n ** 7):
result += func(x)
return result
get_time_taken(slow_function, 10)
```
### Notice the slight overhead introduced by the extra function call
```
from numba import prange,jit, int32
@jit(int32(int32), nopython=True, parallel=True)
def slow_function(n):
result = 0
for x in prange(n ** 7):
result += tan(x) * atan(x)
return result
get_time_taken(slow_function, 10)
```
### prange is the parallel version of the range function in python and parallel=True option optimizes the code to use all the cores
### Let's see how it works with NumPy
```
from numba import jit, int32
import numpy as np
@jit(int32(int32), nopython=True)
def slow_func_in_numpy(n):
result = 0
for x in np.arange(n ** 7):
result += np.tan(x) * np.arctan(x)
return result
get_time_taken(slow_func_in_numpy, 10)
```
### Do I have to write functions for every type?
```
from numba import jit, int32, int64, float32, float64
from math import tan, atan
@jit([int32(int32), int64(int64), float32(float32), float64(float64)])
def slow_function(n):
result = 0
for x in range(n ** 7):
result += tan(x) * atan(x)
return result
get_time_taken(slow_function, 10)
get_time_taken(slow_function, 10.2)
```
### Let's see how we can create numpy ufuncs using numba
```
from numba import vectorize, int32, int64, float32, float64
import numpy as np
@vectorize([int32(int32, int32),
int64(int64, int64),
float32(float32, float32),
float64(float64, float64)])
def addfunc(x, y):
return x + y
@vectorize
def simpler_addfunc(x, y):
return x + y
addfunc(2, 3)
addfunc(6.42, 9.8)
simpler_addfunc(2, 3.4)
simpler_addfunc(np.array([1,2,3]), np.array([4,5,6]))
```
### Limited support for classes
```
from numba import jitclass
spec = [
('x', int32),
('y', int32)
]
@jitclass(spec)
class Node(object):
def __init__(self, x, y):
self.x = x
self.y = y
def distance(self, n):
return (self.x - n.x) ** 2 + (self.y - n.y) ** 2
def distance_from_point(self, x, y):
return (self.x - x) ** 2 + (self.y - y) ** 2
n1 = Node(3,2)
n2 = Node(9,6)
%time n1.distance(n2)
%time n1.distance_from_point(4,5)
```
### This is just a glance into what numba can do, but remember, it does come with its own limitations
Numba Limitations
=================
1. No string support
2. No support for exception handling (`try ... except`, `try ... finally`)
3. No support for context management (the `with` statement)
4. List comprehensions are supported, but not dict, set or generator comprehensions
5. No support for generator delegation (`yield from`)

Note: `raise` and `assert` are supported.
# **Exercise**
Try using numba's @jit decorator with the function you wrote earlier and check with %time if there is any improvement in the performance
**If you find any improvement, feel free to tweet about your experience with the handle @pyconfhyd**
<img src="./pictures/DroneApp_logo.png" style="float:right; max-width: 180px; display: inline" alt="INSA" /></a>
<img src="./pictures/logo_sizinglab.png" style="float:right; max-width: 100px; display: inline" alt="INSA" /></a>
# Frame design
The objective of this study is to optimize the overall design in terms of mass. To this end, the frame is sized to withstand the loads resulting from two sizing scenarios: the **maximum take-off thrust (arms)** and a **landing with an impact speed of 1 m/s (body, arms, landing gears)**. Given the great diversity of drone models on the market, a simple quad-copter design is considered for the calculations that follow.
**Scipy** and **math** packages will be used for this notebook in order to illustrate the optimization algorithms of python.
```
import scipy
import scipy.optimize
from math import pi
from math import sqrt
from math import sin,cos,tan
import math
import numpy as np
import timeit
import pandas as pd
import ipywidgets as widgets
from ipywidgets import interactive
from IPython.display import display, HTML
pd.options.display.float_format = '{:,.2f}'.format
```
#### Frame drawing
*Simplified design of the drone frame and nomenclature of geometrical parameters used.*
<img src="./img/FrameDesign.jpg" alt="4-arms drone structure" width="800"/>
## Sizing scenarios
### Take-Off scenario
A maximum force produced at the take-off $F_{TO}$ generates a bending moment $M_{TO}$ equivalent to:
$M_{TO}=\frac{F_{TO}\cdot L_{arm}}{N_{arms}}$
The maximum stress $\sigma_{max}$ for a beam of rectangular cross-section is estimated with safety coefficient $k_s$ as:
$\displaystyle\sigma_{max}=\frac{H_{arm}}{2} \frac{12 \cdot Thrust \cdot l_{arm}}{H_{arm}^4-(H_{arm}-2e)^4} \leq \frac{\sigma_{alloy}}{k_s}$
which can be written with dimensionless arm aspect ratio $\pi_{arm}=\frac{e}{H_{arm}}$:
$\displaystyle H_{arm}\geq \left ( \frac{6 \cdot Thrust \cdot l_{arm} \cdot k_s}{\sigma_{alloy}(1-(1-2 \cdot \pi_{arm})^4)} \right )^{\frac{1}{3}}$
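As a quick numerical sanity check, the sketch below evaluates this inequality with the take-off specification used later in this notebook (32 N shared by 4 arms, 0.35 m arms, $k_s=4$, $\sigma_{alloy}=80$ MPa); the aspect ratio $\pi_{arm}=0.15$ is an assumption.
```
# Minimal sketch: minimum beam height for the take-off scenario
thrust_per_arm = 32 / 4    # [N]
l_arm = 0.35               # [m]
k_s = 4                    # [-] safety coefficient
sigma_alloy = 80e6         # [Pa]
pi_arm = 0.15              # [-] assumed thickness/height ratio

H_min = (6 * thrust_per_arm * l_arm * k_s
         / (sigma_alloy * (1 - (1 - 2 * pi_arm)**4)))**(1 / 3)
print(f"Minimum beam height: {H_min*1e3:.1f} mm")  # ~10 mm
```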
### Crash sizing scenario
The crash sizing scenario considers a maximum speed $V_{impact}$ of the drone when hitting the ground. At such speed the structure should resist (i.e. the maximum stress should not be exceeded) and for higher speeds, the landing
gears are the parts that break as structural fuses.
To calculate the equivalent maximum load resisted by the landing gears, the energy conservation law applies the kinetic energy stored in drone mass to potential energy in structural parts transitory deformation:
\begin{equation}
\begin{gathered}
\frac{1}{2}k_{eq} \cdot \delta x^2= \frac{1}{2} M_{total} \cdot V_{impact}^2 \\
\Rightarrow F_{max} =\frac{1}{4}( k_{eq} \cdot \delta x + M_{total} \cdot g)=\frac{1}{4}(V_{impact} \cdot \sqrt{k_{eq} \cdot M_{total}} + M_{total} \cdot g)
\end{gathered}
\end{equation}
To calculate the maximum stress induced by the maximum load $F_{max}$ applied to one landing gear, the equivalent stiffness $k_{eq}$ should be determined. For this purpose, the problem is broken down into simpler structural parts and the equivalent stiffness $k_{eq}$ is expressed considering the effect of each stiffness on the whole part.
\begin{equation}
k_{eq} = 4 \cdot \frac{\overset{\sim}{k_1} \cdot \overset{\sim}{k_2}}{\overset{\sim}{k_1}+\overset{\sim}{k_2}}
\end{equation}
*Equivalent stiffness problem decomposition.*
<img src="./img/crash.jpg" alt="Equivalent stiffness problem" width="800"/>
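A minimal sketch of these two relations is given below; `k1` and `k2` stand for the body and landing-gear stiffnesses that the sizing code computes further down.
```
import math

def crash_load(k1, k2, m_total, v_impact, n_arms=4, g=9.81):
    """Equivalent stiffness (n_arms branches of two springs in series)
    and resulting maximum impact load per landing gear."""
    k_eq = n_arms * k1 * k2 / (k1 + k2)
    f_max = 0.25 * (v_impact * math.sqrt(k_eq * m_total) + m_total * g)
    return k_eq, f_max
```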
## Sizing Code
The set of equations of a sizing code can raise typical issues such as:
- Under-constrained set of equations: the missing equations can come from additional scenarios, estimation models or additional sizing variables.
- Over-constrained set of equations, often due to the selection of a component on multiple criteria: adding over-sizing coefficients and constraints to the optimization problem generally fixes this issue.
- Algebraic loops, often due to selection criteria requiring information that only becomes available after the selection.
**Under-constraint singularity** example: two variables in one equation:
- Equation: cross section side of a beam resisting a normal stress: $\displaystyle H=\sqrt[3]{\frac{6*M_{to}}{\sigma_{bc}*(1-(1-2*T)^4)}}$
- Variables: thickness ($T$), cross section side ($H$)
- Geometrical restriction:$\displaystyle T<H$
- Strategy: $\displaystyle T=k_{TH}*H$ where 0<$k_{TH}$<1
The equation is thus transformed into an inequality and through a large number of iterations the value of both variables can be estimated.
$\displaystyle H>\sqrt[3]{\frac{6*M_{to}}{\sigma_{bc}*(1-(1-2*k_{TH})^4)}}$
**Algebraic loop**: the landing gear angle $\theta$ and height $H_{lg}$ have to be iterated on to fulfill the objective and constraints.
The final optimization problem thus depends on these parameters:
- $k_{TH}$: aspect ratio thickness (T) / side of the beam (H), < 1. Under-constraint
- $k_{BH}$: aspect ratio body height (Hbody) / beam height (H), > 1. Under-constraint
- $\theta$: landing gear angle (0 is a vertical beam), 0 < $\theta$ < 90. Algebraic loop
- $k_{TT}$: ratio landing gear thickness (Tlg) / beam thickness (T). Under-constraint
- $k_{L}$: aspect ratio body length (Lbody) / arm length (Larm), < 1. Under-constraint
- $H_{lg}$: height of the landing gear (space for battery or sensors). Algebraic loop
The sizing code is defined here in a function which can give:
- an evaluation of the objective: here the frame mass
- an evaluation of the constraints: here the maximum stress in the landing gear and body core, the battery volume, and the landing-gear height.
**Restrictions applied**:
1. **Strength of Materials (two constraints):** the stress resisted by the components(arm, body, landing gear), $\sigma_j$ must be lower than the maximum material stress.
2. **Geometry (one constraint)**: the volume of the body must be larger than that of the battery.
3. **Geometry (one constraint)**: The landing gear must be higher than the deformation caused during the impact and a possible camera or body hanging on the drone.
## Parameters definition
### General specifications
```
# Input Geometrical dimensions
Larm=0.35 # [m] one arm length
Narm=4 # [-] arms number
VolBat=0.132*0.043*0.027 #[m^3] Volume Battery (https://www.miniplanes.fr/eflite-accu-lipo-4s-148v-3300mah-50c-prise-ec3)
# Specifications for take off
F_to=32 # [N] global drone force for the take off
M_total=2 # [kg] total drone mass
# Specifications for landing impact
v_impact=1 # [m/s] impact speed
#Payload specifications
H_camera=0.057#[m] height camera
```
### Material assumptions
```
# Material properties
# for beeam and core
Ey_bc=70.3e9 # [Pa] Young modulus
Rho_bc=2700 # [kg/m^3] Volumic mass
Sigma_bc=80e6 # [Pa] Elastic strength
# for landing gear
Ey_lg=2e9 # [Pa] Young modulus
Rho_lg=1070 # [kg/m^3] Volumic mass
Sigma_lg=39e6 # [Pa] Elastic strength
```
### Design assumptions (constant)
```
k_sec=4 # [-] security coefficient
```
### Design variable (to optimize)
```
k_TH=0.1 # [-] aspect ratio : ratio thickness (T) / side of the beam (H) < 1
k_BH=2 # [-] aspect ratio : ratio body height (Hbody)/ height beam (H) > 1
Teta=20/90*pi/2 # [rad] landing gear angle (0 is vertical beam) 0<Teta<90
k_TT=1 # [-] aspect ratio : ratio landing gear thickness (Tlg)/ thickness beam (T). > 1
k_L=0.5 # [-] aspect ratio: Length body(Lbody)/length arm (Larm)<1
Hlg=.1 # [m] Height of landing gear (space for battery or sensors)
#Vector of parameters
parameters = np.array((k_TH, k_BH, Teta, k_TT, k_L, Hlg))
# Optimization bounds
# k_TH, k_BH, Theta, k_TT, k_L, H_LG
bounds = [(0.15,0.4), (1,4), (30/90*pi/2,pi/2), (1,100), (0,1), (0.01,1.165)]
```
<a id='#section5'></a>
```
def SizingCode(param,arg):
#Design Variables
k_TH=param[0]
k_BH=param[1]
Teta=param[2]
k_TT=param[3]
k_L=param[4]
Hlg=param[5]
#### Beam Sizing - Take Off
M_to=F_to/Narm*Larm*k_sec # [N.m] Moment applied in the drone center
# H=(M_to/Sigma_bc/(1-(1-2*k_TH)**4))**(1/3) # [m] Side length of the beam
H=(6*M_to/Sigma_bc/(1-(1-2*k_TH)**4))**(1/3) # [m] Side length of the beam
T=k_TH*H # [m] Thickness of the side beam
#### Body and Landing gear sizing - Landing impact
# Body stiffness calculation
Hbody=k_BH*H # [m] height of the body
Ibody=1/12*((H+2*T)*Hbody**3-H*(Hbody-2*T)**3) # [m^4] Section inertia of the body
Lbody=k_L*Larm #[m] length of the body
K1=3*Ey_bc*Ibody/(Lbody)**3 # [N/m] equivalent stiffness of the body
# Landing gear stiffness calculation
Llg=Hlg/cos(Teta) # [m] Landing gear length
Tlg=k_TT*T # [m] landing gear thickness
Ilg=1/12*(Tlg**4) # [m^4] Section inertia of the landing gear rectangular section
K2=3*Ey_lg*Ilg/Llg**3/sin(Teta) # [N/m] equivalent stiffness of the landing gear
# Global stiffness
Kg=K1*K2/(K1+K2)*Narm # [N/m] global stiffness of all the arms
# Impact force
Fimpact= (v_impact*(Kg*M_total)**(1/2)+M_total*9.81)*k_sec # [N] Total impact force, we assume all the landing gear impact together
# Stress calculation in the landing gear
M_LG=Fimpact/Narm*Hlg*tan(Teta) # [N.m] Moment applied in the landing gear
Sigma_lg_impact=M_LG*(Tlg/2)/Ilg # [Pa] Max stress in the landing gear
# Stress calculation in the body
M_Body=(Fimpact/Narm*Lbody+M_LG) # [N.m] Moment applied in the body
Sigma_body_impact=M_Body*(Hbody/2)/Ibody # [Pa] Max stress in the landing gear
# Mass calculation
Mbeams=Narm*Larm*(H**2-(H-2*T)**2)*Rho_bc #[kg] Total beams' mass
MLG=Narm*Llg*Tlg**2*Rho_lg #[kg] Total landing gears' mass
Mbody=Narm*(Lbody)*(Hbody*(H+2*T)-(Hbody-2*T)*H)*Rho_bc #[kg] Total body's mass
Mframe=Mbeams+MLG+Mbody #[kg] total frame mass
Vbody=(2*Lbody)**2*Hbody #[m^3] volume body to integer battery
    # Constraints: stress margins, battery volume, landing-gear height
constraints = [(Sigma_bc-Sigma_body_impact)/Sigma_body_impact,(Sigma_lg-Sigma_lg_impact)/Sigma_lg_impact,(Vbody-VolBat)/VolBat,(Hlg-Fimpact/(Narm*Kg)-H_camera)/(Hlg)]
    # Objective: total frame mass
if arg=='Obj':
return Mframe
elif arg == 'ObjP':
        P = 0.  # no penalty initially
for C in constraints:
if (C < 0.):
P = P-1e9*C
        return Mframe + P  # mass objective with penalty
elif arg=='Prt':
col_names_opt = ['Type', 'Name', 'Min', 'Value', 'Max', 'Unit', 'Comment']
df_opt = pd.DataFrame()
df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'k_TH', 'Min': bounds[0][0], 'Value': k_TH, 'Max': bounds[0][1], 'Unit': '[-]', 'Comment': 'Aspect ratio for the beam\'s thickness (T/H), '}])[col_names_opt]
df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'k_BH', 'Min': bounds[1][0], 'Value': k_BH, 'Max': bounds[1][1], 'Unit': '[-]', 'Comment': 'Aspect ratio for the body\'s height (Hbody/H)'}])[col_names_opt]
df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'Theta', 'Min': bounds[2][0], 'Value': Teta/pi*180, 'Max': bounds[2][1], 'Unit': '[-]', 'Comment': 'Angle of the landing gear w.r.t. the beam'}])[col_names_opt]
df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'k_TT', 'Min': bounds[3][0], 'Value': k_TT, 'Max': bounds[3][1], 'Unit': '[-]', 'Comment': 'Aspect ratio for the Landing gear\'s thickness (Tlg/T)'}])[col_names_opt]
df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'k_L', 'Min': bounds[4][0], 'Value': k_L, 'Max': bounds[4][1], 'Unit': '[-]', 'Comment': 'Aspect ratio: Length body(Lbody)/length arm (Larm) k_L'}])[col_names_opt]
df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'Hlg', 'Min': bounds[5][0], 'Value': Hlg, 'Max': bounds[5][1], 'Unit': '[-]', 'Comment': 'Landing gear height'}])[col_names_opt]
df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'Mbeams', 'Min': 0, 'Value': Mbeams, 'Max': '-', 'Unit': '[kg]', 'Comment': 'Total beams mass'}])[col_names_opt]
df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'MLG', 'Min': 0, 'Value': MLG, 'Max': '-', 'Unit': '[kg]', 'Comment': 'Total landing gear mass'}])[col_names_opt]
df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'Mbody', 'Min': 0, 'Value': Mbody, 'Max': '-', 'Unit': '[kg]', 'Comment': 'Total body mass'}])[col_names_opt]
df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'Const 0', 'Min': 0, 'Value': constraints[0], 'Max': '-', 'Unit': '[-]', 'Comment': 'Stress margin at the Body: (Sigma_bc-Sigma_body_impact)/Sigma_body_impact'}])[col_names_opt]
df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'Const 1', 'Min': 0, 'Value': constraints[1], 'Max': '-', 'Unit': '[-]', 'Comment': 'Stress margin at the landing gears: (Sigma_lg-Sigma_lg_impact)/Sigma_lg_impact'}])[col_names_opt]
df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'Const 2', 'Min': 0, 'Value': constraints[2], 'Max': '-', 'Unit': '[-]', 'Comment': '(Vbody-VolBat)/VolBat'}])[col_names_opt]
df_opt = df_opt.append([{'Type': 'Optimization', 'Name': 'Const 3', 'Min': 0, 'Value': constraints[3], 'Max': '-', 'Unit': '[-]', 'Comment': '(Hlg-Fimpact/(Narm*Kg)-H_camera)/(Hlg)'}])[col_names_opt]
col_names = ['Type', 'Name', 'Value', 'Unit', 'Comment']
df = pd.DataFrame()
df = df.append([{'Type': 'Arm', 'Name': 'Larm', 'Value': Larm, 'Unit': '[m]', 'Comment': 'Arm length'}])[col_names]
df = df.append([{'Type': 'Arm', 'Name': 'H', 'Value': H, 'Unit': '[m]', 'Comment': 'Height beam'}])[col_names]
df = df.append([{'Type': 'Arm', 'Name': 'T', 'Value': T, 'Unit': '[m]', 'Comment': 'Thickness arm'}])[col_names]
df = df.append([{'Type': 'Body', 'Name': 'Lbody', 'Value': Lbody, 'Unit': '[m]', 'Comment': 'Body length'}])[col_names]
df = df.append([{'Type': 'Body', 'Name': 'Hbody', 'Value': Hbody, 'Unit': '[m]', 'Comment': 'Body height'}])[col_names]
df = df.append([{'Type': 'Body', 'Name': 'H+2*T', 'Value': H+2*T, 'Unit': '[m]', 'Comment': 'Body width'}])[col_names]
df = df.append([{'Type': 'Crash', 'Name': 'v_impact', 'Value': v_impact, 'Unit': '[m/s]', 'Comment': 'Crash speed'}])[col_names]
df = df.append([{'Type': 'Crash', 'Name': 'Kg', 'Value': Kg, 'Unit': '[N/m]', 'Comment': 'Global stiffness'}])[col_names]
df = df.append([{'Type': 'Crash', 'Name': 'k_sec', 'Value': k_sec, 'Unit': '[-]', 'Comment': 'Safety coef.'}])[col_names]
df = df.append([{'Type': 'Crash', 'Name': 'Fimpact', 'Value': Fimpact, 'Unit': '[N]', 'Comment': 'Max crash load'}])[col_names]
pd.options.display.float_format = '{:,.3f}'.format
def view(x=''):
#if x=='All': return display(df)
if x=='Optimization' : return display(df_opt)
return display(df[df['Type']==x])
items = sorted(df['Type'].unique().tolist())+['Optimization']
w = widgets.Select(options=items)
return display(df,df_opt)
else:
return constraints
```
<a id='#section6'></a>
## Optimization problem
We will now use the [optimization algorithms](https://docs.scipy.org/doc/scipy/reference/optimize.html) of the Scipy package to solve and optimize the configuration. We use here the SLSQP algorithm without an explicit expression of the gradient (Jacobian). A course on multidisciplinary design optimization and gradient-based algorithms is given [here](http://mdolab.engin.umich.edu/sites/default/files/Martins-MDO-course-notes.pdf):
> Joaquim R. R. A. Martins (2012). A Short Course on Multidisciplinary Design Optimization. University of Michigan
We can print the characteristics of the problem before optimization with the initial vector of optimization variables:
```
# Initial characteristics before optimization
print("-----------------------------------------------")
print("Initial characteristics before optimization :")
SizingCode(parameters,'Prt')
print("-----------------------------------------------")
# Optimization with SLSQP algorithm
contrainte = lambda x: SizingCode(x, 'Const')
objectif = lambda x: SizingCode(x, 'Obj')
objectifP = lambda x: SizingCode(x, 'ObjP')
SLSQP = False # Optimization algorithm choice
if SLSQP == True:
    # SLSQP optimisation
result = scipy.optimize.fmin_slsqp(func=objectif, x0=parameters,
bounds=bounds,
f_ieqcons=contrainte, iter=1500, acc=1e-12)
else:
    # Differential evolution optimisation
result = scipy.optimize.differential_evolution(func=objectifP,
bounds=bounds,
tol=1e-12)
# Final characteristics after optimization
print("-----------------------------------------------")
print("Final characteristics after optimization :")
if SLSQP == True:
SizingCode(result,'Obj')
SizingCode(result, 'Prt')
else:
SizingCode(result.x,'Obj')
SizingCode(result.x, 'Prt')
print("-----------------------------------------------")
```
<a href="https://colab.research.google.com/github/cxbxmxcx/EatNoEat/blob/master/Chapter_9_EatNoEat_Training.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import tensorflow as tf
import numpy as np
import random
import matplotlib
import matplotlib.pyplot as plt
import math
import glob
import pickle
import io
import os
import datetime
import base64
from IPython.display import HTML
from IPython import display as ipythondisplay
from google.colab import drive
drive.mount('/content/gdrive')
use_NAS = False
if use_NAS:
IMG_SIZE = 224 # 299 for Inception, 224 for NASNetMobile
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
else:
IMG_SIZE = 299 # 299 for Inception, 224 for NASNetMobile
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
def load_image(image_path):
img = tf.io.read_file(image_path)
img = tf.image.decode_jpeg(img, channels=3)
img = tf.image.resize(img, (IMG_SIZE, IMG_SIZE))
if use_NAS:
img = tf.keras.applications.nasnet.preprocess_input(img)
else:
img = tf.keras.applications.inception_v3.preprocess_input(img)
return img, image_path
def create_model(image_batch):
tf.keras.backend.clear_session()
if use_NAS:
# Create the base model from the pre-trained model
base_model = tf.keras.applications.NASNetMobile(input_shape=IMG_SHAPE,
include_top=False,
weights='imagenet')
else:
# Create the base model from the pre-trained model
base_model = tf.keras.applications.InceptionResNetV2(input_shape=IMG_SHAPE,
include_top=False,
weights='imagenet')
feature_batch = base_model(image_batch)
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
feature_batch_average = global_average_layer(feature_batch)
prediction_layer = tf.keras.layers.Dense(3)
prediction_batch = prediction_layer(feature_batch_average)
model = tf.keras.Sequential([
base_model,
global_average_layer,
prediction_layer])
base_learning_rate = 0.0001
model.compile(optimizer=tf.keras.optimizers.Nadam(lr=base_learning_rate),
loss=tf.keras.losses.MeanAbsoluteError(),
metrics=['mae', 'mse', 'accuracy'])
return model
import os
from os import listdir
my_drive = '/content/gdrive/My Drive/'
image_folder = my_drive + 'TestImages/'
models = my_drive + 'Models'
training_folder = my_drive + "Traning/"
def get_test_images(directory):
images = []
for file in listdir(directory):
if file.endswith(".jpg"):
images.append(image_folder + file)
return images
images = get_test_images(image_folder)
print(images)
if len(images) == 0:
raise Exception('Test images need to be loaded!')
else:
x, _ = load_image(images[0])
img = x[np.newaxis, ...]
food_model = create_model(img)
food_model.summary()
latest = tf.train.latest_checkpoint(models)
latest
if latest != None:
food_model.load_weights(latest)
def observe_image(image, model):
x, _ = load_image(image)
img = x[np.newaxis, ...]
return model.predict(img)
import ipywidgets as widgets
from IPython.display import display
from IPython.display import Javascript
test_states = []
#@title Eat/No Eat Training { run: "auto", vertical-output: true, display-mode: "form" }
image_idx = 19 #@param {type:"slider", min:0, max:100, step:1}
val = f"Images Trained {len(test_states)}"
label = widgets.Label(
value= val,
disabled=False
)
display(label)
cnt = len(images)
image_idx = image_idx if image_idx < cnt else cnt - 1
image = images[image_idx]
x, _ = load_image(image)
img = x[np.newaxis, ...]
predict = food_model.predict(img)
print(predict+5)
print(image_idx,image)
plt.imshow((x+1)/2)
toggle = widgets.ToggleButtons(
options=['Eat', 'No Eat'],
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Description',
# icon='check'
)
display(toggle)
button = widgets.Button(description="Train!")
output = widgets.Output()
def button_clicked(b):
# Display the message within the output widget.
with output:
test = (predict,toggle.index,image)
test_states.append(test)
button.on_click(button_clicked)
display(button, output)
if len(test_states) > 0:
if os.path.isdir(training_folder) == False:
os.makedirs(training_folder)
pickle.dump( test_states, open( training_folder + "food_test.p", "wb" ) )
```
# Recurrent Neural Networks (RNN) with Keras
## Learning Objectives
1. Add built-in RNN layers.
2. Build bidirectional RNNs.
3. Using CuDNN kernels when available.
4. Build a RNN model with nested input/output.
## Introduction
Recurrent neural networks (RNN) are a class of neural networks that is powerful for
modeling sequence data such as time series or natural language.
Schematically, a RNN layer uses a `for` loop to iterate over the timesteps of a
sequence, while maintaining an internal state that encodes information about the
timesteps it has seen so far.
The Keras RNN API is designed with a focus on:
- **Ease of use**: the built-in `keras.layers.RNN`, `keras.layers.LSTM`,
`keras.layers.GRU` layers enable you to quickly build recurrent models without
having to make difficult configuration choices.
- **Ease of customization**: You can also define your own RNN cell layer (the inner
part of the `for` loop) with custom behavior, and use it with the generic
`keras.layers.RNN` layer (the `for` loop itself). This allows you to quickly
prototype different research ideas in a flexible way with minimal code.
Each learning objective will correspond to a __#TODO__ in the notebook where you will complete the notebook cell's code before running. Refer to the [solution](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/text_classification/solutions/rnn.ipynb) for reference.
## Setup
```
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
```
## Built-in RNN layers: a simple example
There are three built-in RNN layers in Keras:
1. `keras.layers.SimpleRNN`, a fully-connected RNN where the output from previous
timestep is to be fed to next timestep.
2. `keras.layers.GRU`, first proposed in
[Cho et al., 2014](https://arxiv.org/abs/1406.1078).
3. `keras.layers.LSTM`, first proposed in
[Hochreiter & Schmidhuber, 1997](https://www.bioinf.jku.at/publications/older/2604.pdf).
In early 2015, Keras had the first reusable open-source Python implementations of LSTM
and GRU.
Here is a simple example of a `Sequential` model that processes sequences of integers,
embeds each integer into a 64-dimensional vector, then processes the sequence of
vectors using a `LSTM` layer.
```
model = keras.Sequential()
# Add an Embedding layer expecting input vocab of size 1000, and
# output embedding dimension of size 64.
model.add(layers.Embedding(input_dim=1000, output_dim=64))
# Add a LSTM layer with 128 internal units.
# TODO -- your code goes here
# Add a Dense layer with 10 units.
# TODO -- your code goes here
model.summary()
```
Built-in RNNs support a number of useful features:
- Recurrent dropout, via the `dropout` and `recurrent_dropout` arguments
- Ability to process an input sequence in reverse, via the `go_backwards` argument
- Loop unrolling (which can lead to a large speedup when processing short sequences on
CPU), via the `unroll` argument
- ...and more.
For more information, see the
[RNN API documentation](https://keras.io/api/layers/recurrent_layers/).
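For instance, these options are plain constructor arguments:
```
# A minimal illustration of the arguments listed above
regularized_lstm = layers.LSTM(
    64,
    dropout=0.2,            # dropout applied to the inputs
    recurrent_dropout=0.2,  # dropout applied to the recurrent state
    go_backwards=True,      # process each input sequence in reverse
)
```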
## Outputs and states
By default, the output of a RNN layer contains a single vector per sample. This vector
is the RNN cell output corresponding to the last timestep, containing information
about the entire input sequence. The shape of this output is `(batch_size, units)`
where `units` corresponds to the `units` argument passed to the layer's constructor.
A RNN layer can also return the entire sequence of outputs for each sample (one vector
per timestep per sample), if you set `return_sequences=True`. The shape of this output
is `(batch_size, timesteps, units)`.
```
model = keras.Sequential()
model.add(layers.Embedding(input_dim=1000, output_dim=64))
# The output of GRU will be a 3D tensor of shape (batch_size, timesteps, 256)
model.add(layers.GRU(256, return_sequences=True))
# The output of SimpleRNN will be a 2D tensor of shape (batch_size, 128)
model.add(layers.SimpleRNN(128))
model.add(layers.Dense(10))
model.summary()
```
In addition, a RNN layer can return its final internal state(s). The returned states
can be used to resume the RNN execution later, or
[to initialize another RNN](https://arxiv.org/abs/1409.3215).
This setting is commonly used in the
encoder-decoder sequence-to-sequence model, where the encoder final state is used as
the initial state of the decoder.
To configure a RNN layer to return its internal state, set the `return_state` parameter
to `True` when creating the layer. Note that `LSTM` has 2 state tensors, but `GRU`
only has one.
To configure the initial state of the layer, just call the layer with additional
keyword argument `initial_state`.
Note that the shape of the state needs to match the unit size of the layer, like in the
example below.
```
encoder_vocab = 1000
decoder_vocab = 2000
encoder_input = layers.Input(shape=(None,))
encoder_embedded = layers.Embedding(input_dim=encoder_vocab, output_dim=64)(
encoder_input
)
# Return states in addition to output
output, state_h, state_c = layers.LSTM(64, return_state=True, name="encoder")(
encoder_embedded
)
encoder_state = [state_h, state_c]
decoder_input = layers.Input(shape=(None,))
decoder_embedded = layers.Embedding(input_dim=decoder_vocab, output_dim=64)(
decoder_input
)
# Pass the 2 states to a new LSTM layer, as initial state
decoder_output = layers.LSTM(64, name="decoder")(
decoder_embedded, initial_state=encoder_state
)
output = layers.Dense(10)(decoder_output)
model = keras.Model([encoder_input, decoder_input], output)
model.summary()
```
## RNN layers and RNN cells
In addition to the built-in RNN layers, the RNN API also provides cell-level APIs.
Unlike RNN layers, which processes whole batches of input sequences, the RNN cell only
processes a single timestep.
The cell is the inside of the `for` loop of a RNN layer. Wrapping a cell inside a
`keras.layers.RNN` layer gives you a layer capable of processing batches of
sequences, e.g. `RNN(LSTMCell(10))`.
Mathematically, `RNN(LSTMCell(10))` produces the same result as `LSTM(10)`. In fact,
the implementation of this layer in TF v1.x was just creating the corresponding RNN
cell and wrapping it in a RNN layer. However using the built-in `GRU` and `LSTM`
layers enable the use of CuDNN and you may see better performance.
There are three built-in RNN cells, each of them corresponding to the matching RNN
layer.
- `keras.layers.SimpleRNNCell` corresponds to the `SimpleRNN` layer.
- `keras.layers.GRUCell` corresponds to the `GRU` layer.
- `keras.layers.LSTMCell` corresponds to the `LSTM` layer.
The cell abstraction, together with the generic `keras.layers.RNN` class, make it
very easy to implement custom RNN architectures for your research.
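A short sketch of the cell-level API (equivalent in output shape to `layers.LSTM(10)`, but without the CuDNN fast path discussed below):
```
cell = layers.LSTMCell(10)
rnn_from_cell = layers.RNN(cell)

x = tf.random.normal((4, 20, 8))   # (batch, timesteps, features)
print(rnn_from_cell(x).shape)      # (4, 10)
```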
## Cross-batch statefulness
When processing very long sequences (possibly infinite), you may want to use the
pattern of **cross-batch statefulness**.
Normally, the internal state of a RNN layer is reset every time it sees a new batch
(i.e. every sample seen by the layer is assumed to be independent of the past). The
layer will only maintain a state while processing a given sample.
If you have very long sequences though, it is useful to break them into shorter
sequences, and to feed these shorter sequences sequentially into a RNN layer without
resetting the layer's state. That way, the layer can retain information about the
entirety of the sequence, even though it's only seeing one sub-sequence at a time.
You can do this by setting `stateful=True` in the constructor.
If you have a sequence `s = [t0, t1, ... t1546, t1547]`, you would split it into e.g.
```
s1 = [t0, t1, ... t100]
s2 = [t101, ... t201]
...
s16 = [t1501, ... t1547]
```
Then you would process it via:
```python
lstm_layer = layers.LSTM(64, stateful=True)
for s in sub_sequences:
output = lstm_layer(s)
```
When you want to clear the state, you can use `layer.reset_states()`.
> Note: In this setup, sample `i` in a given batch is assumed to be the continuation of
sample `i` in the previous batch. This means that all batches should contain the same
number of samples (batch size). E.g. if a batch contains `[sequence_A_from_t0_to_t100,
sequence_B_from_t0_to_t100]`, the next batch should contain
`[sequence_A_from_t101_to_t200, sequence_B_from_t101_to_t200]`.
Here is a complete example:
```
paragraph1 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph2 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph3 = np.random.random((20, 10, 50)).astype(np.float32)
lstm_layer = layers.LSTM(64, stateful=True)
output = lstm_layer(paragraph1)
output = lstm_layer(paragraph2)
output = lstm_layer(paragraph3)
# reset_states() will reset the cached state to the original initial_state.
# If no initial_state was provided, zero-states will be used by default.
# TODO -- your code goes here
```
### RNN State Reuse
<a id="rnn_state_reuse"></a>
The recorded states of the RNN layer are not included in the `layer.weights()`. If you
would like to reuse the state from a RNN layer, you can retrieve the states value by
`layer.states` and use it as the
initial state for a new layer via the Keras functional API like `new_layer(inputs,
initial_state=layer.states)`, or model subclassing.
Please also note that sequential model might not be used in this case since it only
supports layers with single input and output, the extra input of initial state makes
it impossible to use here.
```
paragraph1 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph2 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph3 = np.random.random((20, 10, 50)).astype(np.float32)
lstm_layer = layers.LSTM(64, stateful=True)
output = lstm_layer(paragraph1)
output = lstm_layer(paragraph2)
existing_state = lstm_layer.states
new_lstm_layer = layers.LSTM(64)
new_output = new_lstm_layer(paragraph3, initial_state=existing_state)
```
## Bidirectional RNNs
For sequences other than time series (e.g. text), it is often the case that a RNN model
can perform better if it not only processes sequence from start to end, but also
backwards. For example, to predict the next word in a sentence, it is often useful to
have the context around the word, not only just the words that come before it.
Keras provides an easy API for you to build such bidirectional RNNs: the
`keras.layers.Bidirectional` wrapper.
```
model = keras.Sequential()
# Add Bidirectional layers
# TODO -- your code goes here
model.summary()
```
Under the hood, `Bidirectional` will copy the RNN layer passed in, and flip the
`go_backwards` field of the newly copied layer, so that it will process the inputs in
reverse order.
The output of the `Bidirectional` RNN will be, by default, the concatenation of the forward layer
output and the backward layer output. If you need a different merging behavior, e.g.
summation, change the `merge_mode` parameter in the `Bidirectional` wrapper
constructor. For more details about `Bidirectional`, please check
[the API docs](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Bidirectional/).
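For example, with `merge_mode="sum"` the output keeps the same dimensionality as a single direction:
```
bi_sum = layers.Bidirectional(layers.LSTM(32, return_sequences=True), merge_mode="sum")
print(bi_sum(tf.random.normal((2, 10, 8))).shape)  # (2, 10, 32) instead of (2, 10, 64)
```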
## Performance optimization and CuDNN kernels
In TensorFlow 2.0, the built-in LSTM and GRU layers have been updated to leverage CuDNN
kernels by default when a GPU is available. With this change, the prior
`keras.layers.CuDNNLSTM/CuDNNGRU` layers have been deprecated, and you can build your
model without worrying about the hardware it will run on.
Since the CuDNN kernel is built with certain assumptions, this means the layer **will
not be able to use the CuDNN kernel if you change the defaults of the built-in LSTM or
GRU layers**. E.g.:
- Changing the `activation` function from `tanh` to something else.
- Changing the `recurrent_activation` function from `sigmoid` to something else.
- Using `recurrent_dropout` > 0.
- Setting `unroll` to True, which forces LSTM/GRU to decompose the inner
`tf.while_loop` into an unrolled `for` loop.
- Setting `use_bias` to False.
- Using masking when the input data is not strictly right padded (if the mask
corresponds to strictly right padded data, CuDNN can still be used. This is the most
common case).
For the detailed list of constraints, please see the documentation for the
[LSTM](https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM/) and
[GRU](https://www.tensorflow.org/api_docs/python/tf/keras/layers/GRU/) layers.
### Using CuDNN kernels when available
Let's build a simple LSTM model to demonstrate the performance difference.
We'll use as input sequences the sequence of rows of MNIST digits (treating each row of
pixels as a timestep), and we'll predict the digit's label.
```
batch_size = 64
# Each MNIST image batch is a tensor of shape (batch_size, 28, 28).
# Each input sequence will be of size (28, 28) (height is treated like time).
input_dim = 28
units = 64
output_size = 10 # labels are from 0 to 9
# Build the RNN model
def build_model(allow_cudnn_kernel=True):
# CuDNN is only available at the layer level, and not at the cell level.
# This means `LSTM(units)` will use the CuDNN kernel,
# while RNN(LSTMCell(units)) will run on non-CuDNN kernel.
if allow_cudnn_kernel:
# The LSTM layer with default options uses CuDNN.
lstm_layer = keras.layers.LSTM(units, input_shape=(None, input_dim))
else:
# Wrapping a LSTMCell in a RNN layer will not use CuDNN.
lstm_layer = keras.layers.RNN(
keras.layers.LSTMCell(units), input_shape=(None, input_dim)
)
model = keras.models.Sequential(
[
lstm_layer,
keras.layers.BatchNormalization(),
keras.layers.Dense(output_size),
]
)
return model
```
Let's load the MNIST dataset:
```
mnist = keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
sample, sample_label = x_train[0], y_train[0]
```
Let's create a model instance and train it.
We choose `sparse_categorical_crossentropy` as the loss function for the model. The
output of the model has shape of `[batch_size, 10]`. The target for the model is an
integer vector, where each integer is in the range 0 to 9.
```
model = build_model(allow_cudnn_kernel=True)
# Compile the model
# TODO -- your code goes here
model.fit(
x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=1
)
```
Now, let's compare to a model that does not use the CuDNN kernel:
```
noncudnn_model = build_model(allow_cudnn_kernel=False)
noncudnn_model.set_weights(model.get_weights())
noncudnn_model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer="sgd",
metrics=["accuracy"],
)
noncudnn_model.fit(
x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=1
)
```
When running on a machine with a NVIDIA GPU and CuDNN installed,
the model built with CuDNN is much faster to train compared to the
model that uses the regular TensorFlow kernel.
The same CuDNN-enabled model can also be used to run inference in a CPU-only
environment. The `tf.device` annotation below is just forcing the device placement.
The model will run on CPU by default if no GPU is available.
You simply don't have to worry about the hardware you're running on anymore. Isn't that
pretty cool?
```
import matplotlib.pyplot as plt
with tf.device("CPU:0"):
cpu_model = build_model(allow_cudnn_kernel=True)
cpu_model.set_weights(model.get_weights())
result = tf.argmax(cpu_model.predict_on_batch(tf.expand_dims(sample, 0)), axis=1)
print(
"Predicted result is: %s, target result is: %s" % (result.numpy(), sample_label)
)
plt.imshow(sample, cmap=plt.get_cmap("gray"))
```
## RNNs with list/dict inputs, or nested inputs
Nested structures allow implementers to include more information within a single
timestep. For example, a video frame could have audio and video input at the same
time. The data shape in this case could be:
`[batch, timestep, {"video": [height, width, channel], "audio": [frequency]}]`
In another example, handwriting data could have both coordinates x and y for the
current position of the pen, as well as pressure information. So the data
representation could be:
`[batch, timestep, {"location": [x, y], "pressure": [force]}]`
The following code provides an example of how to build a custom RNN cell that accepts
such structured inputs.
### Define a custom cell that supports nested input/output
See [Making new Layers & Models via subclassing](https://www.tensorflow.org/guide/keras/custom_layers_and_models/)
for details on writing your own layers.
```
class NestedCell(keras.layers.Layer):
def __init__(self, unit_1, unit_2, unit_3, **kwargs):
self.unit_1 = unit_1
self.unit_2 = unit_2
self.unit_3 = unit_3
self.state_size = [tf.TensorShape([unit_1]), tf.TensorShape([unit_2, unit_3])]
self.output_size = [tf.TensorShape([unit_1]), tf.TensorShape([unit_2, unit_3])]
super(NestedCell, self).__init__(**kwargs)
def build(self, input_shapes):
# expect input_shape to contain 2 items, [(batch, i1), (batch, i2, i3)]
i1 = input_shapes[0][1]
i2 = input_shapes[1][1]
i3 = input_shapes[1][2]
self.kernel_1 = self.add_weight(
shape=(i1, self.unit_1), initializer="uniform", name="kernel_1"
)
self.kernel_2_3 = self.add_weight(
shape=(i2, i3, self.unit_2, self.unit_3),
initializer="uniform",
name="kernel_2_3",
)
def call(self, inputs, states):
# inputs should be in [(batch, input_1), (batch, input_2, input_3)]
# state should be in shape [(batch, unit_1), (batch, unit_2, unit_3)]
input_1, input_2 = tf.nest.flatten(inputs)
s1, s2 = states
output_1 = tf.matmul(input_1, self.kernel_1)
output_2_3 = tf.einsum("bij,ijkl->bkl", input_2, self.kernel_2_3)
state_1 = s1 + output_1
state_2_3 = s2 + output_2_3
output = (output_1, output_2_3)
new_states = (state_1, state_2_3)
return output, new_states
def get_config(self):
return {"unit_1": self.unit_1, "unit_2": unit_2, "unit_3": self.unit_3}
```
### Build a RNN model with nested input/output
Let's build a Keras model that uses a `keras.layers.RNN` layer and the custom cell
we just defined.
```
unit_1 = 10
unit_2 = 20
unit_3 = 30
i1 = 32
i2 = 64
i3 = 32
batch_size = 64
num_batches = 10
timestep = 50
cell = NestedCell(unit_1, unit_2, unit_3)
rnn = keras.layers.RNN(cell)
input_1 = keras.Input((None, i1))
input_2 = keras.Input((None, i2, i3))
outputs = rnn((input_1, input_2))
model = keras.models.Model([input_1, input_2], outputs)
model.compile(optimizer="adam", loss="mse", metrics=["accuracy"])
```
### Train the model with randomly generated data
Since there isn't a good candidate dataset for this model, we use random Numpy data for
demonstration.
```
input_1_data = np.random.random((batch_size * num_batches, timestep, i1))
input_2_data = np.random.random((batch_size * num_batches, timestep, i2, i3))
target_1_data = np.random.random((batch_size * num_batches, unit_1))
target_2_data = np.random.random((batch_size * num_batches, unit_2, unit_3))
input_data = [input_1_data, input_2_data]
target_data = [target_1_data, target_2_data]
model.fit(input_data, target_data, batch_size=batch_size)
```
With the Keras `keras.layers.RNN` layer, you are only expected to define the math
logic for individual step within the sequence, and the `keras.layers.RNN` layer
will handle the sequence iteration for you. It's an incredibly powerful way to quickly
prototype new kinds of RNNs (e.g. a LSTM variant).
For more details, please visit the [API docs](https://www.tensorflow.org/api_docs/python/tf/keras/layers/RNN/).
# Naive forecasting
## Setup
```
import numpy as np
import matplotlib.pyplot as plt
def plot_series(time, series, format="-", start=0, end=None, label=None):
plt.plot(time[start:end], series[start:end], format, label=label)
plt.xlabel("Time")
plt.ylabel("Value")
if label:
plt.legend(fontsize=14)
plt.grid(True)
def trend(time, slope=0):
return slope * time
def seasonal_pattern(season_time):
"""Just an arbitrary pattern, you can change it if you wish"""
return np.where(season_time < 0.4,
np.cos(season_time * 2 * np.pi),
1 / np.exp(3 * season_time))
def seasonality(time, period, amplitude=1, phase=0):
"""Repeats the same pattern at each period"""
season_time = ((time + phase) % period) / period
return amplitude * seasonal_pattern(season_time)
def white_noise(time, noise_level=1, seed=None):
rnd = np.random.RandomState(seed)
return rnd.randn(len(time)) * noise_level
```
## Trend and Seasonality
```
time = np.arange(4 * 365 + 1)
slope = 0.05
baseline = 10
amplitude = 40
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
noise_level = 5
noise = white_noise(time, noise_level, seed=42)
series += noise
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
```
All right, this looks realistic enough for now. Let's try to forecast it. We will split it into two periods: the training period and the validation period (in many cases, you would also want to have a test period). The split will be at time step 1000.
```
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
```
## Naive Forecast
```
naive_forecast = series[split_time - 1:-1]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid, label="Series")
plot_series(time_valid, naive_forecast, label="Forecast")
```
Let's zoom in on the start of the validation period:
```
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid, start=0, end=150, label="Series")
plot_series(time_valid, naive_forecast, start=1, end=151, label="Forecast")
```
You can see that the naive forecast lags 1 step behind the time series.
Now let's compute the mean absolute error between the forecasts and the actual values over the validation period:
```
errors = naive_forecast - x_valid
abs_errors = np.abs(errors)
mae = abs_errors.mean()
mae
```
That's our baseline, now let's try a moving average.
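A minimal sketch of such a moving-average baseline (the window size of 30 is an arbitrary choice):
```
def moving_average_forecast(series, window_size):
    """Forecasts the mean of the last few values
       (a window_size of 1 reduces to the naive forecast)."""
    forecast = []
    for time in range(len(series) - window_size):
        forecast.append(series[time:time + window_size].mean())
    return np.array(forecast)

moving_avg = moving_average_forecast(series, 30)[split_time - 30:]
np.abs(moving_avg - x_valid).mean()
```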
<a href="https://colab.research.google.com/github/ghost331/Recurrent-Neural-Network/blob/main/Covid_19_Analysis_using_RNN_with_LSTM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#Data: https://github.com/CSSEGISandData/COVID-19/blob/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
country = "India"
#Total COVID confirmed cases
df_confirmed = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv")
df_confirmed_country = df_confirmed[df_confirmed["Country/Region"] == country]
df_confirmed_country = pd.DataFrame(df_confirmed_country[df_confirmed_country.columns[4:]].sum(),columns=["confirmed"])
df_confirmed_country.index = pd.to_datetime(df_confirmed_country.index,format='%m/%d/%y')
df_confirmed_country.plot(figsize=(10,5),title="COVID confirmed cases")
df_confirmed_country.tail(10)
print("Total days in the dataset", len(df_confirmed_country))
#Use data until 14 days before as training
x = len(df_confirmed_country)-14
train=df_confirmed_country.iloc[300:x]
test = df_confirmed_country.iloc[x:]
##scale or normalize data as the data is too skewed
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(train)
train_scaled = scaler.transform(train)
test_scaled = scaler.transform(test)
## Use TimeSeriestrain_generator to generate data in sequences.
#Alternatively we can create our own sequences.
from keras.preprocessing.sequence import TimeseriesGenerator
#Sequence size has an impact on prediction, especially since COVID is unpredictable!
seq_size = 7 ## number of steps (lookback)
n_features = 1 ## number of features. This dataset is univariate so it is 1
train_generator = TimeseriesGenerator(train_scaled, train_scaled, length = seq_size, batch_size=1)
print("Total number of samples in the original training data = ", len(train)) # 660
print("Total number of samples in the generated data = ", len(train_generator)) #653 with seq_size=7
#Check data shape from generator
x,y = train_generator[10] #Check train_generator
#Takes 7 days as x and 8th day as y (for seq_size=7)
#Also generate test data
test_generator = TimeseriesGenerator(test_scaled, test_scaled, length=seq_size, batch_size=1)
print("Total number of samples in the original training data = ", len(test)) # 14 as we're using last 14 days for test
print("Total number of samples in the generated data = ", len(test_generator)) # 7
#Check data shape from generator
x,y = test_generator[0]
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Activation
#Define Model
model = Sequential()
model.add(LSTM(128, activation='relu', return_sequences=True, input_shape=(seq_size, n_features)))
model.add(LSTM(64, activation='relu'))
model.add(Dense(32))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.summary()
print('Train...')
history = model.fit_generator(train_generator,
validation_data=test_generator,
epochs=30, steps_per_epoch=10)
#plot the training and validation accuracy and loss at each epoch
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'y', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
#forecast
prediction = [] #Empty list to populate later with predictions
current_batch = train_scaled[-seq_size:] #Final data points in train
current_batch = current_batch.reshape(1, seq_size, n_features) #Reshape
## Predict future, beyond test dates
future = 7 #Days
for i in range(len(test) + future):
current_pred = model.predict(current_batch)[0]
prediction.append(current_pred)
current_batch = np.append(current_batch[:,1:,:],[[current_pred]],axis=1)
### Inverse transform to before scaling so we get actual numbers
rescaled_prediction = scaler.inverse_transform(prediction)
time_series_array = test.index #Get dates for test data
#Add new dates for the forecast period
for k in range(0, future):
time_series_array = time_series_array.append(time_series_array[-1:] + pd.DateOffset(1))
#Create a dataframe to capture the forecast data
df_forecast = pd.DataFrame(columns=["actual_confirmed","predicted"], index=time_series_array)
df_forecast.loc[:,"predicted"] = rescaled_prediction[:,0]
df_forecast.loc[:,"actual_confirmed"] = test["confirmed"]
#Plot
df_forecast.plot(title="Predictions for next 7 days")
```
### Installation
```
pip install -q tensorflow tensorflow-datasets
```
#### Imports
```
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from tensorflow import keras
import tensorflow_datasets as tfds
```
### Checking datasets
```
print(tfds.list_builders())
```
### Getting data Infomation
```
builder = tfds.builder('rock_paper_scissors')
info = builder.info
print(info)
```
### Data Preparation
```
train = tfds.load(name='rock_paper_scissors', split="train")
test = tfds.load(name='rock_paper_scissors', split='test')
```
### Iterating over data
> To iterate over a tensorflow dataset we do it as follows
```
for data in train:
print(data['image'], data['label'])
break
```
### Creating a Numpy data
> We are going to scale our data and convert it to a numpy array
```
train_images = np.array([data['image'].numpy()/255 for data in train])
train_labels =np.array([data['label'].numpy() for data in train])
test_image = np.array([data['image'].numpy()/255 for data in test])
test_labels = np.array([data['label'].numpy() for data in test])
train_images[0]
```
### Class Names
0 - Rock
1 - Paper
2 - Scissors
```
class_names = np.array(["rock", "paper", "scissor"])
```
### Creating a NN
```
input_shape = train_images[0].shape
input_shape
model = keras.Sequential([
keras.layers.Conv2D(32, (3, 3), input_shape=input_shape, activation='relu'),
keras.layers.MaxPool2D((3,3)) ,
keras.layers.Conv2D(64, (2, 2), activation='relu'),
keras.layers.MaxPool2D((2,2)),
keras.layers.Conv2D(64, (2, 2), activation='relu'),
keras.layers.MaxPool2D((2,2)),
keras.layers.Flatten(),
keras.layers.Dense(64, activation='relu'),
keras.layers.Dense(32, activation='relu'),
keras.layers.Dense(3, activation='softmax')
])
model.summary()
```
### Compiling the Model
```
model.compile(
optimizer = keras.optimizers.Adam(learning_rate=.0001),
metrics=["accuracy"],
    loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)  # note: the final layer applies softmax, so from_logits=False would describe the outputs more accurately
)
```
### Fitting the Model
```
EPOCHS = 5
BATCH_SIZE = 4
VALIDATION_SET = (test_image, test_labels)
history = model.fit(train_images, train_labels, epochs=EPOCHS, validation_data=VALIDATION_SET, batch_size=BATCH_SIZE)
```
### Model Evaluation Conclusion
Our model is performing well. The training loss is close to 0, and so is the validation loss. The accuracy on the training set is `100%`, compared to `83%` on the test set.
> The model is overfitting somewhat, but it still gives good results on the validation set.
### Making Predictions
```
predictions = model.predict(test_image[:10])
for i, j in zip(predictions, test_labels[:10]):
print(class_names[np.argmax(i)],"-------->", class_names[j])
```
### Tuning Hyperparameters -- Keras Tuner
* [Docs](https://www.tensorflow.org/tutorials/keras/keras_tuner)
### Installation
```
pip install -q -U keras-tuner
```
### Importing
```
import kerastuner as kt
def model_builder(hp):
model = keras.Sequential()
# we want the model to find the best unit and the activation function for the first layer for us
    model.add(keras.layers.Conv2D(hp.Int('units', min_value=32, max_value=512, step=32),(3, 3),
              input_shape=input_shape, activation=hp.Choice('activation-fn',values=['relu', 'tanh']))) # 'sgd' is an optimizer, not an activation, so we search over 'relu' and 'tanh'
model.add(keras.layers.MaxPool2D((3,3)))
model.add(keras.layers.Conv2D(64, (2, 2), activation='relu'))
model.add(keras.layers.MaxPool2D((2,2)))
model.add(keras.layers.Conv2D(64, (2, 2), activation='relu'))
model.add(keras.layers.MaxPool2D((2,2)))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(64, activation='relu'))
model.add(keras.layers.Dense(32, activation='relu'))
model.add(keras.layers.Dense(3, activation='softmax'))
    model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])),
                  loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False), # the output layer applies softmax, so outputs are probabilities, not logits
                  metrics=['accuracy'])
return model
tuner = kt.Hyperband(model_builder,
objective='val_accuracy',
max_epochs=10,
)
tuner.search(train_images, train_labels, validation_data=VALIDATION_SET, epochs=EPOCHS, batch_size=BATCH_SIZE)
```
> That's basically how the Keras Tuner works
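After the search finishes, the tuner can report the best hyperparameters it found and build a fresh model with them. A minimal sketch (assuming the `tuner` defined above):
```python
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
print("Best number of units:", best_hps.get('units'))
print("Best activation:", best_hps.get('activation-fn'))
print("Best learning rate:", best_hps.get('learning_rate'))

# Build and train a model using the best hyperparameters found by the search
best_model = tuner.hypermodel.build(best_hps)
history = best_model.fit(train_images, train_labels, epochs=EPOCHS,
                         validation_data=VALIDATION_SET, batch_size=BATCH_SIZE)
```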
# Before we begin
* Github (Github Education)
* Bitbucket
* Kaggle
# Introduction
This week, we want to implement an Ant Colony Optimization algorithm to solve the Travelling Salesman Problem.
```
import random
import math
import operator
import matplotlib.pyplot as plt
```
# Content
* Travelling Salesman Problem
* Helper Functions
* Cost & Pheromone Graph
* Designing Ants
* Designing ACO
* Running
# Travelling Salesman Problem (TSP)
The travelling salesman problem (TSP) asks the following question: "Given a list of cities and the distances between each pair of cities, what is the shortest possible route that visits each city and returns to the origin city?" It is an NP-hard problem in combinatorial optimization, important in operations research and theoretical computer science.

# Helper Functions
\begin{align}
Distance = \sqrt{ (y_2 - y_1)^2 + (x_2 - x_1)^2}
\end{align}
```
def distance(city1: dict, city2: dict):
return math.sqrt((city1['x'] - city2['x']) ** 2 + (city1['y'] - city2['y']) ** 2)
def plot(points, path: list):
x = []
y = []
for point in points:
x.append(point[0])
y.append(point[1])
y = list(map(operator.sub, [max(y) for i in range(len(points))], y)) # for better visualization
plt.plot(x, y, 'co')
for k in range(1, len(path)):
i = path[k - 1] # index of first city
j = path[k] # index of next city
plt.arrow(x[i], y[i], x[j] - x[i], y[j] - y[i], color='r', length_includes_head=True)
plt.xlim(0, max(x) * 1.1)
plt.ylim(0, max(y) * 1.1)
plt.show()
```
# Prerequisites
### ACO Algorithm

### Strategy

With $Q \in [0,1]$
### Rho
\begin{align}
T_{ij}(t) \leftarrow \rho \cdot T_{ij}(t)
\end{align}
With $\rho \in [0,1]$
### Transition Probability

With $\alpha, \beta \in [0,1]$
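In case the image above does not render, the transition probability implemented in `select_next` below can be written as
\begin{align}
p_{ij} = \frac{T_{ij}^{\alpha} \, \eta_{ij}^{\beta}}{\sum_{l \in \text{allowed}} T_{il}^{\alpha} \, \eta_{il}^{\beta}}, \qquad j \in \text{allowed},
\end{align}
where $T_{ij}$ is the pheromone level on edge $(i,j)$ and $\eta_{ij} = 1 / d_{ij}$ is the heuristic information (the inverse of the distance).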
# Cost & Pheromone Graph
The graph is a data structure holding the matrices we need to evaluate the transition probability:
```
class Graph(object):
def __init__(self, cost_matrix: list, rank: int):
"""
:param cost_matrix:
:param rank: rank of the cost matrix
"""
self.matrix = cost_matrix
self.rank = rank
# noinspection PyUnusedLocal
self.pheromone = [[1 / (rank * rank) for j in range(rank)] for i in range(rank)]
```
# Designing Ants
```
class Ant(object):
def __init__(self, aco, graph: Graph):
self.colony = aco
self.graph = graph
self.total_cost = 0.0
self.path = [] # path
self.pheromone_delta = [] # the local increase of pheromone
self.allowed = [i for i in range(graph.rank)] # nodes which are allowed for the next selection
self.eta = [[0 if i == j else 1 / graph.matrix[i][j] for j in range(graph.rank)] \
for i in range(graph.rank)] # heuristic information for calculating
start = random.randint(0, graph.rank - 1) # start from any node
self.path.append(start)
self.current = start
self.allowed.remove(start)
def select_next(self):
denominator = 0
for i in self.allowed:
denominator += self.graph.pheromone[self.current][i] ** self.colony.alpha \
* self.eta[self.current][i] ** self.colony.beta
# noinspection PyUnusedLocal
probabilities = [0 for i in range(self.graph.rank)] # probabilities for moving to a node in the next step
for i in range(self.graph.rank):
try:
self.allowed.index(i) # test if allowed list contains i
probabilities[i] = self.graph.pheromone[self.current][i] ** self.colony.alpha * \
self.eta[self.current][i] ** self.colony.beta / denominator
except ValueError:
pass # do nothing
# select next node by probability roulette
selected = 0
rand = random.random()
for i, probability in enumerate(probabilities):
rand -= probability
if rand <= 0:
selected = i
break
self.allowed.remove(selected)
self.path.append(selected)
self.total_cost += self.graph.matrix[self.current][selected]
self.current = selected
# noinspection PyUnusedLocal
def update_pheromone_delta(self):
self.pheromone_delta = [[0 for j in range(self.graph.rank)] for i in range(self.graph.rank)]
for k in range(1, len(self.path)):
i = self.path[k - 1]
j = self.path[k]
if self.colony.update_strategy == 1: # ant-quality system
self.pheromone_delta[i][j] = self.colony.Q
elif self.colony.update_strategy == 2: # ant-density system
# noinspection PyTypeChecker
self.pheromone_delta[i][j] = self.colony.Q / self.graph.matrix[i][j]
else: # ant-cycle system
self.pheromone_delta[i][j] = self.colony.Q / self.total_cost
```
# Designing ACO
```
class ACO(object):
def __init__(self, ant_count: int, generations: int,
alpha: float, beta: float, rho: float,
q: int, strategy: int):
"""
:param ant_count:
:param generations:
:param alpha: relative importance of pheromone
:param beta: relative importance of heuristic information
:param rho: pheromone residual coefficient
:param q: pheromone intensity
:param strategy: pheromone update strategy. 0 - ant-cycle, 1 - ant-quality, 2 - ant-density
"""
self.Q = q
        self.rho = rho  # Evaporation rate
self.beta = beta
self.alpha = alpha
self.ant_count = ant_count
self.generations = generations
self.update_strategy = strategy
def _update_pheromone(self, graph: Graph, ants: list):
for i, row in enumerate(graph.pheromone):
for j, col in enumerate(row):
                graph.pheromone[i][j] *= self.rho  # Evaporation
for ant in ants:
graph.pheromone[i][j] += ant.pheromone_delta[i][j]
def solve(self, graph: Graph):
"""
:param graph:
"""
best_cost = float('inf')
best_solution = []
for gen in range(self.generations):
# noinspection PyUnusedLocal
ants = [Ant(self, graph) for i in range(self.ant_count)]
for ant in ants:
for i in range(graph.rank - 1):
ant.select_next()
ant.total_cost += graph.matrix[ant.path[-1]][ant.path[0]]
if ant.total_cost < best_cost:
best_cost = ant.total_cost
best_solution = [] + ant.path
# update pheromone
ant.update_pheromone_delta()
self._update_pheromone(graph, ants)
print('generation #{}, best cost: {}, path: {}'.format(gen, best_cost, best_solution))
return best_solution, best_cost
```
# Running
```
def main():
# Loading Data from files
cities = []
points = []
with open('./data/chn31.txt') as f:
for line in f.readlines():
city = line.split(' ')
cities.append(dict(index=int(city[0]), x=int(city[1]), y=int(city[2])))
points.append((int(city[1]), int(city[2])))
# Calculating Cost matrix => distance between city i and j
cost_matrix = []
rank = len(cities)
for i in range(rank):
row = []
for j in range(rank):
row.append(distance(cities[i], cities[j]))
cost_matrix.append(row)
    # Instantiate ACO and run
aco = ACO(10, 100, 1.0, 10.0, 0.5, 10, 2)
graph = Graph(cost_matrix, rank)
path, cost = aco.solve(graph)
print('cost: {}, path: {}'.format(cost, path))
    # Plotting the best cycle found
plot(points, path)
if __name__ == '__main__':
main()
```
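If the `./data/chn31.txt` file is not available, a quick way to try the solver is to generate random city coordinates instead. A minimal sketch reusing the classes above (the city count and seed are arbitrary choices):
```python
def main_random(n_cities=20, seed=42):
    random.seed(seed)
    # Random cities scattered over a 100 x 100 area
    cities = [dict(index=i, x=random.uniform(0, 100), y=random.uniform(0, 100)) for i in range(n_cities)]
    points = [(c['x'], c['y']) for c in cities]
    cost_matrix = [[distance(cities[i], cities[j]) for j in range(n_cities)] for i in range(n_cities)]
    aco = ACO(10, 100, 1.0, 10.0, 0.5, 10, 2)
    graph = Graph(cost_matrix, n_cities)
    path, cost = aco.solve(graph)
    print('cost: {}, path: {}'.format(cost, path))
    plot(points, path)

main_random()
```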
```
%pylab inline
```
# Drawing random numbers in Python
## 1. Drawing using the rectangular distribution
The prerequisite for drawing from a probability distribution is the ability to draw randomly from the rectangular or uniform distribution on $(0,1)$.
For any other distribution, draws can be generated by
1) draw $\xi$ randomly from the uniform distribution
2) evaluate the inverse cumulative distribution function $G^{-1}(x)$ at $\xi$
### Implementation in Python
Uniform numbers in Python are drawn by
``` python
import numpy as np
xi = np.random.rand(size)
```
Standard normally distributed values
```python
xi = np.random.randn(size)
```
#### Example
```python
import numpy as np
np.random.randn(100)
np.random.rand(100,10)
```
Probability distributions are implemented in _scipy_ with inverse cumulative distributions being implemented as **ppf** for the individual probability distributions:
``` python
import scipy.stats as stats
# normal distribution
stats.norm.ppf(q, loc = 0, scale = 1)
# gamma distribution
stats.gamma.ppf(q, a, loc = 0, scale = 1)
# t-distribution
stats.t.ppf(q, dof, loc = 0, scale = 1)
# poisson distribution
stats.poisson.ppf(q, mu, loc = 0)
```
### Exercise 1.1
Using the rectangular distribution, draw 1000 random numbers from
- normal distribution with mean $\mu=0.2$ and standard deviation $\sigma=0.1$
- gamma distribution with shape parameter $a=2.5$ and scale parameter $s=0.2$
- t-distribution with 5 degrees of freedom, located around $3.5$ and with scale $s=0.8$
Plot a histogram for each outcome.
```
from numpy.random import rand
import scipy.stats as stats
```
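One possible sketch for Exercise 1.1, using the inverse cumulative distribution functions (**ppf**) evaluated at uniform draws:
```python
xi = rand(1000)  # 1000 uniform draws on (0, 1)

# Evaluate the inverse CDF (ppf) of each target distribution at the uniform draws
normal_draws = stats.norm.ppf(xi, loc=0.2, scale=0.1)
gamma_draws = stats.gamma.ppf(xi, 2.5, scale=0.2)
t_draws = stats.t.ppf(xi, 5, loc=3.5, scale=0.8)

for draws, name in [(normal_draws, 'normal'), (gamma_draws, 'gamma'), (t_draws, 't')]:
    plt.hist(draws, bins=30)
    plt.title(name)
    plt.show()
```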
## 2. Drawing using the built-in generator functions
The **scipy.stats** package provides over 90 different probability distributions, each with its own random number generating function.
The basic usage is
1) Import the **scipy.stats** package
``` python
import scipy.stats as stats
```
2) Call the **rvs** function of the sought probability distribution with size as keyword argument
``` python
xi = stats.norm.rvs(size=1000)
xi = stats.gamma.rvs(a, size=1000)
xi = stats.t.rvs(dof, size=1000)
```
The optional keyword parameters for each distribution correspond to those of the call for the inverse cumulative distribution function.
### Exercise 1.2
Repeat the random number generation from Exercise 1.1, but now use the built-in **rvs** function for each example.
### Curvilinear trapezoidal distribution
To sample from CTrap(a, b, d), make two draws $r_1$ and $r_2$ independently from the standard rectangular distribution $R(0, 1)$ and form
$$ a_s = (a − d) + 2dr_1 \qquad b_s = (a+b)-a_s , $$
and
$$ \xi = a_s + (b_s − a_s)r_2 . $$
In this way $a_s$ is a draw from the rectangular distribution with limits $a \pm d$. $b_s$ is then formed to ensure that the midpoint of $a_s$ and $b_s$ is the prescribed value $x = (a + b)/2$.
### Task
A certificate states that a voltage X lies in the interval 10.0 V ± 0.1 V. No other information is available concerning X, except that it is believed that the magnitude of the interval endpoints is the result of rounding correctly some numerical value. On this basis, that numerical value lies between 0.05 V and 0.15 V, since the numerical value of every point in the interval (0.05, 0.15) rounded to one significant decimal digit is 0.1. The location of the interval can therefore be regarded as fixed, whereas its width is inexact. The best estimate of X is x = 10.0 V.
Based on a = 9.9 V, b = 10.1 V and d = 0.05 V, sample from the PDF and calculate the best estimate and the associated uncertainty.
```
a = 9.9
b = 10.1
d = 0.05
```
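A minimal sketch of the sampling and the resulting best estimate and standard uncertainty, following the CTrap recipe above (the number of draws `M` is an arbitrary choice):
```python
M = 100000               # number of Monte Carlo draws (arbitrary choice)
r1 = rand(M)
r2 = rand(M)

a_s = (a - d) + 2*d*r1      # lower endpoint drawn from the rectangular distribution on (a-d, a+d)
b_s = (a + b) - a_s         # upper endpoint mirrored around the fixed midpoint (a+b)/2
xi = a_s + (b_s - a_s)*r2   # draws from CTrap(a, b, d)

print("best estimate x =", xi.mean())
print("standard uncertainty u(x) =", xi.std(ddof=1))
```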
# Adau1761_0 IP
This notebook serves as a quick demonstration of the audio codec being used in the **PYNQ-Z2 board**. A new IP has been introduced to make use of the codec. Before starting with this notebook please ensure you have the following:
* Added the new audio.py file to the board
* Added the new pl.py file to the board
* Added the new libsaudio.so library to the board
## What does the new IP look like?
This is a screenshot of the addition made to the existing base overlay. The original audio IP block has been replaced with the following:
<p align="center">
<img src ="./sources/IP.JPG" width="100%" height="100%"/>
</p>
As we can see:
* The **adau1761_0** IP is where the main AXI interactions take place. It also consists of a serializer, to serialize the audio going to the headphone jack, and a deserializer, to decode the sound coming from the MIC.
* The **axi_dma_0** IP is responsible for streaming audio data to the adau1761_0 through the _Slave AXI-Stream_ Interface of adau1761_0
* The **segment_stream_0** IP is responsible for controlling the _Master AXI-Stream_ Interface of adau1761_0
# Wavgen
This is a separate Python function to generate a sine wave and save it as a _.wav_ file. The function description is as follows:
```
audio_write("name_of_the_file.wav", sampling rate, time period, frequency of sine wave)
```
(Make sure to keep this Jupyter notebook in the same directory as the wavgen.py file.)
```
from wavgen import audio_write
audio_write("./output/samples.wav",100,5,44)
```
The waveform being generated:
```
%matplotlib inline
import wave
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy.fftpack import fft
wav_path = "./output/samples.wav"
with wave.open(wav_path, 'r') as wav_file:
raw_frames = wav_file.readframes(-1)
num_frames = wav_file.getnframes()
num_channels = wav_file.getnchannels()
sample_rate = wav_file.getframerate()
sample_width = wav_file.getsampwidth()
temp_buffer = np.empty((num_frames, num_channels, 4), dtype=np.uint8)
raw_bytes = np.frombuffer(raw_frames, dtype=np.uint8)
temp_buffer[:, :, :sample_width] = raw_bytes.reshape(-1, num_channels,
sample_width)
temp_buffer[:, :, sample_width:] = \
(temp_buffer[:, :, sample_width-1:sample_width] >> 7) * 255
frames = temp_buffer.view('<i4').reshape(temp_buffer.shape[:-1])
for channel_index in range(num_channels):
plt.figure(num=None, figsize=(15, 3))
plt.title('Audio in Time Domain (Channel {})'.format(channel_index))
plt.xlabel('Time in s')
plt.ylabel('Amplitude')
time_axis = np.arange(0, num_frames/sample_rate, 1/sample_rate)
plt.plot(time_axis, frames[:, channel_index])
plt.show()
```
# Initialization
### Create a new audio object
```
from audio import *
base=Overlay("./sources/AXIS_audio.bit")
Audiobj=base.adau1761_0
```
## Bypass audio
Users can select either `LINE_IN`, or `HP+MIC` as the input port.
In the following example, we choose `MIC`. To choose `MIC`:
```python
Audiobj.select_microphone()
```
or to choose `LINE_IN`:
```python
Audiobj.select_line_in()
```
```
Audiobj.select_microphone()
```
## Load and play
Load a sample and play the loaded sample.
```
Audiobj.load("./sources/sine.wav")
```
## Play function
## Stream
Copy the list generated from the audio file (the load() function generates this) into an array.
```
buf = Audiobj.buffer
```
Create a contiguously allocated (CMA) numpy array
```
import pynq.lib.dma
from pynq import Xlnk
xlnk = Xlnk()
dma_send = base.axi_dma_0
cma_ar = xlnk.cma_array(buf.shape, buf.dtype)
cma_ar[:] = buf
```
The `playinit()` initializes the various audio codec registers.
The numpy array which we declared above is passed onto the **DMA** send channel.
```
async def play_audio():
Audiobj.playinit()
dma_send.sendchannel.transfer(cma_ar)
await dma_send.sendchannel.wait_async()
```
## Monitoring the CPU Usage
To see how CPU usage is impacted by the audio stream we create another task that prints out the current CPU utilisation every 3 seconds.
```
import psutil
import asyncio
@asyncio.coroutine
def print_cpu_usage():
# Calculate the CPU utilisation by the amount of idle time
# each CPU has had in three second intervals
last_idle = [c.idle for c in psutil.cpu_times(percpu=True)]
while True:
yield from asyncio.sleep(3)
next_idle = [c.idle for c in psutil.cpu_times(percpu=True)]
usage = [(1-(c2-c1)/3) * 100 for c1,c2 in zip(last_idle, next_idle)]
print("CPU Usage: {0:3.2f}%, {1:3.2f}%".format(*usage))
last_idle = next_idle
audio_task = asyncio.ensure_future(play_audio())
cpu_task = asyncio.ensure_future(print_cpu_usage())
asyncio.get_event_loop().run_until_complete(audio_task)
```
The `playend()` mutes the various audio codec registers which were being used.
```
Audiobj.playend()
```
### Slave
Please note that the play() function of the AXI-Slave interface is not configured properly.
```
Audiobj.play()
```
## Record function
Records a 5-second sample and stores it in a contiguously allocated memory array:
### Stream
Enter the time for which the recording will take place:
```
seconds = 5
```
Create a contiguously allocated (CMA) numpy array
```
import numpy as np
import pynq.lib.dma
from pynq import Xlnk
xlnk = Xlnk()
dma_send = base.axi_dma_0
cma_ar = xlnk.cma_array(shape = seconds * 2 * 48000, dtype = "uint32")
```
The segment_stream is responsible for managing the AXI-Stream transactions between the `MIC` (Master AXI-Stream) of the audio codec and the PS (Slave Stream).
```
base.segment_stream_0.write(0, seconds * 2 * 48000)
```
After this we have to send the audio array to the DMA
```
Audiobj.recordinit(seconds)
dma_send.recvchannel.transfer(cma_ar)
dma_send.recvchannel.wait()
```
And then to play it, we will use the DMA again to play from the array:
```
Audiobj.playinit()
dma_send.sendchannel.transfer(cma_ar)
dma_send.sendchannel.wait()
Audiobj.playend()
```
### Slave
This again is the recording function, but it uses the **AXI-Slave** interface instead of the **AXI-Stream**.
```
Audiobj.record(seconds=5)
Audiobj.play()
```
# Classifying cancer from 32 parameters
Data is taken from https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29
We simply read all the data, drop the patient ID and place the label into an array of its own.
```
import csv
import numpy
with open('data_Cancer.csv') as input_file:
text_data = [row for row in csv.reader(input_file, delimiter=',')]
for line in text_data:
_ = line.pop(0) #We remove the ID - no need for it
known_labels = ','.join([line.pop(0) for line in text_data])
raw_data = numpy.array(text_data).astype(numpy.float)
data = raw_data / numpy.max(raw_data, axis = 0)
```
Now we can write a generic clustering mechanism, similar to the small previous example.
```
def all_dist(observation, data):
    # Euclidean distance from one observation to every row of data, across all feature dimensions
    return numpy.sqrt(numpy.sum((data - observation)**2, axis=1))
def cluster(data, k):
samples, _= data.shape
centroids = numpy.array([data[numpy.random.randint(samples), :,] for _ in range(k)])
done = False
while not done:
distances = numpy.empty((k,samples))
for d in range(k):
distances[d, :] = all_dist(centroids[d], data)
winners = numpy.argmin(distances, axis = 0)
clusters = [data[winners == i, :] for i in range(k)]
prev_centroids = centroids
centroids = numpy.array([numpy.average(c, axis = 0) for c in clusters])
if numpy.sum(prev_centroids-centroids) == 0:
done=True
return winners
```
Now we can find the clusters; since we have only two categories it's rather fast. We cannot know whether cluster 0 is malignant or benign, but we have to assume that the smaller cluster is malignant. We thus map the labels according to that assumption. Then we can easily compare the classifications of each patient and check how well we did.
```
clusters = cluster(data, 2)
a, b = numpy.bincount(clusters)
labels = known_labels+''
if a<b:
labels = labels.replace('M','0')
labels = labels.replace('B','1')
else:
labels = labels.replace('M','1')
labels = labels.replace('B','0')
compare = (numpy.equal(clusters, numpy.array(labels.split(',')).astype(int)))
print(numpy.bincount(compare),'(Wrong, Right)')
```
Run it a few times and you will see that the success rate varies considerably. Several approaches can be tried to remedy this.
Try simply removing one or more dimensions to see if they are merely in the way (really: do a PCA, but quick-and-dirty tests are fine as well); a sketch of this is shown below.
Try changing the distance metric for individual dimensions, so that rather than simply including a parameter or not, as in the first approach, we can tune the importance of a parameter.
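A minimal sketch of the PCA suggestion above, using an SVD of the centred data to project onto the first few principal components before clustering (the number of components kept is an arbitrary choice):
```python
centered = data - numpy.mean(data, axis=0)
_, _, vt = numpy.linalg.svd(centered, full_matrices=False)
n_components = 5                      # arbitrary choice - try different values
projected = centered @ vt[:n_components].T
clusters = cluster(projected, 2)      # same clustering routine, fewer dimensions
```
The next cell then takes a different route: it extends the clustering routine itself, starting from many more centroids and fusing the closest pair until the target number of clusters remains.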
```
def cluster(data, k, centroids = []):
samples, _= data.shape
if centroids == []:
centroids = numpy.array([data[numpy.random.randint(samples), :,] for _ in range(k)])
done = False
while not done:
distances = numpy.empty((k,samples))
for d in range(k):
distances[d, :] = all_dist(centroids[d], data)
winners = numpy.argmin(distances, axis = 0)
clusters = [data[winners == i, :] for i in range(k)]
prev_centroids = centroids
clusters = [c for c in clusters if len(c)>0]
k = len(clusters)
centroids = numpy.array([numpy.average(c, axis = 0) for c in clusters])
if len(prev_centroids) == len(centroids):
if numpy.sum(prev_centroids-centroids) == 0:
done=True
return winners, centroids
target_k = 2
n_centroids = 25
centroids = []
while n_centroids > target_k:
clusters, centroids = cluster(data, n_centroids, centroids)
if ( n_centroids > target_k ) and ( len(centroids) == n_centroids ):
centroid_dist = numpy.sum(numpy.sqrt((centroids[:, numpy.newaxis, :]-centroids)**2), axis =2)
centroid_dist[centroid_dist==0] = 1000.0
centroids = list(centroids)
minpos = numpy.argmin(centroid_dist)
point0, point1 = centroids.pop(minpos//n_centroids), centroids.pop((minpos%n_centroids)-1) #-1 because we pop
centroids.append((point0 + point1)/2)
n_centroids -= 1
else:
n_centroids = len(centroids)
clusters, centroids = cluster(data, n_centroids, centroids) #We have the number of required centroids now
a, b = numpy.bincount(clusters)
labels = known_labels+''
if a<b:
labels = labels.replace('M','0')
labels = labels.replace('B','1')
else:
labels = labels.replace('M','1')
labels = labels.replace('B','0')
compare = (numpy.equal(clusters, numpy.array(labels.split(',')).astype(int)))
print(numpy.bincount(compare),'(Wrong, Right)')
```
***
Note to self - try with many more clusters, and after convergence,
fuse the two clusters that are closest to one another and repeat training.
Repeat until the desired number of clusters are found.
Fusing: simple mean, weighted mean or most discriminating (one furthest away from other centroids)
***
Tutorial on computational modeling and statistical model fitting part of the *IBL Computational Neuroscience Course* organized by the [International Brain Laboratory](https://www.internationalbrainlab.com/) (April 2020). **Lecturer:** [Luigi Acerbi](http://luigiacerbi.com/).
**Instructions:**
- To run the tutorial, you will need a standard scientific Python 3.x installation with Jupyter notebook (such as [Anaconda](https://www.anaconda.com/distribution/)).
- You will also need the `CMA-ES` optimization algorithm (see [here](https://github.com/CMA-ES/pycma)). You can install CMA-ES from the command line with `pip install cma`.
- For any question, please email the course instructor at [email protected].
**Initial setup and loading the data:**
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import scipy as sp
from scipy.stats import norm
import cma
```
During this tutorial, we are going to use data from the [International Brain Laboratory](https://www.internationalbrainlab.com/) publicly released behavioral mouse dataset, from exemplar mouse `KS014`. See [this preprint](https://www.biorxiv.org/content/10.1101/2020.01.17.909838v2) for more information about the task and datasets. These data can also be inspected via the IBL DataJoint public interface [here](https://data.internationalbrainlab.org/mouse/18a54f60-534b-4ed5-8bda-b434079b8ab8).
For convenience, the data of all behavioral sessions from exemplar mouse `KS014` have already been downloaded in the `data` folder and slightly preprocessed into two `.csv` files, one for the training sessions (`KS014_train.csv`) and one with the *biased* sessions (`KS014_biased.csv`).
We begin our tutorial by examining the training sessions.
```
df = pd.read_csv('./data/KS014_train.csv') # Load .csv file into a pandas DataFrame
df['signed_contrast'] = df['contrast']*df['position'] # We define a new column for "signed contrasts"
df.drop(columns='stim_probability_left', inplace=True) # Stimulus probability has no meaning for training sessions
print('Total # of trials: ' + str(len(df['trial_num'])))
print('Sessions: ' + str(np.unique(df['session_num'])))
df.head()
```
**Inspecting the data:**
The first thing to do with any dataset is to get familiar with it by running simple visualizations. Just plot stuff!
For example, as a starter we plot data from individual sessions using a *scatterplot* format (perhaps not the best). What can we see from here?
```
def scatterplot_psychometric_data(df,session_num=None,ax=None):
"""Plot psychometric data (optionally, of a chosen training session) as a scatter plot."""
if session_num == None:
trial_mask = np.ones(len(df['session_num']), dtype=bool) # Select all trials
else:
trial_mask = df['session_num'] == session_num # Indexes of trials of the chosen session
Ntrials = np.sum(trial_mask) # Number of chosen trials
# Count "left" and "right" responses for each signed contrast level
left_resp = df[(df['response_choice'] == -1) & trial_mask].groupby(['signed_contrast']).count()['trial_num']
right_resp = df[(df['response_choice'] == 1) & trial_mask].groupby(['signed_contrast']).count()['trial_num']
if ax == None:
ax=fig.add_axes([0,0,1,1])
ax.scatter(left_resp.index,np.zeros(len(left_resp.index)), s=left_resp*10);
ax.scatter(right_resp.index,np.ones(len(right_resp.index)), s=right_resp*10);
ax.set_xlabel('Signed contrast (%)')
ax.set_ylabel('Rightward response')
if session_num == None:
ax.set_title('Psychometric data (# trials = ' + str(Ntrials) + ')')
else:
ax.set_title('Psychometric data (session ' + str(session_num) + ', # trials = ' + str(Ntrials) + ')')
return ax
# Plot 2nd session
fig = plt.figure(figsize=(9,4))
scatterplot_psychometric_data(df,2)
plt.show()
# Plot 15th session (last training session)
fig = plt.figure(figsize=(9,4))
scatterplot_psychometric_data(df,15)
plt.show()
```
We plot the same data again, this time with a different type of plot which may be more informative.
```
def plot_psychometric_data(df,session_num=None,ax=None):
"""Plot psychometric data (optionally, of a chosen training session) as a scatter plot."""
if session_num == None:
trial_mask = np.ones(len(df['session_num']), dtype=bool) # Select all trials
else:
trial_mask = df['session_num'] == session_num # Indexes of trials of the chosen session
Ntrials = np.sum(trial_mask) # Number of chosen trials
# Count "left" and "right" responses for each signed contrast level
left_resp = df[(df['response_choice'] == -1) & trial_mask].groupby(['signed_contrast']).count()['trial_num']
right_resp = df[(df['response_choice'] == 1) & trial_mask].groupby(['signed_contrast']).count()['trial_num']
frac_resp = right_resp / (left_resp + right_resp)
err_bar = np.sqrt(frac_resp*(1-frac_resp)/(left_resp + right_resp)) # Why this formula for error bars?
if ax == None:
ax=fig.add_axes([0,0,1,1])
ax.errorbar(x=left_resp.index,y=frac_resp,yerr=err_bar,label='data');
ax.set_xlabel('Signed contrast (%)')
ax.set_ylabel('Rightward response')
if session_num == None:
ax.set_title('Psychometric data (# trials = ' + str(Ntrials) + ')')
else:
ax.set_title('Psychometric data (session ' + str(session_num) + ', # trials = ' + str(Ntrials) + ')')
plt.xlim((-105,105))
plt.ylim((0,1))
return ax
fig = plt.figure(figsize=(9,4))
plot_psychometric_data(df,2)
plt.show()
fig = plt.figure(figsize=(9,4))
plot_psychometric_data(df,15)
plt.show()
```
**The psychometric function model:**
We define now the `basic` psychometric function (descriptive) model and a plotting function.
```
def psychofun(theta,stim):
"""Psychometric function based on normal CDF and lapses"""
mu = theta[0] # bias
sigma = theta[1] # slope/noise
lapse = theta[2] # lapse rate
if len(theta) == 4: # lapse bias
        lapse_bias = theta[3]
else:
lapse_bias = 0.5 # if theta has only three elements, assume symmetric lapses
p_right = norm.cdf(stim,loc=mu,scale=sigma) # Probability of responding "rightwards", without lapses
p_right = lapse*lapse_bias + (1-lapse)*p_right # Adding lapses
return p_right
def psychofun_plot(theta,ax):
"""Plot psychometric function"""
stim = np.linspace(-100,100,201) # Create stimulus grid for plotting
p_right = psychofun(theta,stim) # Compute psychometric function values
ax.plot(stim,p_right,label='model')
ax.legend()
return
```
Now try plotting the psychometric function for different values of the parameters (use both the symmetric and asymmetric psychometric function). Try and match the data from one of the sessions.
```
theta0 = (0,50,0.2,0.5) # Arbitrary parameter values - try different ones
session_num = 15
fig = plt.figure(figsize=(9,4))
ax = plot_psychometric_data(df,session_num)
psychofun_plot(theta0,ax)
plt.show()
```
We now define the log likelihood function of the psychometric function model for a given dataset and model parameter vector, $\log p(\text{data}|\mathbf{\theta})$.
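Writing $p_{\text{R}}(s_t; \theta)$ for the model probability of a rightward response to stimulus $s_t$ on trial $t$, the quantity computed below is
$$
\log p(\text{data}|\theta) = \sum_{t:\, r_t = \text{R}} \log p_{\text{R}}(s_t;\theta) + \sum_{t:\, r_t = \text{L}} \log\left(1 - p_{\text{R}}(s_t;\theta)\right).
$$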
```
def psychofun_loglike(theta,df):
"""Log-likelihood for psychometric function model"""
s_vec = df['signed_contrast'] # Stimulus values
r_vec = df['response_choice'] # Responses
p_right = psychofun(theta,s_vec)
# Compute summed log likelihood for all rightwards and leftwards responses
loglike = np.sum(np.log(p_right[r_vec == 1])) + np.sum(np.log(1 - p_right[r_vec == -1]))
return loglike
```
Now try to get the best fit for this session, as we did before, but by finding better and better values of the log-likelihood.
```
session_num = 14 # Let's use a different session
theta0 = (0,25,0.1,0.5)
ll = psychofun_loglike(theta0,df[df['session_num'] == session_num])
print('Log-likelihood value: ' + "{:.3f}".format(ll))
fig = plt.figure(figsize=(9,4))
ax = plot_psychometric_data(df,session_num)
psychofun_plot(theta0,ax)
plt.show()
```
**Maximum-likelihood estimation:**
In this section, we are going to estimate model parameters (aka fit our models) by maximizing the log-likelihood. By convention in optimization, we are going to *minimize* the negative log-likelihood.
Before running the optimization, we define the *hard* lower and upper bounds for the parameters. If the optimization algorithm supports constrained (bound) optimization, it will never go outside the hard bounds. We also define informally the *plausible* bounds as the range of parameters that we would expect to see. We are going to use the plausible range to initialize the problem later.
```
# Define hard parameter bounds
lb = np.array([-100,0.5,0,0])
ub = np.array([100,200,1,1])
bounds = [lb,ub]
# Define plausible range
plb = np.array([-25,5,0.05,0.2])
pub = np.array([25,25,0.40,0.8])
# Pick session data
session_num = 14
df_session = df[df['session_num'] == session_num]
# Define objective function: negative log-likelihood
opt_fun = lambda theta_: -psychofun_loglike(theta_,df_session)
```
We are now going to run a *black-box* optimization algorithm called CMA-ES. For now we are going to run the optimization only once, but in general you should *always* run the optimization from multiple distinct starting points.
```
# Generate random starting point for the optimization inside the plausible box
theta0 = np.random.uniform(low=plb,high=pub)
# Initialize CMA-ES algorithm
opts = cma.CMAOptions()
opts.set("bounds",bounds)
opts.set("tolfun",1e-5)
# Run optimization
res = cma.fmin(opt_fun, theta0, 0.5, opts)
print('')
print('Returned parameter vector: ' + str(res[0]))
print('Negative log-likelihood at solution: ' + str(res[1]))
fig = plt.figure(figsize=(9,4))
ax = plot_psychometric_data(df_session,session_num)
psychofun_plot(res[0],ax)
plt.show()
```
**Model comparison:**
We consider now a slightly more advanced model which includes time dependency by having the response in the current trial be influenced by the response in the previous trial. We adopt a simple model, `repeatlast`, in which the observer has a fixed chance of repeating the previous choice.
```
def psychofun_repeatlast_loglike(theta,df):
"""Log-likelihood for last-choice dependent psychometric function model"""
s_vec = np.array(df['signed_contrast']) # Stimulus values
r_vec = np.array(df['response_choice']) # Responses
p_last = theta[0] # Probability of responding as last choice
theta_psy = theta[1:] # Standard psychometric function parameters
p_right = psychofun(theta_psy,s_vec)
# Starting from the 2nd trial, probability of responding equal to the last trial
p_right[1:] = p_last*(r_vec[0:-1] == 1) + (1-p_last)*p_right[1:]
# Compute summed log likelihood for all rightwards and leftwards responses
loglike = np.sum(np.log(p_right[r_vec == 1])) + np.sum(np.log(1 - p_right[r_vec == -1]))
return loglike
lb = np.array([0,-100,1,0,0])
ub = np.array([1,100,100,1,1])
bounds = [lb,ub]
plb = np.array([0.05,-25,5,0.05,0.2])
pub = np.array([0.2,25,25,0.45,0.8])
df_session = df[df['session_num'] == session_num]
# df_session = df[(df['session_num'] == session_num) & (df['trial_num'] > 300)]
opt_fun = lambda theta_: -psychofun_repeatlast_loglike(theta_,df_session)
theta0 = np.random.uniform(low=plb,high=pub)
opts = cma.CMAOptions()
opts.set("bounds",bounds)
opts.set("tolfun",1e-5)
res_repeatlast = cma.fmin(opt_fun, theta0, 0.5, opts)
print('')
print('Returned parameter vector: ' + str(res_repeatlast[0]))
print('Negative log-likelihood at solution: ' + str(res_repeatlast[1]))
fig = plt.figure(figsize=(9,4))
ax = plot_psychometric_data(df_session,session_num)
#psychofun_plot(res[0],ax)
plt.show()
```
We now calculate a few simple model comparison metrics, such as AIC and BIC, for the `basic` and `repeatlast` models.
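As a reminder, for a model with $k$ parameters, maximum log-likelihood $\log \hat{L}$, and $n$ trials, these metrics are
$$
\text{AIC} = 2k - 2\log \hat{L}, \qquad \text{BIC} = k \log n - 2\log \hat{L}.
$$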
```
Nmodels = 2
nll = np.zeros(Nmodels)
nparams = np.zeros(Nmodels)
results = [res,res_repeatlast] # Store all optimization output in a vector
for i in range(0,len(results)):
nll[i] = results[i][1] # The optimization algorithm received the *negative* log-likelihood
nparams[i] = len(results[i][0])
ntrials = len(df_session['signed_contrast']) # number of trials used for fitting (the chosen session)
aic = 2*nll + 2*nparams
bic = 2*nll + nparams*np.log(ntrials)
print('Model comparison results (for all metrics, lower is better)\n')
print('Negative log-likelihoods: ' + str(nll))
print('AIC: ' + str(aic))
print('BIC: ' + str(bic))
```
**[Advanced] Optional model:**
We consider next a more advanced model which includes explicit time dependency (the trials are not all the same), also known as *non-stationarity*. Note that this function is not coded very efficiently and runs quite slowly due to the `for` loop - it could be improved with vectorization.
```
def psychofun_timevarying_loglike(theta,df):
"""Log-likelihood for time-varying psychometric function model"""
s_vec = np.array(df['signed_contrast']) # Stimulus values
r_vec = np.array(df['response_choice']) # Responses
Ntrials = len(s_vec)
mu_vec = np.linspace(theta[0],theta[4],Ntrials)
sigma_vec = np.linspace(theta[1],theta[5],Ntrials)
lapse_vec = np.linspace(theta[2],theta[6],Ntrials)
lapsebias_vec = np.linspace(theta[3],theta[7],Ntrials)
p_right = np.zeros(Ntrials)
for t in range(0,Ntrials):
p_right[t] = psychofun([mu_vec[t],sigma_vec[t],lapse_vec[t],lapsebias_vec[t]],s_vec[t])
# Compute summed log likelihood for all rightwards and leftwards responses
loglike = np.sum(np.log(p_right[r_vec == 1])) + np.sum(np.log(1 - p_right[r_vec == -1]))
return loglike
theta0 = (0,20,0.1,0.5,1,20,0.1,0.5)
ll = psychofun_timevarying_loglike(theta0,df[df['session_num'] == session_num])
lb = np.array([-100,1,0,0,-100,1,0,0])
ub = np.array([100,100,1,1,100,100,1,1])
bounds = [lb,ub]
plb = np.array([-25,5,0.05,0.2,-25,5,0.05,0.2])
pub = np.array([25,25,0.45,0.8,25,25,0.45,0.8])
session_num = 14
df_session = df[df['session_num'] == session_num]
# df_session = df[(df['session_num'] == session_num) & (df['trial_num'] > 300)]
opt_fun = lambda theta_: -psychofun_timevarying_loglike(theta_,df_session)
theta0 = np.random.uniform(low=plb,high=pub)
opts = cma.CMAOptions()
opts.set("bounds",bounds)
opts.set("tolfun",1e-5)
res_time = cma.fmin(opt_fun, theta0, 0.5, opts)
print('')
print('Returned parameter vector: ' + str(res_time[0]))
print('Negative log-likelihood at solution: ' + str(res_time[1]))
fig = plt.figure(figsize=(9,4))
ax = plot_psychometric_data(df_session,session_num)
#psychofun_plot(res[0],ax)
plt.show()
```
```
# hide
%load_ext autoreload
from nbdev import *
# default_exp annotate
```
# Annotate
> Tools to support creating and processing annotations for samples of Newspaper Navigator data using Label Studio
```
# hide
from nbdev.showdoc import *
# export
from nnanno.core import *
# export
from tqdm.notebook import trange, tqdm
from toolz.itertoolz import count
import pandas as pd
from pandas import json_normalize
import simplejson as json
import requests
import re
from datetime import datetime
from glob import glob
from pathlib import Path
# export
import nnanno
from typing import Union, Optional, Type
```
## Annotating Newspaper Navigator data
Once you have created a sample of Newspaper Navigator data using `sample`, you might want to annotate it somehow. These annotations may function as the input for a machine learning model or could be used directly to explore images in the newspaper navigator data. The `Examples` section in the documentation shows how annotations can generate training data for machine learning tasks.
## Setup annotation task
The bulk of the annotation work is outsourced to Label Studio, which provides a flexible annotation system that supports annotations for various data types, including images and text. This module provides a few steps to help process annotations produced through Label Studio, and is essentially a set of suggestions on how you can get Label Studio set up with data from Newspaper Navigator.
First, we'll create a small sample of images we want to annotate using `sample`. If you have already done this step, you can skip this.
```
# export
from nnanno.sample import *
sampler = nnSampler()
df = sampler.create_sample(
50, "photos", start_year=1910, end_year=1920, year_sample=False
)
```
There are a few ways in which we can use label studio to annotate. For example, we could download images from our sample using `sample.download_sample`. However, if we have a large sample of images, we might want to do some annotating before downloading all of these images locally.
Label-studio supports annotating from a URL. We can use this combined with IIIF to annotate images without downloading them all first since IIIF is a flexible interface for getting images. IIIF also gives us flexibility in annotating at a smaller resolution/size before downloading higher-res images.
## Create label studio annotation tasks
Label-studio supports a load of different ways of setting up 'tasks'. In this context, a 'task' is an image to be annotated. One way of setting up a task is to import a `JSON` file that includes tasks. To do this, we take an existing sample DataFrame and add a column `image`, which contains an IIIF URL.
```
# export
def create_label_studio_json(
sample: Union[pd.DataFrame, Type[nnSampler]],
fname: Union[str, Path, None] = None,
original: bool = True,
pct: Optional[int] = None,
size: Optional[tuple] = None,
preserve_asp_ratio: bool = True,
):
"""create a json file which can be used to upload tasks to label studio"""
if fname and Path(fname).exists():
raise FileExistsError(f"{fname} already exists")
if fname is None:
today = datetime.today()
time_stamp = today.strftime("%Y_%d_%m_%H_%M")
fname = f"{time_stamp}_tasks.json"
if type(sample) == nnanno.sample.nnSampler:
try:
sample = sample.sample.copy()
except AttributeError as e:
print(f"{sample} doesn't have a sample associated with it")
else:
sample = sample.copy()
sample["image"] = sample.apply(
lambda x: iiif_df_apply(
x,
original=original,
pct=pct,
size=size,
preserve_asp_ratio=preserve_asp_ratio,
),
axis=1,
)
label_studio_json = sample.apply(lambda x: x.to_dict(), axis=1).to_list()
with open(fname, "w") as f:
json.dump(label_studio_json, f, ignore_nan=True)
```
We can pass in either a DataFrame or `nnSampler` to `create_label_studio_json`. This is a simple function that will create a `JSON` file that can create 'tasks' in Label Studio. In this example, we pass in a size parameter. This is used to generate an IIIF URL that will request this size.
```
create_label_studio_json(df, "tasks.json", size=(500, 500))
# hide
Path("tasks.json").unlink()
```
This creates a `JSON` file we can use to load tasks into label-studio.
### Importing tasks into label studio
To avoid this documentation becoming out of date, I haven't included screenshots etc. However, you can currently (January 2021) create tasks in label studio via the GUI or by passing in tasks through the CLI. For example, to load the tasks and create a template for annotating classifications
```bash
label-studio init project_name --template=image_classification --input-path=tasks.json
```
You can then start label-studio and complete the rest of the setup via the GUI.
```bash
label-studio start ./project_name
```
## Setting up labeling
For a proper introduction to configuring your labels, consult the Label Studio [documentation](https://labelstud.io/guide/). One way in which you can set up labels is to use a template as shown above. This template sets up an image classification task. There are other [templates](https://labelstud.io/templates/) for different tasks. These templates consist of `XML` templates that define your labels. These templates allow you to define how you want to label your images and share these definitions with others. For example
```xml
<View>
<Choices name="choice" toName="image" showInLine="true" choice="multiple">
<Choice value="human"/>
<Choice value="animal"/>
<Choice value="human-structure"/>
<Choice value="landscape"/>
</Choices>
<Image name="image" value="$image"/>
</View>
```
You can change many other options in Label-studio. It also includes features such as adding a machine learning backend to support annotations.
### Notes on labelling using IIIF images
There are a few things to consider and be aware of when loading images via IIIF in label studio.
#### Missing images
Occasionally when you are doing your annotations in label studio for IIIF URLs, you will get a missing image error. This is probably because for some reason the IIIF URL has been generated incorrectly for that image, or that image doesn't exist via IIIF. If this happens, you can 'skip' this image in the annotation interface.
#### Setting a comfortable size for viewing
You can take advantage of the flexibility of IIIF by requesting images to be a specific size when you create the tasks. This also helps speed up the process of loading each image since we often request a smaller sized image to fit it in on a smallish screen comfortably.
#### Annotating vs training image size, resolution etc.
If you are annotating labels or classifications, you may decide to annotate at a smaller size or quality and work with a higher quality image when you come to training a model. If you are doing any annotations of pixels or regions of the image, you will want to be careful to make sure these aren't lost if moving between different sizes of the image.
### Exporting and loading annotations from label studio
Label studio supports a broad range of annotation tasks which may require particular export formats, e.g. COCO or VOC for object detection. Since the processing of these outputs is task-specific, this module only contains functionality to deal with image classification and labelling tasks, since these were the tasks covered in the Programming Historian lessons for which this code was originally written.
### Exporting and processing CSV
Once you have finished annotating all your images or got too bored of annotating, you can export in various formats, including JSON and CSV. A CSV export is often sufficient for simple tasks and has the additional benefit of having a lower barrier to entry than JSON for people who aren't coders.
We'll now process the annotations we generated above and labeled using label studio
```
# export
def process_labels(x):
    try:
        x = "|".join(eval(x)["choices"])
    except NameError:
        pass
    return x
# exports
def load_annotations_csv(csv: Union[str, Path], kind="classification"):
if kind == "classification":
df = pd.read_csv(csv, converters={"box": eval})
df["label"] = df["choice"]
return df
if kind == "label":
df = pd.read_csv(csv, converters={"box": eval})
df["label"] = df["choice"].apply(process_labels)
return df
```
As you can see above, this code doesn't do much to process the annotations into a DataFrame. The main thing to note is the `kind` parameter. The CSV export for labelling tasks includes a column that contains a JSON with the labels. In this case, we use a pandas converter and `eval` and grab the choices, which returns a list of labels.
If we look at the columns from the annotation DataFrame we'll see that label studio kept the original metadata. We now have a new column `label` that contains our annotations. We also have a column `choice` containing the original column format from the label studio export, which will be different from the `label` column when processing labelling annotations.
```
annotation_df = load_annotations_csv("test_iiif_anno/label_studio_export.csv")
annotation_df.columns
# hide
assert "choice" in annotation_df.columns
```
We can now do the usual Pandas things to start exploring our annotations further. For example we can see how many of each label option we have
```
annotation_df["choice"].value_counts()
```
### Downloading the images associated with annotations
Once we have some annotations done, we'll often want to get the original images to work locally. This is particularly important if we are planning to train a machine learning model with these images. Although it is possible to train a model using the images from IIIF, since we'll usually be grabbing these images multiple times for each epoch, this isn't particularly efficient and isn't very friendly to the IIIF endpoint.
We can use the `sampler.download_sample` method to download our sample; we just pass in our annotation DataFrame a folder we want to download images to and an optional name to save our 'log' of the download. We can also pass in different parameters to request different size etc. of the image. See the `download_sample` docs for more details.
```
sampler.download_sample(
"test_iiif_anno/test_dl", df=annotation_df, original=True, json_name="test_dl"
)
# hide
# test we have a very similar number of images downloaded and in our annotation dataframe
# allow for some images to be missing
images = list(Path("test_iiif_anno/test_dl").rglob("*.jpg"))
test_close(len(images), len(annotation_df), eps=1)
```
### Moving between local annotation and the cloud ☁
Although 'storage is cheap', it isn't free. One helpful feature of the IIIF annotations workflow is that it allows you to annotate 'locally,' i.e. on a personal computer, and then quickly move the information required to download all the images into the cloud without having to pass the images themselves around. This is particularly useful if you will use a service like Google Colab to train a computer vision model, e.g. because you don't have local GPU resources.
In the context of working with limited bandwidth, it might also be relatively time-consuming to download a large set of images. However, it might be feasible to get around this by annotating using the IIIF images and then using a service like Google Colab when you want to grab the actual image files. Since Colab is running in the cloud with a big internet tube, this should be much more doable even if your internet is limited.
Once you have download your images you may want to check if any images weren't able to download. You can do this using the `check_download_df_match` function.
```
# export
def check_download_df_match(dl_folder: Union[Path, str], df: pd.DataFrame) -> str:
im_count = count(
f for f in Path(dl_folder).iterdir() if f.suffix in image_extensions
)
if type(df) == pd.core.frame.DataFrame:
if len(df) == im_count:
print(
f"Length of DataFrame {len(df)} and number of images in {dl_folder} {im_count} match",
"\U0001F600",
)
if len(df) != im_count:
print(
f"Length of DataFrame {len(df)} and number of images in {dl_folder} {im_count} do not match",
"\U0001F615",
)
```
This will let you know if you have a different number of downloaded images compared to the number of rows in the DataFrame.
```
check_download_df_match("test_iiif_anno/test_dl", annotation_df)
```
## Working with the annotations
This will really depend on the framework or library you want to use. In fastai the process is simple since our data matches one of the fastai 'factory' methods for loading data.
### Loading with fastai
```
# slow
from fastai.vision.all import *
# slow
df = pd.read_json("test_iiif_anno/test_dl/test_dl.json")
# slow
dls = ImageDataLoaders.from_df(
df,
path="test_iiif_anno/test_dl",
fn_col="download_image_path",
label_col="choice",
item_tfms=Resize(64),
bs=4,
)
# slow
dls.show_batch()
# hide
[f.unlink() for f in Path("test_iiif_anno/test_dl").iterdir()]
Path("test_iiif_anno/test_dl").rmdir()
```
## Process completions directly
Label studio stores annotations as json files so we can work with these directly without using the exports from label studio. This code below shows how to do this but the above approach is likely to be more reliable.
```
# export
def load_df(json_file: Union[str, Path]):
with open(json_file) as f:
data = json.load(f)
df = json_normalize(data, record_path=["completions"], meta=["data"])
# df['result'] = df['result'].apply(lambda x: return_choice(x[0]) if len([x][0]) ==1 else x)
df["result"] = df["result"].apply(
lambda x: x[0]["value"]["choices"] if len([x][0]) == 1 else x
)
return df
# export
def load_completions(path: Union[str, Path]):
filenames = glob(f"{path}/completions/*.json")
dataframes = [load_df(f) for f in filenames]
return pd.concat(dataframes)
# slow
df = load_completions("../ph/ads/ad_annotations/")
df.head(1)
# slow
# df = load_completions('../ph/photos/multi_label/')
# df.head(1)
# exporti
def _df_to_csv(df, out_fn):
df[["data", "result"]].to_csv(
out_fn,
header=[
"file",
"label",
],
index=False,
)
# exporti
def _df_to_json(df, out_fn):
df[["data", "value.choices"]].to_json(out_fn)
# exporti
def _df_to_pkl(df, out_fn):
df.to_pickle(out_fn)
# exporti
def get_og_filepath(x):
"""
    Transforms a filepath from the processed Label Studio format back to the original Newspaper Navigator filepath format
"""
b, m, e = re.split("(_data_)", x)
m = m.replace("_", "/")
    e = re.split(r"(\d{3}_\d{1}_\d{2}.jpg)", e)
return b + m + e[0].replace("_", "/") + e[1]
# export
def anno_sample_merge(
sample_df: pd.DataFrame, annotation_df: pd.DataFrame
) -> pd.DataFrame:
"""anno_sample_merge merges a DataFrame containing a sample
from Newspaper Navigator and a DataFrame containing annotations
Parameters
----------
sample_df : pd.DataFrame
A Pandas DataFrame which holds a sample from Newspaper Navigator Generated by `sample.nnSample()`
annotation_df : pd.DataFrame
A pandas DataFrame containing annotations loaded via the `annotate.nnAnnotations` class
Returns
-------
pd.DataFrame
A new DataFrame which merges the two input DataFrames
"""
sample_df, annotation_df = sample_df.copy(), annotation_df.copy()
annotation_df["id"] = annotation_df["data"].map(lambda x: get_og_filepath(x))
return sample_df.merge(annotation_df, left_on="filepath", right_on="id")
sample_df = pd.read_csv("../ph/ads/sample.csv", index_col=0)
# export
class nnAnnotations:
def __init__(self, df):
self.annotation_df = df
self.labels = df["result"].unique()
self.label_counts = df["result"].value_counts()
def __repr__(self):
return f"{self.__class__.__name__}" f" #annotations:{len(self.annotation_df)}"
@classmethod
def from_completions(cls, path, kind, drop_dupes=True, sample_df=None):
df = load_completions(path)
df = df.reset_index(drop=True) # add index
df["data"] = df["data"].map(lambda x: x["image"])
df["data"] = df["data"].map(lambda x: x.split("?")[0])
df["data"] = df["data"].apply(lambda x: Path(x).name)
if any(
df["data"].str.contains("-")
): # removes labelstudio hash from data loaded via web interface
df["data"] = df["data"].str.split("-", expand=True)[1]
if drop_dupes:
df = df.drop_duplicates(subset="data", keep="last")
if kind == "classification":
empty_rows = df[df["result"].apply(lambda x: len(x) == 0)].index
df = df.drop(empty_rows)
df["result"] = df["result"].map(lambda x: x[0])
if kind == "label":
df["result"] = df["result"].map(
lambda x: "|".join(map(str, x)) if len(x) >= 1 else x
)
df["result"] = df["result"].map(lambda x: "" if len(x) == 0 else x)
return cls(df)
def merge_sample(self, sample_df):
self.merged_df = anno_sample_merge(sample_df, self.annotation_df)
def export_merged(self, out_fn):
self.merged_df.to_csv(out_fn)
def export_annotations(self, out_fn):
df = self.annotation_df
if not Path(out_fn).exists():
Path(out_fn).touch()
suffix = Path(out_fn).suffix
if suffix == ".csv":
_df_to_csv(df, out_fn)
if suffix == ".json":
_df_to_json(df, out_fn)
if suffix == ".pkl":
_df_to_pkl(df, out_fn)
show_doc(nnAnnotations)
show_doc(nnAnnotations.from_completions)
annotations = nnAnnotations.from_completions(
"../ph/ads/ad_annotations/", "classification"
)
annotations
annotations.labels
annotations.label_counts
show_doc(nnAnnotations.merge_sample)
annotations.merge_sample(sample_df)
annotations.merged_df.head(2)
show_doc(nnAnnotations.export_merged)
annotations.export_merged("testmerge.csv")
show_doc(nnAnnotations.from_completions)
# hide
Path("testmerge.csv").unlink()
annotations = nnAnnotations.from_completions(
"../ph/ads/ad_annotations/", "classification"
)
annotations.annotation_df.head(2)
from nbdev.export import notebook2script
notebook2script()
```
## Compute a Monte Carlo integral for any specified function.
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import math
```
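Both implementations below use the hit-or-miss estimator: points are sampled uniformly in a box enclosing the positive part of the function, and the fraction of points landing below the curve, multiplied by the box area, estimates the area under the curve,
$$
\int_a^b f(x)\,dx \approx \frac{N_{\mathrm{below}}}{N} \, A_{\mathrm{box}}.
$$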
Riofa-Gean Fernandez ID: 1396498
```
N = 500 # Number of points
a = 0 #x-axis min to replace
b = 1.75 #x-axis max to replace
def f(x):
return np.cos(x) #function to replace
x = np.arange(a,b,0.01) #(start, stop, step interval)
y = f(x) #function
d = max(y) #y-axis maximum
c = min(y) #y-axis minimum
#compute the number of random points
x_rand = a + (b - a)*np.random.random(N)
y_rand = np.random.random(N)*d
ind_below = np.where(y_rand < f(x_rand)) #points below the function
ind_above = np.where(y_rand >= f(x_rand)) #points above the function
#plot the function
pts_below = plt.scatter(x_rand[ind_below], y_rand[ind_below], label = "Points below function", color = "green")
pts_above = plt.scatter(x_rand[ind_above], y_rand[ind_above], label = "Points above function", color = "blue")
plt.plot(x, y, label = "Function", color = "red")
plt.legend(loc = 'lower center', ncol = 2)
int_answer_1 = len(ind_below[0])/N*((b-a)*d) #first integral estimate: fraction of hits times the area of the sampling box [a,b] x [0,d] (By R. Fernandez and S. Yuen)
#print the answer
print ("Number of points above the function:", len(ind_above[0]))
print ("Number of points below the function:", len(ind_below[0]))
print ("Fraction of points below the function:", int_answer_1) #By S. Yuen
```
Sierra Yuen ID: 1495259
```
N = 10000 #number of points
a2 = 0 #x-axis minimum
b2 = 1.75 #x-axis maximum
def f(x):
return np.cos(x) #function to replace
x = np.arange(a2,b2,0.01) #(start,stop,step interval)
y = f(x) #function
d2 = max(y) #y-axis maximum
c2 = min(y) #y-axis minimum
#compute the number of random points
x_rand = a2 + (b2 - a2)*np.random.random(N)
y_rand = np.random.random(N)*d2
ind_below = np.where(y_rand < f(x_rand)) #points below the function
ind_above = np.where(y_rand >= f(x_rand)) #points above the function
#plot the function
pts_below = plt.scatter(x_rand[ind_below], y_rand[ind_below], label = "Dots below function", color = "green")
pts_above = plt.scatter(x_rand[ind_above], y_rand[ind_above], label = "Dots above function", color = "blue")
plt.plot(x, y, label = "Function", color = "red")
plt.legend(loc = 'lower center', ncol = 2)
int_answer_2 = len(ind_below[0])/N*((b2-a2)*d2) #second integral estimate: fraction of hits times the area of the sampling box (By R. Fernandez and S. Yuen)
#print the answer
print ("Number of points above the function:", len(ind_above[0]))
print ("Number of points below the function:", len(ind_below[0]))
print ("Fraction of points below the function:", int_answer_2)
#specify a tolerance for the integration
tolerance = int_answer_2 - int_answer_1
#print the tolerance
print(tolerance)
```
## PmodOLED Example
## Contents
* [Introduction](#Introduction)
* [Setup the board and PmodOLED](#Setup-the-board-and-PmodOLED,-and-load-the-overlay)
* [Write to the PmodOLED](#Write-to-the-PmodOLED)
* [Draw some patterns](#Draw-some-patterns)
* [Create a new Python function](#Create-a-new-Python-function)
* [Putting it all together](#Putting-it-all-together)
----
## Introduction
This demonstration shows how to use the [PmodOLED](https://reference.digilentinc.com/reference/pmod/pmodoled/start) with the PYNQ-Z1 or PYNQ-Z2 board.
----
## Setup the board and PmodOLED, and load the overlay
### Connect the PmodOLED to the board.
In this example the ***PmodOLED*** should be connected to ***PMODA.***
Download the base overlay
```
from pynq.overlays.base import BaseOverlay
base = BaseOverlay("base.bit")
```
Create an oled instance
```
from pynq.lib.pmod import Pmod_OLED
# Connect to PMODA
pmod_oled = Pmod_OLED(base.PMODA)
```
## Write to the PmodOLED
```
pmod_oled.clear()
pmod_oled.write(' Welcome\n to\n PYNQ!')
```
#### You should now see the text output on the OLED.
Try another message:
```
pmod_oled.clear()
pmod_oled.write('Python and Zynq\nProductivity & performance')
```
Clear the display when finished.
```
pmod_oled.clear()
```
System information can be captured and stored in Python variables, and written to the peripheral.
```
hostname = !hostname
#Get primary IP address
ip_addr = !hostname -I | cut -f1 -d' '
#Get CPU architecture
cpu = !cat /proc/cpuinfo | grep "model name" | head -n 1 | cut -f3 -d' '
pmod_oled.write(hostname[0] + "\nIP:" + ip_addr[0] + '\nCPU: ' + cpu[0])
pmod_oled.clear()
```
----
## Draw some patterns
The PmodOLED includes some built-in functions running in C code on the IOP. For drawing lines and rectangles, the `draw_line()` and `draw_rect()` functions are provided.
The OLED display area is 32 pixels x 128 pixels.
### Draw a line
A line can be drawn by specifying two co-ordinates: `pmod_oled.draw_line(x1, y1, x2, y2)`
You can execute the next cell, or change the co-ordinates and execute the cell below to draw another line.
`pmod_oled.clear()` should be called to clear the display if you do not want lines drawn on top of previous lines. If the bitstream is reloaded the display will also be cleared.
```
pmod_oled.draw_line(0,0,128,32)
pmod_oled.draw_line(0,32,128,0)
pmod_oled.clear()
pmod_oled.draw_line(64,0,64,32)
```
Clear the display when finished.
```
pmod_oled.clear()
```
### Draw a rectangle
You can draw a rectangle in a similar way by specifying two co-ordinates: `pmod_oled.draw_rect(x1, y1, x2, y2)`. This will draw a rectangle using the two points as opposite corners.
```
pmod_oled.draw_rect(60,5,80,25)
pmod_oled.draw_rect(105,0,120,28)
```
Clear the display when finished.
```
pmod_oled.clear()
```
----
## Create a new Python function
More functions could be implemented in the C code running on the IOP to generate other patterns. The existing functions can also be extended in Python to add more functionality.
The following cell defines a function to draw circles on the PmodOLED.
```
import math
# Draw a circle
# Screen resolution is 128x32
def draw_circle(cx,cy, r):
for i in range (0, 360):
x = cx + r * math.cos(i*math.pi/180)
if x > 127:
x = 127
if x < 0:
x = 0
y = cy + r * math.sin(i*math.pi/180)
if y > 31:
y = 31
if y < 0:
y = 0
pmod_oled.draw_line(int(x),int(y),int(x+1),int(y))
```
### Draw the circle
You can draw a circle by using the function which has just been created, specifying the centre co-ordinate and the radius.
```
pmod_oled.clear()
draw_circle(64,16,15)
```
Remember the display is 128x32 pixels. If the circle exceeds the display area it will be clipped.
```
pmod_oled.clear()
draw_circle(64,32,15)
```
Additional functionality can be added easily in Python, but note that functions implemented in Python will be slower than the C functions running directly on the IOP. (In this case, the circle co-ordinates are calculated in Python and the IOP `draw_line()` is called 360 times, which is much slower than drawing a single line with one `draw_line()` call.)
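If the number of IOP calls becomes a bottleneck, one option (a sketch, not part of the original notebook) is to approximate the circle by a modest number of straight chords, so `draw_line()` is called a few dozen times instead of 360:
```
import math

def draw_circle_chords(cx, cy, r, segments=24):
    """Approximate a circle with straight chords to reduce the number of IOP calls."""
    def clamp(v, lo, hi):
        return max(lo, min(hi, v))
    pts = []
    for i in range(segments + 1):
        ang = 2*math.pi*i/segments
        x = clamp(cx + r*math.cos(ang), 0, 127)   # display is 128 pixels wide
        y = clamp(cy + r*math.sin(ang), 0, 31)    # and 32 pixels high
        pts.append((int(x), int(y)))
    for (x1, y1), (x2, y2) in zip(pts, pts[1:]):
        pmod_oled.draw_line(x1, y1, x2, y2)

# pmod_oled.clear(); draw_circle_chords(64, 16, 14)
```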
----
## Putting it all together
Draw some patterns
```
pmod_oled.clear()
pmod_oled.draw_line(0,0,128,32)
pmod_oled.draw_rect(60,5,80,25)
pmod_oled.draw_rect(105,0,120,28)
draw_circle(16,16,16)
pmod_oled.clear()
for i in range (0,9):
draw_circle(16,16,i*2)
for i in range (0,6):
draw_circle(48,16,1+i*3)
for i in range (0,5):
draw_circle(80,16,i*4)
for i in range (0,4):
draw_circle(111,16,1+i*5)
```
# Tutorial 4: Scattering calculations with Tully's models
```
import sys
import cmath
import math
import os
import time
import h5py
import matplotlib.pyplot as plt # plots
import numpy as np
#from matplotlib.mlab import griddata
%matplotlib inline
if sys.platform=="cygwin":
from cyglibra_core import *
elif sys.platform=="linux" or sys.platform=="linux2":
from liblibra_core import *
from libra_py import units
import libra_py.models.Tully as Tully
from libra_py import tsh
from libra_py import tsh_stat
from libra_py import data_conv
from libra_py import data_savers
from libra_py import dynamics_plotting
#from libra_py import dynamics_exact
import util.libutil as comn
import libra_py.dynamics.exact.compute as compute
import libra_py.dynamics.exact.save as save
import libra_py.dynamics.exact.plot as plot
plt.rc('axes', titlesize=24) # fontsize of the axes title
plt.rc('axes', labelsize=20) # fontsize of the x and y labels
plt.rc('legend', fontsize=20) # legend fontsize
plt.rc('xtick', labelsize=16) # fontsize of the tick labels
plt.rc('ytick', labelsize=16) # fontsize of the tick labels
plt.rc('figure.subplot', left=0.2)
plt.rc('figure.subplot', right=0.95)
plt.rc('figure.subplot', bottom=0.13)
plt.rc('figure.subplot', top=0.88)
colors = {}
colors.update({"11": "#8b1a0e"}) # red
colors.update({"12": "#FF4500"}) # orangered
colors.update({"13": "#B22222"}) # firebrick
colors.update({"14": "#DC143C"}) # crimson
colors.update({"21": "#5e9c36"}) # green
colors.update({"22": "#006400"}) # darkgreen
colors.update({"23": "#228B22"}) # forestgreen
colors.update({"24": "#808000"}) # olive
colors.update({"31": "#8A2BE2"}) # blueviolet
colors.update({"32": "#00008B"}) # darkblue
colors.update({"41": "#2F4F4F"}) # darkslategray
clrs_index = ["11", "21", "31", "41", "12", "22", "32", "13","23", "14", "24"]
```
## 1. Define the model & plot the PES
```
def compute_model(q, params, full_id):
model = params["model"]
res = None
if model==1:
res = Tully.Tully1(q, params)
elif model==2:
res = Tully.Tully2(q, params)
elif model==3:
res = Tully.Tully3(q, params)
return res
def potential(q, params):
"""
Thin wrapper of the model Hamiltonians that can be used in
the fully-quantum calculations
"""
# Diabatic properties
obj = compute_model(q, params, Py2Cpp_int([0,0]))
# Adiabatic properties
nadi = len(params["E_n"])
ndof = 1
ham = nHamiltonian(nadi, nadi, ndof) # ndia, nadi, nnucl
ham.init_all(2)
ham.compute_diabatic(compute_model, q, params)
ham.compute_adiabatic(1);
obj.ham_adi = ham.get_ham_adi()
obj.dc1_adi = CMATRIXList()
for n in range(ndof):
x = ham.get_dc1_adi(n)
for i in range(nadi):
for j in range(nadi):
if i!=j:
#pass
if math.fabs(x.get(i,j).real)>1e+10:
x.set(i,j, 0.0+0.0j)
x.set(j,i, 0.0+0.0j)
obj.dc1_adi.append( x )
return obj
param_sets = [ {"model":1, "E_n":[0.0, 0.0], "nstates":2 },
{"model":2, "E_n":[0.0, 0.0], "nstates":2 },
{"model":3, "E_n":[0.0, 0.0], "nstates":2 }
]
plot_params = {"colors": colors, "clrs_index": clrs_index, "xlim":[-15, 15], "ylim":[-0.015, 0.015 ]}
dynamics_plotting.plot_surfaces(compute_model, [ param_sets[0] ], [0,1], -15.0, 15.0, 0.05, plot_params)
```
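For reference, the diabatic matrix of Tully's simple avoided crossing (model I) has a compact analytic form. The sketch below uses the parameter values from Tully's original paper, which are assumed (but not verified here) to match the defaults of `libra_py.models.Tully.Tully1` used above.
```
def tully1_diabatic(x, A=0.010, B=1.6, C=0.005, D=1.0):
    """Diabatic 2x2 Hamiltonian of Tully's simple avoided crossing (model I)."""
    if x >= 0.0:
        v11 = A*(1.0 - math.exp(-B*x))
    else:
        v11 = -A*(1.0 - math.exp(B*x))
    v12 = C*math.exp(-D*x*x)
    return np.array([[v11, v12], [v12, -v11]])

# the adiabatic surfaces are the eigenvalues of the diabatic matrix
print(np.linalg.eigvalsh(tully1_diabatic(0.0)))  # approximately [-0.005, 0.005] at the crossing
```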
## 2. Run the calculations
```
model_params = dict(param_sets[0])
properties_to_save = [ "timestep", "time", "Ekin_dia", "Ekin_adi", "Epot_dia",
"Epot_adi", "Etot_dia", "Etot_adi", "norm_dia", "norm_adi",
"pop_dia", "pop_adi", "q_dia", "q_adi", "q2_dia", "q2_adi",
"p_dia", "p_adi", "p2_dia", "p2_adi",
"denmat_dia", "denmat_adi", "custom_pops",
"PSI_dia", "PSI_adi", "reciPSI_dia", "reciPSI_adi" ]
params = { "nsteps":200, "dt":10.0, "progress_frequency":0.1,
"rmin":[-35.0], "rmax":[35.0], "dx":[0.1], "nstates":2,
"x0":[-10.0], "p0":[20.0], "istate":[1,0], "masses":[2000.0], "k":[0.001],
"integrator":"SOFT",
"prefix":"Tut4-1",
"hdf5_output_level":3, "compression_level":[0,0,0], "use_compression":0,
"mem_output_level":3,
"txt_output_level":0,
"properties_to_save": properties_to_save,
"custom_pops":[ [0, [-40], [-5]], [0, [-5], [5]], [0, [5], [40]],
[1, [-40], [-5]], [1, [-5], [5]], [1, [5], [40]]
]
}
params1 = dict(params)
params1.update({ "prefix":"Tut4-1" })
res = compute.run_relaxation( params1, potential, model_params )
```
## 3. Plot the results
```
with h5py.File("Tut4-1/data.hdf", 'r') as f:
t = list(f["time/data"][:])
print(t)
#print(list(f["boxed_pops/0/data"][:, 0, 0]))
print(list(f["custom_pops/data"][:, 0, 0, 0]))
print(list(f["pop_adi/data"][:, 0, 0]))
plot_params = {"prefix":"Tut4-1", "filename":"mem_data.hdf", "hdf5_output_level":2,
"which_dofs":[0], "which_adi_states":[0, 1], "which_dia_states":[0, 1],
"properties_to_save":
[ "timestep", "time", "Ekin_dia", "Ekin_adi", "Epot_dia",
"Epot_adi", "Etot_dia", "Etot_adi", "norm_dia", "norm_adi",
"pop_dia", "pop_adi", "q_dia", "q_adi", "q2_dia", "q2_adi",
"p_dia", "p_adi", "p2_dia", "p2_adi",
"denmat_dia", "denmat_adi", "custom_pops",
"PSI_dia", "PSI_adi", "reciPSI_dia", "reciPSI_adi" ]
}
plot.plot_hdf5(plot_params)
def plot_custom_pops(plot_params):
"""
This function is meant to plot the results stored in the hdf files generated by the exact dynamics runs
Args:
prefix ( string ): the name of the directory containing the input HDF5 file
This directory will also be used to output the generated picture files [ default : "out"]
filename ( string ): name of the HDF5 file to read [ default: "data.hdf"]
output_level ( int ): the level of info contained in the HDF5 file [ default : 3]
which_adi_states ( list of ints ) : indices of the adiabatic states to print [ default: [0] ]
which_dia_states ( list of ints ) : indices of the diabatic states to print [ default: [0] ]
colors ( dictionary ): the definition of the colors to use
clrs_index ( list of strings ) : defines the mapping of the colors on integers and vice versa
"""
plt.rc('axes', titlesize=24) # fontsize of the axes title
plt.rc('axes', labelsize=20) # fontsize of the x and y labels
plt.rc('legend', fontsize=20) # legend fontsize
plt.rc('xtick', labelsize=16) # fontsize of the tick labels
plt.rc('ytick', labelsize=16) # fontsize of the tick labels
plt.rc('figure.subplot', left=0.2)
plt.rc('figure.subplot', right=0.95)
plt.rc('figure.subplot', bottom=0.13)
plt.rc('figure.subplot', top=0.88)
colors = {}
colors.update({"11": "#8b1a0e"}) # red
colors.update({"12": "#FF4500"}) # orangered
colors.update({"13": "#B22222"}) # firebrick
colors.update({"14": "#DC143C"}) # crimson
colors.update({"21": "#5e9c36"}) # green
colors.update({"22": "#006400"}) # darkgreen
colors.update({"23": "#228B22"}) # forestgreen
colors.update({"24": "#808000"}) # olive
colors.update({"31": "#8A2BE2"}) # blueviolet
colors.update({"32": "#00008B"}) # darkblue
colors.update({"41": "#2F4F4F"}) # darkslategray
clrs_index = ["11", "21", "31", "41", "12", "22", "32", "13","23", "14", "24"]
# Parameters and dimensions
critical_params = [ ]
default_params = { "prefix":"out", "filename":"data.hdf", "hdf5_output_level":2,
"colors":colors, "clrs_index":clrs_index,
"figs":[]
}
comn.check_input(plot_params, default_params, critical_params)
filename = plot_params["filename"]
prefix = plot_params["prefix"]
hdf5_output_level = plot_params["hdf5_output_level"]
colors = plot_params["colors"]
clrs_index = plot_params["clrs_index"]
figs = plot_params["figs"]
out_prefix = prefix
with h5py.File(F"{prefix}/{filename}", 'r') as f:
t = None
if "time" in properties_to_save:
t = list(f["time/data"][:])
#=============== Populations ======================
if t != None:
nfigs = len(figs)
for ifig in range(nfigs):
plt.figure(ifig, figsize=(12, 12)) # dpi=300, frameon=False)
plt.subplot(1, 1, 1)
#plt.ylim(0, 1)
plt.title(F'{figs[ifig][0]}' )
plt.xlabel('Time, a.u.')
plt.ylabel('Population')
nlines = len(figs[ifig])
for i in range(1, nlines):
line_label = figs[ifig][i][0]
pop_type = figs[ifig][i][1]
istate = figs[ifig][i][2]
line_color_index = figs[ifig][i][3]
clr = colors[clrs_index[ line_color_index ]]
Pi = list(f["custom_pops/data"][:, pop_type, istate, 0])
plt.plot(t, Pi, label=F'{line_label}', linewidth=10, color = clr)
plt.legend()
plt.savefig(F"{prefix}/Custom_pops_{i-1}.png", dpi=300)
plt.savefig(F"{prefix}/Custom_pops_{i-1}.pdf", dpi=300)
plt.show()
plt.close()
_plot_params = { "prefix":"Tut4-1", "filename":"mem_data.hdf", "hdf5_output_level":2,
"colors":colors, "clrs_index":clrs_index,
"figs":[ [ "Diabatic pops",
["reflection on the lower state", 0, 0, 0],
["unreacted on the lower state", 1, 0, 1],
["transmission on the lower state", 2, 0, 2]
],
[ "Diabatic pops",
["reflection on the upper state", 0, 1, 0],
["unreacted on the upper state", 1, 1, 1],
["transmission on the upper state", 2, 1, 2]
],
[ "Adiabatic pops",
["reflection on the lower state", 3, 0, 0],
["unreacted on the lower state", 4, 0, 1],
["transmission on the lower state", 5, 0, 2]
],
[ "Adiabatic pops",
["reflection on the upper state", 3, 1, 0],
["unreacted on the upper state", 4, 1, 1],
["transmission on the upper state", 5, 1, 2]
]
]
}
plot_custom_pops(_plot_params)
```
## Scattering probabilities
Now, let's repeat the calculations many times with different initial momenta and save the results in separate folders.
```
prefix = "Tut4-2"
model_params = dict(param_sets[0])
properties_to_save = [ "timestep", "time", "Ekin_dia", "Ekin_adi", "Epot_dia",
"Epot_adi", "Etot_dia", "Etot_adi", "norm_dia", "norm_adi",
"pop_dia", "pop_adi", "q_dia", "q_adi", "q2_dia", "q2_adi",
"p_dia", "p_adi", "p2_dia", "p2_adi",
"denmat_dia", "denmat_adi", "custom_pops",
"PSI_dia", "PSI_adi", "reciPSI_dia", "reciPSI_adi" ]
params = { "nsteps":200, "dt":10.0, "progress_frequency":0.1,
"rmin":[-35.0], "rmax":[35.0], "dx":[0.1], "nstates":2,
"x0":[-10.0], "p0":[20.0], "istate":[1,0], "masses":[2000.0], "k":[0.001],
"integrator":"SOFT",
"prefix":"Tut4-2",
"hdf5_output_level":0, "compression_level":[0,0,0], "use_compression":0,
"mem_output_level":3,
"txt_output_level":0,
"properties_to_save": properties_to_save,
"custom_pops":[ [0, [-40], [-5]], [0, [-5], [5]], [0, [5], [40]],
[1, [-40], [-5]], [1, [-5], [5]], [1, [5], [40]]
]
}
if not os.path.isdir(prefix):
os.mkdir(prefix)
P0 = [5.0, 6.0, 7.0, 8.0, 10.0, 12.0, 13.0, 15.0, 18.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0]
ninit = len(P0)
for i in range(ninit):
print(F"=============== initial momentum {P0[i]} ============")
if not os.path.isdir(F"{prefix}/{i}"):
os.mkdir(F"{prefix}/{i}")
params1 = dict(params)
params1.update({"prefix": F"{prefix}/{i}", "p0":[P0[i] ], "nsteps":int(200 * (200.0/P0[i])) })
compute.run_relaxation( params1, potential, model_params )
P0 = [ 5.0, 6.0, 7.0, 8.0, 10.0, 12.0, 13.0, 15.0, 18.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0]
ninit = len(P0)
plt.figure(1, figsize=(48, 12)) # dpi=300, frameon=False)
plt.subplot(1, 3, 1)
plt.title("Unreacted pop")
plt.xlabel('Time, a.u.')
plt.ylabel('Population')
for i in [7]: #range(ninit):
nclrs = len(clrs_index)
clr = colors[clrs_index[ i % nclrs]]
with h5py.File(F"Tut4-2/{i}/mem_data.hdf", 'r') as f:
t = list(f["time/data"][:])
p0 = list(f["custom_pops/data"][:, 4, 0, 0]) # adiabatic not reacted, on state 0
p1 = list(f["custom_pops/data"][:, 4, 1, 0]) # adiabatic not reacted, on state 1
p_unreact = []
sz = len(p0)
for j in range(sz):
p_unreact.append(p0[j] + p1[j])
#print(F" === init cond = {i} ===")
#print(p)
plt.plot(t, p_unreact, label=F'{i}', linewidth=10, color = clr)
plt.legend()
plt.subplot(1, 3, 2)
plt.title("Reflected pop")
plt.xlabel('Time, a.u.')
plt.ylabel('Population')
for i in [7]: #range(ninit):
nclrs = len(clrs_index)
clr = colors[clrs_index[ i % nclrs]]
with h5py.File(F"Tut4-2/{i}/mem_data.hdf", 'r') as f:
t = list(f["time/data"][:])
p0 = list(f["custom_pops/data"][:, 3, 0, 0]) # adiabatic not reacted, on state 0
p1 = list(f["custom_pops/data"][:, 3, 1, 0]) # adiabatic not reacted, on state 1
p_refl = []
sz = len(p0)
for j in range(sz):
p_refl.append(p0[j] + p1[j])
#print(F" === init cond = {i} ===")
#print(p)
plt.plot(t, p_refl, label=F'{i}', linewidth=10, color = clr)
plt.legend()
```
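To turn the time traces above into scattering probabilities, one can simply read the last time step of each run. The sketch below (not part of the original tutorial) assumes all the runs in the loop above completed, and uses the `custom_pops` indexing defined earlier: entries 3, 4 and 5 are the adiabatic reflected, unreacted and transmitted regions, and the next index selects the state.
```
refl_lower, trans_lower, trans_upper = [], [], []
for i in range(ninit):
    with h5py.File(F"Tut4-2/{i}/mem_data.hdf", 'r') as f:
        pops = f["custom_pops/data"][:]   # indexed as [time, custom_pops entry, state, 0], as above
        refl_lower.append(pops[-1, 3, 0, 0])
        trans_lower.append(pops[-1, 5, 0, 0])
        trans_upper.append(pops[-1, 5, 1, 0])
plt.figure(figsize=(8, 6))
plt.plot(P0, trans_lower, 'o-', label="transmission, lower state")
plt.plot(P0, trans_upper, 's-', label="transmission, upper state")
plt.plot(P0, refl_lower, '^-', label="reflection, lower state")
plt.xlabel('Initial momentum, a.u.')
plt.ylabel('Probability')
plt.legend()
plt.show()
```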
# Classification of quantum states with high dimensional entanglement
## Circuits and computations
Version compatible with the 1st and 2nd pilot studies
```
import numpy as np
import copy
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, Aer, execute, transpile, assemble
from qiskit.tools.visualization import *
from qiskit.ignis.mitigation.measurement import (complete_meas_cal, tensored_meas_cal,
CompleteMeasFitter, TensoredMeasFitter)
import json
from scipy.signal import savgol_filter
import time
from qiskit.tools.monitor import job_monitor
from o_utils import ora # classifier utilities
from o_plot import opl # utilities for result plot
from c_utils import new_cut # circuit building utilities
def json_dic_loader(dic_name):
    # use a context manager so the file handle is closed after loading
    with open(data_directory+dic_name+'.json') as f:
        return json.load(f)
```
#markdown for safety on demo
def json_dic_dumper(dic, dic_name):
with open(data_directory+dic_name+'.json', 'w') as f:
json.dump(dic,f)
```
# common code for calling the classifier for ideal device and for real devices
def add_single_dic(target_data_list):
start_time = time.time()
print("started",time.strftime('%d/%m/%Y %H:%M:%S'),mitig_name,
"mitigation",mit_str,o_metric,model_name)
# added for D,S,M choice. Mainstream : mixed set of 20 states
first = 0
last = nb_states
if unique_char == "D":
last = int(nb_states/2)
elif unique_char == "S":
first = int(nb_states/2)
# get the classifier error curve in function of the number of shot and the "safe shot number"
error_curve, safe_rate, ernb = ora.provide_error_curve(PD_model=model_dic[model_name][first:last,:],
PD_test=PD_test[first:last,:],
trials=trials,
window=window,
epsilon=epsilon,
max_shots=max_shots,
pol=pol,
verbosality=verbosality)
tail = savgol_filter(ernb, window, pol, axis=0)
len_curve = len(error_curve)
safe_shot_nb = len_curve - int((window-1)/2) # OK
print('safe_shot_nb',safe_shot_nb, 'safe_rate',safe_rate, "nb trials:",trials)
err_rates = tail[int((window-1)/2),:]/trials
err_rate_max = np.max(err_rates)
err_rate_min = np.min(err_rates)
r=4
print("savgol interpolated error rate mean:", np.round(np.mean(err_rates),r),
"min:", np.round(err_rate_min,r),
"max:", np.round(err_rate_max,r), "for",
[ien for ien, jen in enumerate(err_rates) if jen == err_rate_max])
end_time = time.time()
#save the data in a list of dictionaries :
single_dic={"project":mitig_name,
"id_gates":id_gates,
"mitigation":mit_str,
"model":model_name,
"metric":o_metric, "device":project_device,
"curve_length":len_curve,
"shots": safe_shot_nb,
"shots_rate": safe_rate,
"error_curve":error_curve,
"trials":trials,"window":window,
"epsilon":epsilon,"SG_pol": pol,
"computation_time":end_time-start_time,
"time_completed":time.strftime('%d/%m/%Y %H:%M:%S'),
"trials":trials,
"QV": QV_dic[project_device],
"fidelity": fidelity_dic[project_device],
"error_nb":ernb}
target_data_list.append(single_dic)
print("completed",time.strftime('%d/%m/%Y %H:%M:%S'),mitig_name,
"mitigation",mit_str,o_metric,model_name,"\n")
```
## Set up the simulator and layout for 5 qubits
```
simulator = Aer.get_backend('qasm_simulator')
#specify the layout of the devices
used_qubits = 5
qubit_list = [0,1,2,3,4]
#short_version = False
#program_name="QAD" # 1st pilot project GHZ Psi+ / W Phi+
program_name="AL2" # 2d pilot project W Psi+ / Wbar Phi+
Flag_char = "DS" # this for a mix of two types of separable states
if len(Flag_char) >= 2:
unique_char = "M"
else:
unique_char = Flag_char
# These dictionaries for the devices used in the study
if program_name == "QAD":
fidelity_dic = {'ibmq_athens': 0.925110, 'ibmq_valencia': 0.809101, 'ibmq_ourense': 0.802380,
"ibmqx2": 0.627392, 'ibmq_santiago': 0.919399, 'ibmq_vigo': 0.908840, 'ideal_device': 1.0}
data_directory = "data_files/"
elif program_name == "AL2":
fidelity_dic = {'ibmq_athens': 0.910145, 'ibmq_valencia': 0.794262, 'ibmq_ourense': 0.818974,
"ibmqx2": 0.359528, 'ibmq_santiago': 0.900024, 'ibmq_vigo': 0.841831, 'ideal_device': 1.0}
data_directory = "data2_files/"
QV_dic = {'ibmq_athens': 32.0, 'ibmq_valencia': 16.0, 'ibmq_ourense': 8.0,
"ibmqx2": 8.0, 'ibmq_santiago': 32.0, 'ibmq_vigo': 16.0, 'ideal_device': np.inf}
dev_dic = {'ibmq_santiago': "San",'ibmq_athens': "Ath", 'ibmq_valencia': "Val", 'ibmq_vigo': 'Vig','ibmq_ourense': "Our",
"ibmqx2": 'Yor', 'ideal_device': "Ide"}
# specify the device: here first the ideal noise-free device
project_device = 'ideal_device'
device_name = dev_dic[project_device]
# specify the nb of id gates between state creation and measurements
# zero for the ideal device
id_gates = 0
str_nb_id = str(id_gates)
zfilled = str_nb_id.zfill(4-len(str_nb_id))
# tail of the file names for RAM storage
mitig_name = program_name + "_" + device_name
project_name = mitig_name + "_" + unique_char + zfilled
print(mitig_name)
print(project_name)
# establish the result label list
# meas_calibs will be used for mitigation in the real device section
qr = QuantumRegister(used_qubits)
meas_calibs, label_list = complete_meas_cal(qubit_list=qubit_list, qr=qr, circlabel='mcal')
nb_labels=len(label_list)
print(nb_labels,label_list)
len(meas_calibs)
# permutation list
# here it is simple to write down the list,
# but a version using itertools will be welcome for >5 qubits projects (see the sketch after this cell)
if used_qubits == 5:
q_perm = [[0, 1, 2, 3, 4], [0, 1, 3, 2, 4], [0, 1, 4, 2, 3], [0, 2, 3, 1, 4], [0, 2, 4, 1, 3],
[0, 3, 4, 1, 2], [1, 2, 3, 0, 4], [1, 2, 4, 0, 3], [1, 3, 4, 0, 2], [2, 3, 4, 0, 1]]
else:
print("work in progress - meanwhile please provide the list of permutations")
```
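As suggested in the comment above, the same list can be generated programmatically. The sketch below assumes the same layout pattern (a 3-qubit block followed by the remaining qubits for the Bell pair) and, for `used_qubits = 5`, reproduces the hand-written `q_perm` exactly.
```
from itertools import combinations

def build_permutations(n_qubits, k_first=3):
    """All layouts placing a k_first-qubit state on one subset and the remaining qubits after it."""
    perms = []
    for combo in combinations(range(n_qubits), k_first):
        rest = [q for q in range(n_qubits) if q not in combo]
        perms.append(list(combo) + rest)
    return perms

print(build_permutations(used_qubits) == q_perm)  # True for the 5-qubit case
```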
## Create the quantum states
```
# define the two subsets of 10 separable states
if program_name == "QAD":
state_1a = ["W","Phi+"]
state_1b = ["GHZ","Psi+"]
elif program_name == "ALT" or "AL2":
state_1a = ["W","Psi+"]
state_1b = ["Wbar","Phi+"]
l_states = state_1a+state_1b
l_states
# version 20 circuits for demonstration
# (in the version run on real devices: two batches of 10 circuits, "shallow" and "deep")
# these circuits limited to state creation are ready to be saved
# for ultimately building circuits adapted to noisy simulator and real devices
# as option, these circuits will include a row of id gates between creation and measurements
circ_ori = []
for i_s in range(0,len(l_states),2):
for perm in q_perm:
mycircuit = QuantumCircuit(used_qubits, used_qubits)
mycircuit = new_cut.circuit_builder(mycircuit, perm, l_states[i_s],l_states[i_s+1])
circ_ori.append(mycircuit)
# add measurement section to the circuit set newly created:
nb_states = len(circ_ori)
circ_ideal = copy.deepcopy(circ_ori)
for i_state in range(nb_states):
new_cut.add_barrier_and_measure(circ_ideal[i_state],qubit_list)
ideal_dic = {}
```
## Obtain result distributions on noise free simulator
#### You may skip this section and go to:
#### "Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier"
```
# execute on noise free simulator
s_sim = 12000
job_simul = execute(circ_ideal, backend=simulator, shots=s_sim)
tot_results_simul = job_simul.result()
# establish a dictionary of count results on noise free simulator:
# (this step is only useful if ram storage is performed)
void_counts = dict(zip(label_list, np.zeros(2**used_qubits)))
tot_results_sim_dic = {}
for i_state in range(nb_states):
counts_simul = copy.deepcopy(void_counts)
counts_simul.update(tot_results_simul.get_counts(i_state))
ideal_dic[str(i_state)]=counts_simul
```
#markdown for security
json_dic_dumper(ideal_dic,"ideal_dic_"+project_name)
Example of circuit for separable state of the first type ($W\otimes\Phi^+\; or\; W\otimes\Psi^+$):
```
i_state_test = 0  # circuits 0-9 correspond to the first type (see the creation loop above)
print(device_name, "circuit #",i_state_test)
circ_ideal[i_state_test].draw(output='mpl')
print(device_name, "circuit #",i_state_test)
plot_histogram(ideal_dic[str(i_state_test)],
legend=['noise free simulation'],
color = "b", figsize=(10.,5.))
```
Example of circuit for separable state of the second type ($GHZ\otimes\Psi^+ \; or\; \bar{W}\otimes\Phi^+$):
```
i_state_test = 10
print(device_name, "circuit #",i_state_test)
circ_ideal[i_state_test].draw(output='mpl')
print(device_name, "circuit #",i_state_test)
plot_histogram(ideal_dic[str(i_state_test)],
legend=['noise free simulation'],
color = "b", figsize=(10.,5.))
```
### Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier
```
# try loading the dictionary of results if its creation was skipped
if len(ideal_dic) == 0:
ideal_dic = json_dic_loader("ideal_dic_"+project_name)
nb_states = len(ideal_dic)
nb_labels = len(list(ideal_dic.values())[0])
s_sim = sum(list(ideal_dic.values())[0].values())
PD_ideal = np.ndarray((nb_states,nb_labels))
for i_state in range(nb_states):
PD_ideal[i_state, :] = list(ideal_dic[str(i_state)].values())
# now a little trick to get the ideal values from the simulator approximated values
with np.errstate(divide='ignore'): # ignore the divide by zero warning
PD_ideal = 1/np.round(s_sim/(PD_ideal))
# have a look at the matrix head and tail:
print("first and last state probability distributions:")
print(np.round(np.vstack((PD_ideal[0:1,:],PD_ideal[-1:,:])),4))
```
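For intuition only (the actual classifier lives in `o_utils.ora`), a measured distribution can be assigned to the closest model state under the Jensen-Shannon metric, one of the two metrics used in this study. The sketch below needs a reasonably recent SciPy for `jensenshannon`.
```
from scipy.spatial.distance import jensenshannon

def classify_distribution(pd_measured, pd_model):
    """Index of the model state whose distribution is closest to the measured one."""
    distances = [jensenshannon(pd_measured, pd_model[k, :])
                 for k in range(pd_model.shape[0])]
    return int(np.argmin(distances))

# a distribution taken from state 0 of the ideal model should map back to state 0
print(classify_distribution(PD_ideal[0, :], PD_ideal))
```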
## Monte Carlo simulation for the ideal device
```
# here will be appended the data we want for the curve plot
ideal_data_list=[]
```
### you may skip this cell and get stored curves by running the next cell
```
# you may want to skip this cell as it will require a long time
# because of the high number of trials required by the Monte Carlo simulation for each nb o shots value
# the following values are defined in the study summary (readme file):
trials=100 # to be set to 10000 if not demo
window=5 # shorter window than for the real device counts
epsilon = .001
min_shots = 5
max_shots = 100
pol=2
subset = None # variable not used here
verbosality = 5 # printing step for intermediate results when increasing the experiment shot number
PD_test = PD_ideal
mitigation_dic = {"Na": None}
o_metrics_desired = ['jensenshannon', 'sqeuclidean']
model_dic = {"ideal_sim": PD_ideal}
for mit_str, mitigation in mitigation_dic.items():
if mitigation != None: # thus only for counts on real device
PD_exper = get_clean_matrix(empirical_dic, mitigation=mitigation,
m_filter=meas_filter)
for o_metric in o_metrics_desired:
for model_name in model_dic.keys():
add_single_dic(ideal_data_list)
```
markdown for safety
json_dic_dumper(ideal_data_list,"ideal_device_data_list_"+project_name)
```
# get the stored results of the Monte Carlo simulation in case you skipped the previous step
if len(ideal_data_list) == 0:
ideal_data_list = json_dic_loader("ideal_device_data_list_"+project_name)
# have a look at the mean error rate curves and error rate at save shot number n_s
# NB the r_hat_mean curves and the r_hat_max errors reported in the legend are the unsmoothed values
opl.plot_curves(ideal_data_list,np.array([0,1]),
"Jensen-Shannon vs squared euclidean distance - $\epsilon=0.001$" ,
["model"], ["device","metric"],
right_xlimit = 20, bottom_ylimit = -0.001, top_ylimit = 0.05)
```
# Real device section
```
from qiskit import IBMQ
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
project_device = 'ibmq_valencia'# you may choice here a different backend
device_name = dev_dic[project_device]
mitig_name = program_name + "_" + device_name
print(mitig_name)
#determine here the backend
device = provider.get_backend(project_device) # the backend names are listed here above
properties = device.properties()
coupling_map = device.configuration().coupling_map
```
# obtain mitigation filter
#markdown for demo
nb_shots_cal = 8192 # set here the number of shots for the calibration phase
print("backend:", device.name(), "qubit_list:", qubit_list)
job_cal = execute(meas_calibs, backend=device, shots=nb_shots_cal)
print(job_cal.job_id())
job_monitor(job_cal)
time_exp = time.strftime('%d/%m/%Y %H:%M:%S')
print("DMY: ",time_exp)
#markdown for demo
#here we save mitigation results
cal_results = job_cal.result()
cal_results_dic = cal_results.to_dict()
#to make date in dictionary serializable if there is a 'date' key:
if 'date' in cal_results_dic.keys():
cal_results_dic['date']=str(cal_results_dic['date'])
#markdown for demo and security
#dump
json_dic_dumper(cal_results_dic,"cal_results_dic_"+ mitig_name)
```
# retrieve the corresponding measurement mitigation filter obtained at experimental time
# use a fake job because use of the from_dict method
simulator = Aer.get_backend('qasm_simulator')
fake_job_cal = execute(meas_calibs, backend=simulator, shots=1)
fake_cal_results = fake_job_cal.result()
cal_results_dic = json_dic_loader("cal_results_dic_"+mitig_name)
if 'date' in cal_results_dic.keys():
str(cal_results_dic['date'])
cal_results = fake_cal_results.from_dict(cal_results_dic)
meas_fitter = CompleteMeasFitter(cal_results, label_list, qubit_list=qubit_list, circlabel='mcal')
meas_filter = meas_fitter.filter
# have a look at the average measurement fidefily of this device:
print("Average Measurement Fidelity was: %f" % meas_fitter.readout_fidelity(), "for",project_device)
```
### Transpile the basic circuits for running on real device
In this demo, these are not the circuits which were actually run on real devices (not the same transpiler seed).
In this demo the optimization level is set to 2 instead of the 3 used in the real experiments, for speed and also because at the moment a transpiler error occurs for ibmqx2: 'Maximum iteration reached. max_iteration=1000'
```
id_gates = 0
str_nb_id = str(id_gates)
zfilled = str_nb_id.zfill(4-len(str_nb_id))
project_name = mitig_name + "_" + unique_char + zfilled
print(project_name)
# transpile
verbose = True
summary_dic = {}
seed_transpiler_list = list(range(nb_states))
real_circs = []
start_time = time.strftime('%d/%m/%Y %H:%M:%S')
print("Start at DMY: ",start_time)
for i_state in list(range(nb_states)):
# prepare circuit to be transpiled
circuit = copy.deepcopy(circ_ori[i_state])
if id_gates > 0:
circuit.barrier()
for id_gates_index in range(id_gates):
for index, value in enumerate(qubit_list):
circuit.id(value)
new_cut.add_barrier_and_measure(circuit, qubit_list)
summary = []
depth_list = []
Q_state_opt_new = transpile(circuit, backend=device,
coupling_map = coupling_map,
seed_transpiler=seed_transpiler_list[i_state],
optimization_level=2,
initial_layout=qubit_list)
summary_dic[i_state] = {"depth": Q_state_opt_new.depth(),
'circuit':Q_state_opt_new}
real_circs.append(Q_state_opt_new)
if verbose:
print("circuit %2i" % i_state,"length",summary_dic[i_state]["depth"],
"DMY: ",time.strftime('%d/%m/%Y %H:%M:%S'))
end_time = time.strftime('%d/%m/%Y %H:%M:%S')
print("Completed at DMY: ",end_time)
i_state_test = 10
print(project_device, "circuit #",i_state_test,
"circuit length:",real_circs[i_state_test].depth()) #summary_dic[i_state_test]['depth'])
# you may want to skip this if large nb of id gates before measurement
real_circs[i_state_test].draw(output='mpl')
#check a circuit on noise-free simulator
job_simul = execute(real_circs[i_state_test], backend=simulator, shots=s_sim)
print(project_device, "circuit #",i_state_test, "on noise free simulator")
plot_histogram(job_simul.result().get_counts(),
legend=['noise free simulation'],
color = "b", figsize=(10.,5.))
```
# run job
#markdown for demo
#run the circuits
nb_shots = 8192
print("backend:", device.name(), "qubit_list:", qubit_list)
time_exp = time.strftime('%d_%m_%Y_%H_%M_%S')
print("DMY: ",time_exp)
job_real = execute(real_circs, backend=device, optimization_level=0, shots=nb_shots)
job_real_id = job_real.job_id()
print("job id:", job_real_id)
job_monitor(job_real)
time_exp = time.strftime('%d_%m_%Y_%H_%M_%S')
print("DMY: ",time_exp, "job id:", job_real_id)
tot_results_real = job_real.result()
empirical_dic ={}
for i_state_count, state_count in enumerate(tot_results_real.get_counts()):
empirical_dic[str(i_state_count)] = state_count
#markdown for safety
json_dic_dumper(job_real_id,"job_real_id_"+ project_name)
#markdown for safety at demo
json_dic_dumper(empirical_dic,"experimental_"+ project_name)
#markdown for demo
#2d JOB RUN
nb_shots = 8192
#run the circuits
print("backend:", device.name(), "qubit_list:", qubit_list)
time_exp = time.strftime('%d_%m_%Y_%H_%M_%S')
print("DMY: ",time_exp)
job_test = execute(real_circs, backend=device, optimization_level=0, shots=nb_shots)
job_test_id = job_test.job_id()
print("job id:", job_test_id)
job_monitor(job_test)
time_exp = time.strftime('%d_%m_%Y_%H_%M_%S')
print("DMY: ",time_exp, "job id:", job_test_id)
tot_results_test = job_test.result()
test_dic ={}
for i_state_count, state_count in enumerate(tot_results_test.get_counts()):
test_dic[str(i_state_count)] = state_count
#markdown for safety at demo
json_dic_dumper(job_test_id,"job_test_id_"+ project_name)
json_dic_dumper(test_dic,"test_"+ project_name)
### Load the transpiled circuits that were actually run
##### legacy: valid only for the GHZ Psi+ / W Phi+ combination of the 1st pilot project
Otherwise, go instead to:
#### "Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier"
```
#changing keys of dictionary for merging:
def key_change(ini_dict, i_subset):
ini_list = []
len_ini = len(ini_dict)
for i in range(len_ini):
ini_list.append(str(i+i_subset*len_ini))
return dict(zip(ini_list, list(ini_dict.values())))
if program_name == "QAD":
#retrieve the data corresponding to the 1st project
lfc = list(Flag_char)
circ_ideal =[]
empirical_dic = {}
for i_subset, subset in enumerate(lfc):
qasm_circs_dic = json_dic_loader('qasm_circs_dic_QAD_'+device_name+'_'+ subset + zfilled)
j=0 # j included for project with several transpilation sessions for each device - not used here
qasm_circs = qasm_circs_dic[str(j)]
nb_circs = len(qasm_circs)
for i_circs in range(nb_circs):
circ_ideal.append(QuantumCircuit().from_qasm_str(qasm_circs[i_circs]))
empirical_dic = {**empirical_dic,
**key_change(json_dic_loader("experimental"+"_"+mitig_name +"_"\
+subset+zfilled), i_subset)}
test_dic = copy.deepcopy(empirical_dic)
#nb_states = len(circ_ideal)
```
### Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier
```
if program_name == "AL2":
empirical_dic = json_dic_loader('experimental_'+project_name)
test_dic = json_dic_loader('test_'+project_name)
def rectify_counts(tot_res, test_cqi,mitigation,m_filter) :
void_counts = dict(zip(label_list, np.zeros(2**used_qubits)))
try:
counts_results_real_test = tot_res[str(test_cqi)]
except KeyError as error:
counts_results_real_test = tot_res[test_cqi]
raw_counts_test = copy.deepcopy(void_counts)
raw_counts_test.update(counts_results_real_test)
if mitigation:
mitigated_results_test = meas_filter.apply(raw_counts_test, method = 'least_squares')
returned_counts = copy.deepcopy(void_counts)
returned_counts.update(mitigated_results_test)
else:
returned_counts = copy.deepcopy(raw_counts_test)
return returned_counts
```
```
def get_clean_matrix(dic, mitigation,m_filter):
clean_matrix = np.ndarray((nb_states,nb_labels))
for i_state in range(nb_states):
rectified_counts = rectify_counts(dic,i_state, mitigation,m_filter) # get a rectified counts dictionary
clean_matrix[i_state, :] = list(rectified_counts.values())
clean_matrix = clean_matrix/clean_matrix.sum(axis=1, keepdims=True)
return clean_matrix
# We need to create a first matrix version. It will then vary for each considered set of distribution
mitigation = False
PD_exper = get_clean_matrix(empirical_dic, mitigation=mitigation,
m_filter=meas_filter)
print("first and last state probability distributions:")
print(np.round(np.vstack((PD_exper[0:1,:],PD_exper[-1:,:])),3))
if program_name == "QAD":
PD_test = copy.deepcopy(PD_exper)
elif program_name == "AL2":
mitigation = False
PD_test = get_clean_matrix(test_dic, mitigation=mitigation,
m_filter=meas_filter)
print("first and last state probability distributions:")
print(np.round(np.vstack((PD_test[0:1,:],PD_test[-1:,:])),3))
```
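As a quick sanity check (illustrative only, and not necessarily how the `fidelity_dic` values above were obtained), the agreement between the measured and ideal distributions can be summarised by the average Bhattacharyya coefficient over all states:
```
# average Bhattacharyya coefficient between empirical and ideal distributions
avg_overlap = np.mean([np.sum(np.sqrt(PD_exper[k, :]*PD_ideal[k, :]))
                       for k in range(nb_states)])
print("average classical overlap vs ideal:", np.round(avg_overlap, 6))
```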
## Monte Carlo simulation for the real device
```
# here will be appended the data we want for the final plot of this notebook
empirical_data_list=[]
```
### you may want to skip this cell and get stored curves by running the next cell
```
# you may want to skip this cell as it will require a long time
# because of the high number of trials required by the Monte Carlo simulation for each nb o shots value
# the following values are defined in the study summary notebook:
trials=100 # should be 1000 if not demo
window=11
epsilon = .001
max_shots = 500
pol=2
verbosality = 10 # printing step for intermediate results when increasing the experiment shot number
# In this section you can easily make your choice of combinations:
# mitigation or not, metric, model
mitigation_dic = {"no":False, "yes" : True}
#mitigation_dic = {"no":False}
#mitigation_dic = {"yes" : True}
o_metrics_desired = ['jensenshannon', 'sqeuclidean']
#o_metrics_desired = ['jensenshannon']
#o_metrics_desired = ['sqeuclidean']
model_dic = {"empirical": PD_exper, "ideal_sim": PD_ideal}
#model_dic = {"empirical": PD_exper}
#model_dic = {"ideal_sim": PD_ideal}
# Obtain a sequence of results in form of a list of dictionaries
for mit_str, mitigation in mitigation_dic.items():
# here we toggle PD_exper as we toggled mitigation status
PD_exper = get_clean_matrix(empirical_dic, mitigation=mitigation,
m_filter=meas_filter)
PD_test = get_clean_matrix(test_dic, mitigation=mitigation,
m_filter=meas_filter)
for o_metric in o_metrics_desired:
print(project_name, model_dic.keys(), o_metric)
for model_name in model_dic.keys():
add_single_dic(empirical_data_list)
```
markdown for security
json_dic_dumper(empirical_data_list,'Tnemp_data_list_'+project_name)
```
# get the stored results of the Monte Carlo simulation in case you skipped the previous step
if len(empirical_data_list) == 0:
empirical_data_list = json_dic_loader('Nemp_data_list_'+project_name)
# have a look at the mean error rate curves and error rate at save shot number n_s
# NB the r_hat_mean curves and the r_hat_max errors reported in the legend are the unsmoothed values
opl.plot_curves(ideal_data_list + empirical_data_list,
np.array(range(2+len(empirical_data_list))),
"$\epsilon=0.001$" , ["device"],
["model","metric","mitigation","id_gates"],
right_xlimit = 80, bottom_ylimit = -0.02, top_ylimit = 1)
# audible notification when the run completes (winsound is Windows-only)
import sys
if sys.platform.startswith("win"):
    import winsound
    duration = 2000  # milliseconds
    freq = 800  # Hz
    winsound.Beep(freq, duration)
import qiskit.tools.jupyter
%qiskit_version_table
```
# Temporal Difference: On-policy n-Tuple Expected Sarsa, Stochastic
```
import numpy as np
```
## Create environment
```
def create_environment_states():
"""Creates environment states.
Returns:
num_states: int, number of states.
num_terminal_states: int, number of terminal states.
num_non_terminal_states: int, number of non terminal states.
"""
num_states = 16
num_terminal_states = 2
num_non_terminal_states = num_states - num_terminal_states
return num_states, num_terminal_states, num_non_terminal_states
def create_environment_actions(num_non_terminal_states):
"""Creates environment actions.
Args:
num_non_terminal_states: int, number of non terminal states.
Returns:
max_num_actions: int, max number of actions possible.
num_actions_per_non_terminal_state: array[int], number of actions per
non terminal state.
"""
max_num_actions = 4
num_actions_per_non_terminal_state = np.repeat(
a=max_num_actions, repeats=num_non_terminal_states)
return max_num_actions, num_actions_per_non_terminal_state
def create_environment_successor_counts(num_states, max_num_actions):
"""Creates environment successor counts.
Args:
num_states: int, number of states.
max_num_actions: int, max number of actions possible.
Returns:
num_state_action_successor_states: array[int], number of successor
states s' that can be reached from state s by taking action a.
"""
num_state_action_successor_states = np.repeat(
a=1, repeats=num_states * max_num_actions)
num_state_action_successor_states = np.reshape(
a=num_state_action_successor_states,
newshape=(num_states, max_num_actions))
return num_state_action_successor_states
def create_environment_successor_arrays(
num_non_terminal_states, max_num_actions):
"""Creates environment successor arrays.
Args:
num_non_terminal_states: int, number of non terminal states.
max_num_actions: int, max number of actions possible.
Returns:
sp_idx: array[int], state indices of new state s' of taking action a
from state s.
p: array[float], transition probability to go from state s to s' by
taking action a.
r: array[float], reward from new state s' from state s by taking
action a.
"""
sp_idx = np.array(
object=[1, 0, 14, 4,
2, 1, 0, 5,
2, 2, 1, 6,
4, 14, 3, 7,
5, 0, 3, 8,
6, 1, 4, 9,
6, 2, 5, 10,
8, 3, 7, 11,
9, 4, 7, 12,
10, 5, 8, 13,
10, 6, 9, 15,
12, 7, 11, 11,
13, 8, 11, 12,
15, 9, 12, 13],
dtype=np.int64)
p = np.repeat(
a=1.0, repeats=num_non_terminal_states * max_num_actions * 1)
r = np.repeat(
a=-1.0, repeats=num_non_terminal_states * max_num_actions * 1)
sp_idx = np.reshape(
a=sp_idx,
newshape=(num_non_terminal_states, max_num_actions, 1))
p = np.reshape(
a=p,
newshape=(num_non_terminal_states, max_num_actions, 1))
r = np.reshape(
a=r,
newshape=(num_non_terminal_states, max_num_actions, 1))
return sp_idx, p, r
def create_environment():
"""Creates environment.
Returns:
num_states: int, number of states.
num_terminal_states: int, number of terminal states.
num_non_terminal_states: int, number of non terminal states.
max_num_actions: int, max number of actions possible.
num_actions_per_non_terminal_state: array[int], number of actions per
non terminal state.
num_state_action_successor_states: array[int], number of successor
states s' that can be reached from state s by taking action a.
sp_idx: array[int], state indices of new state s' of taking action a
from state s.
p: array[float], transition probability to go from state s to s' by
taking action a.
r: array[float], reward from new state s' from state s by taking
action a.
"""
(num_states,
num_terminal_states,
num_non_terminal_states) = create_environment_states()
(max_num_actions,
num_actions_per_non_terminal_state) = create_environment_actions(
num_non_terminal_states)
num_state_action_successor_states = create_environment_successor_counts(
num_states, max_num_actions)
(sp_idx,
p,
r) = create_environment_successor_arrays(
num_non_terminal_states, max_num_actions)
return (num_states,
num_terminal_states,
num_non_terminal_states,
max_num_actions,
num_actions_per_non_terminal_state,
num_state_action_successor_states,
sp_idx,
p,
r)
```
## Set hyperparameters
```
def set_hyperparameters():
"""Sets hyperparameters.
Returns:
num_episodes: int, number of episodes to train over.
maximum_episode_length: int, max number of timesteps for an episode.
num_qs: int, number of state-action-value functions Q_i(s, a).
alpha: float, alpha > 0, learning rate.
epsilon: float, 0 <= epsilon <= 1, exploitation-exploration trade-off,
higher means more exploration.
gamma: float, 0 <= gamma <= 1, amount to discount future reward.
"""
num_episodes = 10000
maximum_episode_length = 200
num_qs = 3
alpha = 0.1
epsilon = 0.1
gamma = 1.0
return num_episodes, maximum_episode_length, num_qs, alpha, epsilon, gamma
```
## Create value function and policy arrays
```
def create_value_function_arrays(num_qs, num_states, max_num_actions):
"""Creates value function arrays.
Args:
num_qs: int, number of state-action-value functions Q_i(s, a).
num_states: int, number of states.
max_num_actions: int, max number of actions possible.
Returns:
q: array[float], keeps track of the estimated value of each
state-action pair Q_i(s, a).
"""
q = np.repeat(a=0.0, repeats=num_qs * num_states * max_num_actions)
q = np.reshape(a=q, newshape=(num_qs, num_states, max_num_actions))
return q
def create_policy_arrays(num_non_terminal_states, max_num_actions):
"""Creates policy arrays.
Args:
num_non_terminal_states: int, number of non terminal states.
max_num_actions: int, max number of actions possible.
Returns:
policy: array[float], learned stochastic policy of which
action a to take in state s.
"""
policy = np.repeat(
a=1.0 / max_num_actions,
repeats=num_non_terminal_states * max_num_actions)
policy = np.reshape(
a=policy,
newshape=(num_non_terminal_states, max_num_actions))
return policy
```
## Create algorithm
```
# Set random seed so that everything is reproducible
np.random.seed(seed=0)
def initialize_episode(num_non_terminal_states):
    """Initializes episode with initial state.
Args:
num_non_terminal_states: int, number of non terminal states.
Returns:
init_s_idx: int, initial state index from set of non terminal states.
"""
# Randomly choose an initial state from all non-terminal states
init_s_idx = np.random.randint(
low=0, high=num_non_terminal_states, dtype=np.int64)
return init_s_idx
def epsilon_greedy_policy_from_state_action_function(
max_num_actions, q, epsilon, s_idx, policy):
"""Create epsilon-greedy policy from state-action value function.
Args:
max_num_actions: int, max number of actions possible.
q: array[float], keeps track of the estimated value of each
state-action pair Q_i(s, a).
epsilon: float, 0 <= epsilon <= 1, exploitation-exploration trade-off,
higher means more exploration.
s_idx: int, current state index.
policy: array[float], learned stochastic policy of which action a to
take in state s.
Returns:
policy: array[float], learned stochastic policy of which action a to
take in state s.
"""
# Combine state-action value functions
q = np.sum(a=q[:, s_idx, :], axis=0)
# Save max state-action value and find the number of actions that have the
# same max state-action value
max_action_value = np.max(a=q)
max_action_count = np.count_nonzero(a=q == max_action_value)
# Apportion policy probability across ties equally for state-action pairs
# that have the same value and zero otherwise
if max_action_count == max_num_actions:
max_policy_prob_per_action = 1.0 / max_action_count
remain_prob_per_action = 0.0
else:
max_policy_prob_per_action = (1.0 - epsilon) / max_action_count
remain_prob_per_action = epsilon / (max_num_actions - max_action_count)
policy[s_idx, :] = np.where(
q == max_action_value,
max_policy_prob_per_action,
remain_prob_per_action)
return policy
def loop_through_episode(
num_non_terminal_states,
max_num_actions,
num_state_action_successor_states,
sp_idx,
p,
r,
num_qs,
q,
policy,
alpha,
epsilon,
gamma,
maximum_episode_length,
s_idx):
"""Loops through episode to iteratively update policy.
Args:
num_non_terminal_states: int, number of non terminal states.
max_num_actions: int, max number of actions possible.
num_state_action_successor_states: array[int], number of successor
states s' that can be reached from state s by taking action a.
sp_idx: array[int], state indices of new state s' of taking action a
from state s.
p: array[float], transition probability to go from state s to s' by
taking action a.
r: array[float], reward from new state s' from state s by taking
action a.
num_qs: int, number of state-action-value functions Q_i(s, a).
q: array[float], keeps track of the estimated value of each
state-action pair Q_i(s, a).
policy: array[float], learned stochastic policy of which
action a to take in state s.
alpha: float, alpha > 0, learning rate.
epsilon: float, 0 <= epsilon <= 1, exploitation-exploration trade-off,
higher means more exploration.
gamma: float, 0 <= gamma <= 1, amount to discount future reward.
maximum_episode_length: int, max number of timesteps for an episode.
s_idx: int, current state index.
Returns:
q: array[float], keeps track of the estimated value of each
state-action pair Q_i(s, a).
policy: array[float], learned stochastic policy of which
action a to take in state s.
"""
# Loop through episode steps until termination
for t in range(0, maximum_episode_length):
# Choose policy for chosen state by epsilon-greedy choosing from the
# state-action-value function
policy = epsilon_greedy_policy_from_state_action_function(
max_num_actions, q, epsilon, s_idx, policy)
# Get epsilon-greedy action
a_idx = np.random.choice(
a=max_num_actions, p=policy[s_idx, :])
# Get reward
successor_state_transition_idx = np.random.choice(
a=num_state_action_successor_states[s_idx, a_idx],
p=p[s_idx, a_idx, :])
reward = r[s_idx, a_idx, successor_state_transition_idx]
# Get next state
next_s_idx = sp_idx[s_idx, a_idx, successor_state_transition_idx]
# Update state action value equally randomly selecting from the
# state-action-value functions
updating_q_idx = np.random.randint(low=0, high=num_qs, dtype=np.int64)
q, policy, s_idx = update_q(
num_non_terminal_states,
max_num_actions,
policy,
alpha,
epsilon,
gamma,
s_idx,
a_idx,
reward,
next_s_idx,
updating_q_idx,
num_qs,
q)
if next_s_idx >= num_non_terminal_states:
break # episode terminated since we ended up in a terminal state
return q, policy
def update_q(
num_non_terminal_states,
max_num_actions,
policy,
alpha,
epsilon,
gamma,
s_idx,
a_idx,
reward,
next_s_idx,
updating_q_idx,
num_qs,
q):
"""Updates state-action-value function using multiple estimates.
Args:
num_non_terminal_states: int, number of non terminal states.
max_num_actions: int, max number of actions possible.
policy: array[float], learned stochastic policy of which
action a to take in state s.
alpha: float, alpha > 0, learning rate.
epsilon: float, 0 <= epsilon <= 1, exploitation-exploration trade-off,
higher means more exploration.
gamma: float, 0 <= gamma <= 1, amount to discount future reward.
s_idx: int, current state index.
a_idx: int, current action index.
reward: float, current reward from taking action a_idx in state s_idx.
next_s_idx: int, next state index.
updating_q_idx: int, index to which Q_i(s, a) we'll be updating.
num_qs: int, number of state-action-value functions Q_i(s, a).
q: array[float], keeps track of the estimated value of each
state-action pair Q_i(s, a).
Returns:
q: array[float], keeps track of the estimated value of each
state-action pair Q_i(s, a).
policy: array[float], learned stochastic policy of which
action a to take in state s.
s_idx: int, new current state index.
"""
    # Check to see if the chosen action took us into a terminal state
if next_s_idx >= num_non_terminal_states:
delta = reward - q[updating_q_idx, s_idx, a_idx]
q[updating_q_idx, s_idx, a_idx] += alpha * delta
else:
# Get next action, using expectation value
q_indices = np.arange(num_qs)
not_updating_q_idx = np.random.choice(
a=np.extract(condition=q_indices != updating_q_idx, arr=q_indices))
not_updating_v_expected_value_on_policy = np.sum(
a=policy[next_s_idx, :] * q[not_updating_q_idx, next_s_idx, :])
# Calculate state-action-function expectation
delta = gamma * not_updating_v_expected_value_on_policy
delta -= q[updating_q_idx, s_idx, a_idx]
q[updating_q_idx, s_idx, a_idx] += alpha * (reward + delta)
# Update state and action to next state and action
s_idx = next_s_idx
return q, policy, s_idx
def on_policy_temporal_difference_n_tuple_expected_sarsa(
num_non_terminal_states,
max_num_actions,
num_state_action_successor_states,
sp_idx,
p,
r,
num_qs,
q,
policy,
alpha,
epsilon,
gamma,
maximum_episode_length,
num_episodes):
"""Loops through episodes to iteratively update policy.
Args:
num_non_terminal_states: int, number of non terminal states.
max_num_actions: int, max number of actions possible.
num_state_action_successor_states: array[int], number of successor
states s' that can be reached from state s by taking action a.
sp_idx: array[int], state indices of new state s' of taking action a
from state s.
p: array[float], transition probability to go from state s to s' by
taking action a.
r: array[float], reward from new state s' from state s by taking
action a.
num_qs: int, number of state-action-value functions Q_i(s, a).
q: array[float], keeps track of the estimated value of each
state-action pair Q_i(s, a).
policy: array[float], learned stochastic policy of which
action a to take in state s.
alpha: float, alpha > 0, learning rate.
epsilon: float, 0 <= epsilon <= 1, exploitation-exploration trade-off,
higher means more exploration.
gamma: float, 0 <= gamma <= 1, amount to discount future reward.
maximum_episode_length: int, max number of timesteps for an episode.
num_episodes: int, number of episodes to train over.
Returns:
q: array[float], keeps track of the estimated value of each
state-action pair Q_i(s, a).
policy: array[float], learned stochastic policy of which
action a to take in state s.
"""
for episode in range(0, num_episodes):
# Initialize episode to get initial state
        init_s_idx = initialize_episode(num_non_terminal_states)
# Loop through episode and update the policy
q, policy = loop_through_episode(
num_non_terminal_states,
max_num_actions,
num_state_action_successor_states,
sp_idx,
p,
r,
num_qs,
q,
policy,
alpha,
epsilon,
gamma,
maximum_episode_length,
init_s_idx)
return q, policy
```
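A small numerical illustration (toy values, not part of the algorithm above) of the expected-Sarsa target computed in `update_q`: the bootstrap term is the policy-weighted average of a *different* estimator's action values at the next state.
```
gamma_demo, reward_demo = 1.0, -1.0
policy_next = np.array([0.925, 0.025, 0.025, 0.025])  # epsilon-greedy over 4 actions, epsilon = 0.1
q_other_next = np.array([-2.0, -3.0, -3.0, -4.0])     # Q_j(s', .) for some j != i
expected_value = np.sum(policy_next*q_other_next)     # on-policy expectation: -2.1
target = reward_demo + gamma_demo*expected_value      # TD target: -3.1
print(expected_value, target)
```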
## Run algorithm
```
def run_algorithm():
"""Runs the algorithm."""
(num_states,
num_terminal_states,
num_non_terminal_states,
max_num_actions,
num_actions_per_non_terminal_state,
num_state_action_successor_states,
sp_idx,
p,
r) = create_environment()
(num_episodes,
maximum_episode_length,
num_qs,
alpha,
epsilon,
gamma) = set_hyperparameters()
q = create_value_function_arrays(num_qs, num_states, max_num_actions)
policy = create_policy_arrays(num_non_terminal_states, max_num_actions)
# Print initial arrays
print("\nInitial state-action value function")
print(q)
print("\nInitial policy")
print(policy)
# Run on policy temporal difference n-tuple expected sarsa
q, policy = on_policy_temporal_difference_n_tuple_expected_sarsa(
num_non_terminal_states,
max_num_actions,
num_state_action_successor_states,
sp_idx,
p,
r,
num_qs,
q,
policy,
alpha,
epsilon,
gamma,
maximum_episode_length,
num_episodes)
# Print final results
print("\nFinal state-action value function")
print(q)
print("\nFinal policy")
print(policy)
run_algorithm()
```
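As a possible extension (hypothetical, since `run_algorithm()` above only prints its results), the n value estimates can be combined and reduced to a deterministic greedy policy, assuming `q` were returned by `run_algorithm()`:
```
def greedy_actions_from_q(q, num_non_terminal_states):
    """Sum the n state-action-value estimates and take the argmax action per state."""
    q_combined = np.sum(q[:, :num_non_terminal_states, :], axis=0)
    return np.argmax(q_combined, axis=1)

# e.g. greedy_actions = greedy_actions_from_q(q, 14)  # 14 non-terminal states in this gridworld
```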
This is an example showing the prediction latency of various scikit-learn estimators.
The goal is to measure the latency one can expect when doing predictions either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
#### New to Plotly?
Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).
<br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online).
<br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!
### Version
```
import sklearn
sklearn.__version__
```
### Imports
This tutorial imports [StandardScaler](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html#sklearn.preprocessing.StandardScaler), [train_test_split](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html#sklearn.model_selection.train_test_split), [scoreatpercentile](http://docs.scipy.org/doc/scipy-0.11.0/reference/generated/scipy.stats.scoreatpercentile.html#scipy.stats.scoreatpercentile), [make_regression](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_regression.html#sklearn.datasets.make_regression), [RandomForestRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html#sklearn.ensemble.RandomForestRegressor), [Ridge](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html#sklearn.linear_model.Ridge), [SGDRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDRegressor.html#sklearn.linear_model.SGDRegressor), [SVR](http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVR.html#sklearn.svm.SVR) and [shuffle](http://scikit-learn.org/stable/modules/generated/sklearn.utils.shuffle.html#sklearn.utils.shuffle).
```
from __future__ import print_function
from collections import defaultdict
from plotly import tools
import plotly.plotly as py
import plotly.graph_objs as go
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from scipy.stats import scoreatpercentile
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Ridge
from sklearn.linear_model import SGDRegressor
from sklearn.svm import SVR
from sklearn.utils import shuffle
```
### Calculations
```
# Note: the `configuration` dict used in the subplot title below is defined
# in the "Plot data" cell further down; run that cell first.
fig1 = tools.make_subplots(rows=4, cols=1,
subplot_titles=(
'Prediction Time per instance - Atomic, 100 feats',
'Prediction Time per instance - Bulk(100), 100 feats',
'Evolution of Prediction Time with #Features ',
'Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features']))
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=float)
for i in range(n_instances):
instance = X_test[[i], :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
random_seed = 13
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=n_train, random_state=random_seed)
X_train, y_train = shuffle(X_train, y_train, random_state=random_seed)
X_scaler = StandardScaler()
X_train = X_scaler.fit_transform(X_train)
X_test = X_scaler.transform(X_test)
y_scaler = StandardScaler()
y_train = y_scaler.fit_transform(y_train[:, None])[:, 0]
y_test = y_scaler.transform(y_test[:, None])[:, 0]
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration, 1)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration, 2)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
n_train : number of training instances (int)
n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
Returns:
--------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[[0]])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
```
### Plot Results
Boxplot Runtimes
```
def boxplot_runtimes(runtimes, pred_type, configuration, subplot):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
pred_type : 'bulk' or 'atomic'
configuration : dict describing the benchmark (provides the estimator names)
subplot : row index of the subplot to draw the boxplots into
"""
cls_infos = ['%s<br>(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
box_plot1 = go.Box(y=runtimes[0],showlegend=False,name=cls_infos[0],
fillcolor='rgba(0.4,225, 128, 128)',
line=dict(color="black", width=1))
box_plot2 = go.Box(y=runtimes[1],showlegend=False,name=cls_infos[1],
fillcolor='rgba(0.4,225, 128, 128)',
line=dict(color="black", width=1))
box_plot3 = go.Box(y=runtimes[2],showlegend=False,name=cls_infos[2],
fillcolor='rgba(0.4,225, 128, 128)',
line=dict(color="black", width=1))
fig1.append_trace(box_plot1, subplot, 1)
fig1.append_trace(box_plot2, subplot, 1)
fig1.append_trace(box_plot3, subplot, 1)
axis='yaxis'+str(subplot)
fig1['layout'][axis].update(title='Prediction Time (us)')
axis='xaxis'+str(subplot)
fig1['layout'][axis].update(ticks='Prediction Time (us)')
```
Plot n_features influence.
```
def plot_n_features_influence(percentiles, percentile):
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
line_plot = go.Scatter(x=x, y=y,
showlegend=False,
mode='lines',
line=dict(color="red"))
fig1.append_trace(line_plot, 3, 1)
fig1['layout']['xaxis3'].update(title='#Features')
fig1['layout']['yaxis3'].update(title='Prediction Time at %d%%-ile (us)' % percentile)
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
cls_infos = ['%s<br>(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
bar_plot = go.Bar(x=cls_infos, y= cls_values,
showlegend=False, marker=dict(
color=['red', 'green', 'blue']))
fig1.append_trace(bar_plot, 4, 1)
fig1['layout']['yaxis4'].update(title='Throughput (predictions/sec)')
```
Plot data
```
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
fig1['layout'].update(height=2000)
py.iplot(fig1)
```
### License
Authors:
Eustache Diemert <[email protected]>
License:
BSD 3 clause
```
from IPython.display import display, HTML
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
publisher.publish(
'Prediction-Latency.ipynb', 'scikit-learn/plot-prediction-latency/', 'Prediction Latency | plotly',
' ',
title = 'Prediction Latency | plotly',
name = 'Prediction Latency',
has_thumbnail='true', thumbnail='thumbnail/prediction-latency.jpg',
language='scikit-learn', page_type='example_index',
display_as='real_dataset', order=9,ipynb='~Diksha_Gabha/2674')
```
# Better Long-Term Stock Forecasts
by [Magnus Erik Hvass Pedersen](http://www.hvass-labs.org/)
/ [GitHub](https://github.com/Hvass-Labs/FinanceOps) / [Videos on YouTube](https://www.youtube.com/playlist?list=PL9Hr9sNUjfsmlHaWuVxIA0pKL1yjryR0Z)
## Introduction
The [previous paper](https://github.com/Hvass-Labs/FinanceOps/blob/master/01_Forecasting_Long-Term_Stock_Returns.ipynb) showed a strong predictive relationship between the P/Sales ratio and long-term returns of some individual stocks and the S&P 500 stock-market index.
However, there was a considerable amount of noise in those scatter-plots, because we considered fixed investment periods of exactly 10 years, for example. So even though the P/Sales ratio was a strong predictor for the mispricing at the buy-time, it was impossible to predict the mispricing at the sell-time, because the stock-market could be in a bubble or in a crash 10 years into the future, which would distort the estimated returns.
This paper presents a simple solution, which is to consider the average returns for all investment periods between 7 and 15 years, and then make a scatter-plot of the mean returns versus the P/Sales ratio. This produces incredibly smooth curves for estimating the future long-term returns of the S&P 500 and some individual stocks.
Along with the [previous paper](https://github.com/Hvass-Labs/FinanceOps/blob/master/01_Forecasting_Long-Term_Stock_Returns.ipynb), this is a very important discovery and it has implications for many areas of both theoretical and applied finance. It means that the U.S. stock-market as a whole is not "efficient" and does not follow a purely "random walk" in the long-term. It is possible to estimate the future long-term return of the stock-market and some individual stocks from just a single indicator variable.
## Python Imports
This Jupyter Notebook is implemented in Python v. 3.6 and requires various packages for numerical computations and plotting. See the installation instructions in the README-file.
```
%matplotlib inline
# Imports from Python packages.
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import pandas as pd
import numpy as np
import os
# Imports from FinanceOps.
from curve_fit import CurveFitReciprocal
from data_keys import *
from data import load_index_data, load_stock_data
from returns import prepare_mean_ann_returns
```
## Load Data
We now load all the financial data we will be using.
```
# Define the ticker-names for the stocks we consider.
ticker_SP500 = "S&P 500"
ticker_JNJ = "JNJ"
ticker_K = "K"
ticker_PG = "PG"
ticker_WMT = "WMT"
# Load the financial data for the stocks.
df_SP500 = load_index_data(ticker=ticker_SP500)
df_JNJ = load_stock_data(ticker=ticker_JNJ)
df_K = load_stock_data(ticker=ticker_K)
df_PG = load_stock_data(ticker=ticker_PG)
df_WMT = load_stock_data(ticker=ticker_WMT)
```
## Plotting Functions
These are helper-functions used for making plots.
```
def plot_psales(df, ticker, start_date=None):
"""
Plot the P/Sales ratio.
:param df: Pandas DataFrame with PSALES.
:param ticker: Ticker-name for the stock or index.
:param start_date: Start-date for the plot.
:return: Nothing.
"""
psales = df[PSALES][start_date:].dropna()
psales.plot(title=ticker + " - P/Sales", grid=True)
def plot_ann_returns(ticker, df, key=PSALES,
min_years=7, max_years=15,
use_colors=True):
"""
Create a single scatter-plot with P/Sales or P/Book
vs. Mean Annualized Returns for e.g. 7-15 years.
:param ticker: Ticker-name for the stock or index.
:param df: Pandas DataFrame containing key and TOTAL_RETURN.
:param key: Name of data-column to use e.g. PSALES or PBOOK.
:param min_years: Min number of years for return periods.
:param max_years: Max number of years for return periods.
:param use_colors: Boolean whether to use colors in plot.
:return: Nothing.
"""
# Prepare the data.
# x is the P/Sales or P/Book and y is the Mean Ann. Returns.
x, y = prepare_mean_ann_returns(df=df, key=key,
min_years=min_years,
max_years=max_years)
# Create a single plot.
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(211)
# Scatter-plot.
if use_colors:
# Give each dot in the scatter-plot a shade of blue
# according to the date of the data-point.
ax.scatter(x, y,
c=list(range(len(x))), cmap='Blues',
alpha=1.0, marker='o')
else:
# Use the same color for all dots.
ax.scatter(x, y, marker='o')
# First part of the title.
title1 = "[{0}] {1} vs. {2}-{3} Years Mean Ann. Return"
title1 = title1.format(ticker, key, min_years, max_years)
# X-values for plotting fitted curves.
x_min = np.min(x)
x_max = np.max(x)
x_range = np.arange(x_min, x_max, (x_max - x_min) / 1000)
# Plot reciprocal curve-fit.
curve_fit_reciprocal = CurveFitReciprocal(x=x, y=y)
y_pred = curve_fit_reciprocal.predict(x=x_range)
ax.plot(x_range, y_pred, color='red')
# Title with these curve-fit parameters.
title2 = "Mean Ann. Return = {0:.1%} / " + key + " + {1:.1%}"
title2 = title2.format(*curve_fit_reciprocal.params)
# Combine and set the plot-title.
title = "\n".join([title1, title2])
ax.set_title(title)
# Set axis labels.
ax.set_xlabel(key)
ax.set_ylabel("Mean Ann. Return")
# Convert y-ticks to percentages.
# We use a custom FuncFormatter because PercentFormatter
# is inconsistent with string-formatters used elsewhere.
formatter = FuncFormatter(lambda y, _: '{:.0%}'.format(y))
ax.yaxis.set_major_formatter(formatter)
# Show grid.
ax.grid()
# Show the plot.
plt.show()
```
## Case Study: S&P 500
The S&P 500 is a stock-market index consisting of the stocks of 500 of the largest companies in USA. The S&P 500 covers about 80% of the whole U.S. stock-market in terms of size so it is useful as a gauge for the entire U.S. stock-market.
We consider the Total Return of the S&P 500 which is what you would get from investing in the S&P 500 and re-investing all dividends back into the S&P 500. We ignore all taxes here.
The following scatter-plot shows the P/Sales ratio versus the Mean Annualized Returns of the S&P 500 for periods between 7 and 15 years.
For each day we calculate the Total Return of the S&P 500 over the next 7-15 years, then we calculate the Mean Annualized Return from those, and then we put a blue dot in the scatter-plot for that date's P/Sales ratio and the Mean Annualized Return we just calculated. This process is continued for all days in the time-series, until we have calculated and plotted the P/Sales vs. Mean Annualized Return for all days.
As can be seen from this scatter-plot, the P/Sales ratio is a very strong predictor for long investment periods between 7-15 years. We call the fitted red curve for the "return curve".
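As a side note, the mean annualized returns used in these scatter-plots are computed by `prepare_mean_ann_returns` from the accompanying `returns` module. The following is only a rough, hypothetical sketch of that idea (not the module's actual implementation); it assumes `tot_ret` is a pandas Series of the Total Return with one row per trading day:
```
import pandas as pd

def mean_annualized_returns(tot_ret, min_years=7, max_years=15, days_per_year=252):
    ann_returns = []
    for years in range(min_years, max_years + 1):
        days = years * days_per_year
        # Annualized return for a fixed holding period of `years` years.
        ratio = tot_ret.shift(-days) / tot_ret
        ann_returns.append(ratio ** (1 / years) - 1)
    # For each buy-date, average over all the holding periods.
    return pd.concat(ann_returns, axis=1).mean(axis=1)
```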
```
plot_ann_returns(ticker=ticker_SP500, df=df_SP500, key=PSALES,
min_years=7, max_years=15, use_colors=True)
```
We can forecast the future long-term returns using the fitted "return curve" from the scatter-plot above. Towards the end of 2017, the P/Sales ratio was almost 2.2 for the S&P 500, which was about the previous high point of the "Dot-Com" bubble around year 2000.
```
df_SP500[PSALES].dropna().tail(1)
plot_psales(df=df_SP500, ticker=ticker_SP500)
```
So if you purchased the S&P 500 in December 2017 at this P/Sales ratio and will keep the investment for more than 7 years, while reinvesting all dividends during those years (all taxes are ignored), then the formula forecasts an annualized return of about 1.35%:
$$
Annualized\ Return = 14.4\% / (P/Sales) - 5.2\% = 14.4\% / 2.2 - 5.2\% \simeq 1.35\%
$$
The formula cannot predict exactly what will happen in the future, because there might be a stock-market bubble or a crash in any given year. The formula merely predicts an average annualized return for long-term investments of about 7-15 years in the S&P 500.
## Case Study: Johnson & Johnson (JNJ)
Now let us consider individual companies instead of a whole stock-market index. The first company we consider is Johnson & Johnson with the ticker symbol JNJ. This is a very large company with over 130.000 employees worldwide that manufacture a wide range of health-care related products.
When we plot the P/Sales ratio versus the mean annualized return for 7-15 year periods, we see that the "return curve" fits quite well although there appears to be a few separate "return curves" for P/Sales ratios roughly between 2 and 3.
The blue shades in the scatter-plot indicate the time of the data-points and suggest that the separate curves belong to different periods of time. More research would be needed to establish why these periods have different "return curves". Perhaps the periods had significantly different profit-margins or sales-growth.
```
plot_ann_returns(ticker=ticker_JNJ, df=df_JNJ, key=PSALES,
min_years=7, max_years=15, use_colors=True)
```
Towards the end of 2017 the P/Sales ratio was about 4.9 which is close to the all-time historical highs experienced during the stock-market bubble around year 2000.
```
df_JNJ[PSALES].dropna().tail(1)
plot_psales(df=df_JNJ, ticker=ticker_JNJ)
```
Using the formula for the fitted "return curve" from the scatter-plot above, we get this forecasted long-term return:
$$
Annualized\ Return \simeq 77.9\% / (P/Sales) - 8.9\%
\simeq 77.9\% / 4.9 - 8.9\% \simeq 7.0\%
$$
So according to this formula, the annualized return of the JNJ stock will be around 7.0% if you own the stock for at least 7 years, when dividends are reinvested and ignoring taxes.
Again there is the caveat that it is impossible to predict whether there will be a stock-market bubble or crash several years into the future, so the forecasted return is an average for 7-15 year investment periods.
## Case Study: Procter & Gamble (PG)
Another very large company is Procter & Gamble with the ticker symbol PG, which sells a wide range of consumer products and has almost 100.000 employees.
If we plot the P/Sales ratio versus the mean annualized return we get an incredibly regular curve of data-points. The red line shows a reciprocal curve-fit, which is apparently not the correct formula for this data, as it doesn't fit so well at the ends. You are encouraged to try and find a better curve-fit and a theoretical explanation why your formula is better.
```
plot_ann_returns(ticker=ticker_PG, df=df_PG, key=PSALES,
min_years=7, max_years=15)
```
When we plot the historical P/Sales ratio, we see that at the end of 2017 it was around 3.5 which was near its all-time high experienced during the bubble around year 2000.
```
plot_psales(df=df_PG, ticker=ticker_PG)
```
Using the fitted reciprocal curve from the scatter-plot above, we get a forecasted return of about 6.1% per year, when dividends are reinvested without taxes:
$$
Annualized\ Return \simeq 24.4\% / (P/Sales) - 0.9\% \simeq
24.4\% / 3.5 - 0.9\% \simeq 6.1\%
$$
But it should again be noted that this formula doesn't fit so well towards the ends of the data, and looking at the scatter-plot suggests a slightly lower return of maybe 5.5%.
## Case Study: Kellogg's (K)
The next company is Kellogg's which trades under the ticker symbol K. The company has about 33.000 employees and is especially known for making breakfast cereals.
When we plot the P/Sales ratio versus the mean annualized return it shows a strong trend that higher P/Sales ratios gives lower long-term returns, although the curve-fit is not as good as for the other companies we studied above, especially for lower P/Sales ratios.
The blue shades show the time of the data-points. It can be hard to see in this plot, but for P/Sales ratios between 1.50 and 1.75, there is a "blob" of light-blue data-points well above the fitted red curve. This clearly indicates that the outlying data-points belong to a specific period in time. But we would have to do more research into the financial data for that period, to uncover the reason why the returns are so different.
```
plot_ann_returns(ticker=ticker_K, df=df_K, key=PSALES,
min_years=7, max_years=15, use_colors=True)
```
Towards the end of 2017 the P/Sales ratio was about 1.8 which was actually very close to the historical average.
```
df_K[PSALES].dropna().mean()
plot_psales(df=df_K, ticker=ticker_K)
```
Using the fitted "return curve" from the scatter-plot above with the P/Sales ratio of 1.8 we get the forecasted return:
$$
Annualized\ Return \simeq 27.5\% / (P/Sales) - 6.2\% \simeq
27.5\% / 1.8 - 6.2\% \simeq 9.1\%
$$
So a forecasted return of about 9.1% per year over the next 7-15 years when dividends are reinvested without taxes. That is about 2% (percentage points) higher than the return forecasted for JNJ and 3% higher than forecasted for PG above.
## Case Study: Wal-Mart (WMT)
Now let us consider the company Wal-Mart which trades under the ticker symbol WMT. It is an extremely large retail-company with about 2.3 million employees.
If we plot the P/Sales ratio versus the mean annualized return, we see that the red curve fits very poorly. There seems to be several separate trends in the data, and the blue shades indicate that the trends belong to different periods in time. But more research into the company's financial history would be needed to uncover the reason for this, perhaps it is because of significantly different sales-growth, profit margins, etc.
```
plot_ann_returns(ticker=ticker_WMT, df=df_WMT, key=PSALES,
min_years=7, max_years=15, use_colors=True)
```
## Conclusion
We have shown that the P/Sales ratio is a very strong predictor for the long-term returns of the S&P 500 index and some individual stocks.
In the [previous paper](https://github.com/Hvass-Labs/FinanceOps/blob/master/01_Forecasting_Long-Term_Stock_Returns.ipynb) we considered fixed investment periods of e.g. 10 years, which meant that the investment return depended on the P/Sales ratio both at the time of buying and selling. This distorted the data because sometimes the stock-market would be in a bubble or crash 10 years later.
In this paper we presented a simple solution by considering all investment periods between 7 and 15 years, and then using the average return instead. This averages out the distorting effects of future bubbles and crashes, so we get much more smooth data that only depends on the P/Sales ratio at the buy-time.
We then fitted a reciprocal "return curve" to the scatter-plots, and although it generally had a very tight fit, it was not so accurate towards the end-points, thus suggesting that the reciprocal formula is not entirely correct for this data. It would be of great interest to not only find a mathematical model that fits better, but also a theoretical explanation why that model makes sense. Perhaps such a model would also allow us to use smaller amounts of data and take into account the changing economics of a business. Perhaps we could use such a model to forecast returns of more companies where the basic method does not work so well, such as Wal-Mart as demonstrated above.
It should be stressed that the forecasted returns will also depend on a *qualitative* assessment of the company. If the company's future will be significantly different from its historical sales, profit-margins and growth, then the forecasted returns will be inaccurate. That is why this forecasting method is perhaps best used on broad stock-market indices such as the S&P 500, or companies whose products and markets are expected to be highly predictable long into the future.
## Research Ideas
You are strongly encouraged to do more research on this topic. If you make any new discoveries then please let me know your results.
To my knowledge, there are no academic studies of predicting the long-term returns of stocks and stock-markets as we have done here. This work has presented the basic idea and methodology, but a lot more research can be done on this subject and it may impact many areas of both theoretical and applied finance.
Here are a few more research ideas to get you started, in addition to the ideas from the [previous paper](https://github.com/Hvass-Labs/FinanceOps/blob/master/01_Forecasting_Long-Term_Stock_Returns.ipynb):
- Try other investment periods, for example 5 to 10 years. How does it change the scatter-plots and the fitted "return curves"?
- Try using P/Book as the predictor signal. How does that affect the plots? Why?
- Although the data in some of these scatter-plots is incredibly smooth, the reciprocal curve does not fit the data exactly, which suggests that it is the wrong formula for this data. Can you find a better formula and perhaps give a theoretical explanation why that is better?
- What is the reason that some companies such as Wal-Mart have several different trend-lines in the scatter-plot? You will probably need to investigate the historical financial data to uncover the reason. Can you modify the forecasting method to somehow take this into account?
## License (MIT)
Copyright (c) 2015-18 by [Magnus Erik Hvass Pedersen](http://www.hvass-labs.org/)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Image analysis with fMRI 3D images imported with LORIS API
This is a tutorial to show how to use Loris' API to download MRI images. It also contains a few examples of how the data can be used to run basic data analysis.
This tutorial is also available as a Google colab notebook so you can run it directly from your browser. To access it, click on the button below: <a href="https://colab.research.google.com/github/spell00/Loris/blob/2020-08-06-JupyterCreateImageDataset/docs/notebooks/LORIS-API_Part3-Create_image_dataset.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
# Uncomment and run to install the packages required to run the notebook
# !pip3 install tqdm
# !pip3 install numpy
# !pip3 install nibabel
# !pip3 install sklearn
# !pip3 install matplotlib
# !pip3 install nilearn
```
## Setup
```
import getpass # For input prompt not to show what is entered
import json # Provide convenient functions to handle json objects
import re # For regular expression
import requests # To handle http requests
import nibabel as nib
import numpy as np
import warnings
from tqdm import tqdm_notebook as tqdm # To make a nice progress bar
import os
import itertools
os.chdir('..')
warnings.simplefilter('ignore') # Because I am using unverified ssl certificates
def prettyPrint(string):
print(json.dumps(string, indent=2, sort_keys=True))
import argparse
import torch
import torch.nn as nn
import numpy as np
import json
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from fmri.utils.activations import Swish, Mish
from fmri.utils.CycleAnnealScheduler import CycleScheduler
from fmri.utils.dataset import load_checkpoint, save_checkpoint, MRIDataset
from fmri.utils.transform_3d import Normalize, RandomRotation3D, ColorJitter3D, Flip90, Flip180, Flip270, XFlip, YFlip, \
ZFlip
from fmri.models.supervised.MLP import MLP
from fmri.utils.plot_performance import plot_performance
import torchvision
from torchvision import transforms
from ax.service.managed_loop import optimize
import random
import nibabel as nib
from fmri.utils.utils import validation_spliter
import nilearn.plotting as nlplt
```
## Getting the data
The data on https://demo.loris.ca are only for development purposes, so we use them here for demonstration only. In this tutorial, we will download all the T1 and T2 raw images from every project.
```
images_path = 'D:\\workbench\\projects\\AutoTKV_MouseMRI-master\\AllTrainingImages\\images\\'
targets_path = 'D:\\workbench\\projects\\AutoTKV_MouseMRI-master\\AllTrainingImages\\targets\\'
all_set = MRIDataset(images_path, targets_path, transform=None, resize=False)
spliter = validation_spliter(all_set, cv=5)
valid_set, train_set = spliter.__next__()
train_loader = DataLoader(train_set,
num_workers=0,
shuffle=True,
batch_size=1,
pin_memory=False,
drop_last=True)
valid_loader = DataLoader(valid_set,
num_workers=0,
shuffle=True,
batch_size=1,
pin_memory=False,
drop_last=True)
sample_x, sample_target = next(iter(valid_set))
sample_x = sample_x.numpy().squeeze()
sample_target = sample_target.numpy().squeeze()
def np_to_nifti(sample):
    # Wrap the numpy volume in a NIfTI image and plot an anatomical cut.
    nifti_img = nib.Nifti1Image(sample, np.eye(4))
    return nlplt.plot_anat(nifti_img, cut_coords=(128, 128, 10))
np_to_nifti(sample_x)
np_to_nifti(sample_target)
def _resize_data(data, new_size=(160, 160, 160)):
initial_size_x = data.shape[0]
initial_size_y = data.shape[1]
initial_size_z = data.shape[2]
new_size_x = new_size[0]
new_size_y = new_size[1]
new_size_z = new_size[2]
delta_x = initial_size_x / new_size_x
delta_y = initial_size_y / new_size_y
delta_z = initial_size_z / new_size_z
new_data = np.zeros((new_size_x, new_size_y, new_size_z))
for x, y, z in itertools.product(range(new_size_x),
range(new_size_y),
range(new_size_z)):
new_data[x][y][z] = data[int(x * delta_x)][int(y * delta_y)][int(z * delta_z)]
return new_data
sample_x_14x14x14 = _resize_data(sample_x, (14, 14, 14))
t1_fullimage = nib.Nifti1Image(sample_x_14x14x14, np.eye(4))
nlplt.plot_anat(t1_fullimage, (7, 7, 7))
nlplt.show()
training_images_dir = 'D:\\workbench\\projects\\AutoTKV_MouseMRI-master\\AllTrainingImages\\images'
training_targets_dir = 'D:\\workbench\\projects\\AutoTKV_MouseMRI-master\\AllTrainingImages\\targets'
```
#### Then, we get the information necessary to retrieve all images from all the projects and store them in a dictionary.
```
# The dictionary to store the images
images_dict = {
"raw": {
't1': [],
't2': []
},
"32x32x32": {
't1': [],
't2': []
},
"128x128x128": {
't1': [],
't2': []
}
}
# NOTE: `baseurl`, `token`, `projectnames` and `imagesMeta` are assumed to have
# been defined in the earlier authentication / metadata cells of this tutorial
# (LORIS API tutorial parts 1 and 2); they are not re-defined in this excerpt.
# Progress bar for downloads
pbar = tqdm(total=sum([len([meta for meta in imagesMeta[p]['Images'] if meta['ScanType'] in ['t1', 't2']]) for p in projectnames]))
for project in projectnames:
for i, meta in enumerate(imagesMeta[project]['Images']):
if(meta['ScanType'] not in ['t1', 't2']):
continue
r = requests.get(baseurl + meta['Link'],
headers = {'Authorization': 'Bearer %s' % token})
page = r.content
filename = meta['Link'].split('/')[-1]
t = meta['ScanType']
# The images need to be saved first.
# Only t1 and t2 images are kept.
if (t in ['t1', 't2']):
file_ = open(filename, 'wb')
else:
continue
file_.write(page)
file_.close()
img = nib.load(filename)
# The images are not necessary for the rest of this tutorial.
os.remove(filename)
img = img.get_fdata()
# The images are saved in the dictionary
if(meta['ScanType'] == 't1'):
images_dict["raw"]["t1"] += [img]
if(meta['ScanType'] == 't2'):
images_dict["raw"]["t2"] += [img]
pbar.update(1)
```
## Preprocessing
In this section, we'll explore a few preprocessing methods that might make the models learned perform better.
### Resize images
In this tutorial, T1 and T2 images are compared. They are of similar sizes (160x256x224 and 160x256x256 for T1 and T2, respectively), but they need to be exactly the same size for any subsequent analysis.
In machine learning, it is common practice to reduce large images before training a model. Large images have the advantage of containing more information, but it comes with a tradeoff known as the Curse of dimensionality. Having a high dimensionality can make it much easier to have good performances on the training set, but the models trained overfit more easily to the training data and perform poorly on the validation and test data.
Of course, reducing images too much will also harm the performance of the model trained. There is no rule of thumb or algorithm to get the optimal size of images to be used in a specific task, so it might be a good idea to try a few different reductions.
This tutorial will explore 2 sizes. Both will be cubes (all sides have the same length): 128x128x128 and 32x32x32. The latter might seem like a huge reduction, but the 3D images still have 32,768 dimensions (each voxel being a dimension), which is still large, yet much more manageable than the bigger reduction, which has 2,097,152 dimensions. In order to decide which reduction to use, we will observe the data using a Principal Component Analysis (PCA). It will give an idea of whether the data has lost too much information to use it in a classification task.
Ultimately, it might be necessary to use both strategies to test whether one is better than the other. In case both strategies appear to be equal, Ockham's razor principle suggests that the images with fewer voxels should be used. In this case, the notion of equality is somewhat arbitrary and might depend on the task to be accomplished.
```
def resize_image(image, new_size=(160, 160, 160)):
"""
Function to resize an image.
Args:
image (Numpy array of shape (Length, Width, Depth)): image to transform
new_size (3-Tuple) : The new image length, width and Depth
"""
initial_size_x = image.shape[0]
initial_size_y = image.shape[1]
initial_size_z = image.shape[2]
new_size_x = new_size[0]
new_size_y = new_size[1]
new_size_z = new_size[2]
delta_x = initial_size_x / new_size_x
delta_y = initial_size_y / new_size_y
delta_z = initial_size_z / new_size_z
new_image = np.zeros((new_size_x, new_size_y, new_size_z))
for x, y, z in itertools.product(range(new_size_x),
range(new_size_y),
range(new_size_z)):
new_image[x][y][z] = image[int(x * delta_x)][int(y * delta_y)][int(z * delta_z)]
return new_image
```
We need to create new directories to save the resized T1 and T2 images.
#### Resize and normalize all T1 images
```
from sklearn.preprocessing import Normalizer
pbar = tqdm(total=len(images_dict['raw']['t1']))
for t1 in images_dict['raw']["t1"]:
t1_32 = resize_image(t1, (32, 32, 32))
t1_32 = Normalizer().fit_transform(t1_32.reshape([1, -1]))
t1_32 = t1_32.reshape([-1, 32, 32, 32])
images_dict['32x32x32']['t1'] += [t1_32]
t1_128 = resize_image(t1, (128, 128, 128))
t1_128 = Normalizer().fit_transform(t1_128.reshape([1, -1]))
t1_128 = t1_128.reshape([-1, 128, 128, 128])
images_dict['128x128x128']['t1'] += [t1_128]
pbar.update(1)
"""
We don't need to save the images for this tutorial, but the package nibabel
can be used to save the images to disk like this:
img = nib.Nifti1Image(image_to_save, np.eye(4))
img.to_filename("/path/to/new_file_name.nii")
"""
# Make numpy arrays from the lists of numpy arrays
images_dict['32x32x32']['t1'] = np.stack(images_dict['32x32x32']['t1'])
images_dict['128x128x128']['t1'] = np.stack(images_dict['128x128x128']['t1'])
```
#### Resize and normalize T2 images
```
pbar = tqdm(total=len(images_dict['raw']['t2']))
for t2 in images_dict['raw']["t2"]:
t2_32 = resize_image(t2, (32, 32, 32))
t2_32 = Normalizer().fit_transform(t2_32.reshape([1, -1]))
t2_32 = t2_32.reshape([-1, 32, 32, 32])
images_dict['32x32x32']['t2'] += [t2_32]
t2_128 = resize_image(t2, (128, 128, 128))
t2_128 = Normalizer().fit_transform(t2_128.reshape([1, -1]))
t2_128 = t2_128.reshape([-1, 128, 128, 128])
images_dict['128x128x128']['t2'] += [t2_128]
pbar.update(1)
# Make numpy arrays from the lists of numpy arrays
images_dict['32x32x32']['t2'] = np.stack(images_dict['32x32x32']['t2'])
images_dict['128x128x128']['t2'] = np.stack(images_dict['128x128x128']['t2'])
```
### Visualisation with nilearn
Visualisation of the raw images and the 2 reductions for T1 and T2 images.
#### T1 images
```
# This package is used to plot a section of the 3D images
import nilearn.plotting as nlplt
print("Original (160x256x224)")
t1_fullimage = nib.Nifti1Image(images_dict['raw']['t1'][0], np.eye(4))
nlplt.plot_anat(t1_fullimage, (80, 128, 112))
nlplt.show()
print("128x128x128")
img_t1_128 = nib.Nifti1Image(resize_image(images_dict['raw']['t1'][0], (128, 128, 128)), np.eye(4))
nlplt.plot_anat(img_t1_128, (64, 64, 64))
nlplt.show()
print("32x32x32")
img_t1_32 = nib.Nifti1Image(resize_image(images_dict['raw']['t1'][0], (32, 32, 32)), np.eye(4))
nlplt.plot_anat(img_t1_32, (16, 16, 16))
nlplt.show()
```
#### T2 images
```
print("Original (160x256x256)")
t2_fullimage = nib.Nifti1Image(images_dict['raw']['t2'][0], np.eye(4))
nlplt.plot_anat(t2_fullimage, (80, 128, 112))
nlplt.show()
print("128x128x128")
img_t2_128 = nib.Nifti1Image(resize_image(images_dict['raw']['t2'][0], (128, 128, 128)), np.eye(4))
nlplt.plot_anat(img_t2_128, (64, 64, 64))
nlplt.show()
print("32x32x32")
img_t2_32 = nib.Nifti1Image(resize_image(images_dict['raw']['t2'][0], (32, 32, 32)), np.eye(4))
nlplt.plot_anat(img_t2_32, (16, 16, 16))
nlplt.show()
```
## Unsupervised learning: Principal Component Analysis
Principal Component Analysis (PCA) is a popular method used for dimensionality reduction, which is a good first step to visualise the data to analyse and can give insight for the subsequent steps of the analysis. Dimensionality reduction can also be used to transform the data before using it to train a ML model.
```
# sklearn needs the data to be flattened
images_dict['32x32x32']['t1'] = images_dict['32x32x32']['t1'].reshape(
[images_dict['32x32x32']['t1'].shape[0], -1]
)
images_dict['128x128x128']['t1'] = images_dict['128x128x128']['t1'].reshape(
[images_dict['128x128x128']['t1'].shape[0], -1]
)
images_dict['32x32x32']['t2'] = images_dict['32x32x32']['t2'].reshape(
[images_dict['32x32x32']['t2'].shape[0], -1]
)
images_dict['128x128x128']['t2'] = images_dict['128x128x128']['t2'].reshape(
[images_dict['128x128x128']['t2'].shape[0], -1]
)
#@title The original T1 images have a total of 9175040 voxels.
from IPython.display import Markdown as md
md("The sizes for the 32x32x32 and 128x128x128 images are \
{} and {}, respectively. They represent {}% and \
{}% of the original size.".format(images_dict['32x32x32']['t1'].shape[1],
images_dict['128x128x128']['t1'].shape[1],
np.round(images_dict['32x32x32']['t1'].shape[1] / 9175040 * 100, 2),
np.round(images_dict['128x128x128']['t1'].shape[1] / 9175040 * 100, 2),
)
)
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
pca32 = PCA(n_components=2)
pca32.fit(
np.concatenate([
images_dict['32x32x32']['t1'][:30],
images_dict['32x32x32']['t2'][:30]
], 0)
)
# Some samples (usually ~10-20%) are used as validation data that will not
# be used to train the model.
t1_transform_train = pca32.transform(images_dict['32x32x32']['t1'][:30])
t2_transform_train = pca32.transform(images_dict['32x32x32']['t2'][:30])
t1_transform_valid = pca32.transform(images_dict['32x32x32']['t1'][30:])
t2_transform_valid = pca32.transform(images_dict['32x32x32']['t2'][30:])
plt.figure(figsize=(12,6))
blues = ['b' for _ in range(len(images_dict['32x32x32']['t1'][:30]))]
greens = ['g' for _ in range(len(images_dict['32x32x32']['t2'][:30]))]
reds = ['r' for _ in range(len(images_dict['32x32x32']['t1'][30:]))]
cyans = ['c' for _ in range(len(images_dict['32x32x32']['t2'][30:]))]
blue_patch = mpatches.Patch(color='b', label='T1 (train)')
green_patch = mpatches.Patch(color='g', label='T2 (train)')
red_patch = mpatches.Patch(color='r', label='T1 (valid)')
cyan_patch = mpatches.Patch(color='c', label='T2 (valid)')
plt.scatter(t1_transform_train[:, 0], t1_transform_train[:, 1], c=blues)
plt.scatter(t2_transform_train[:, 0], t2_transform_train[:, 1], c=greens)
plt.scatter(t1_transform_valid[:, 0], t1_transform_valid[:, 1], c=reds)
plt.scatter(t2_transform_valid[:, 0], t2_transform_valid[:, 1], c=cyans)
plt.title('PCA of images resized to 32x32x32')
plt.legend()
plt.xlabel('Component 1')
plt.ylabel('Component 2')
plt.legend(handles=[blue_patch, green_patch, red_patch, cyan_patch])
plt.show()
plt.close()
pca128 = PCA(n_components=2)
pca128.fit(
np.concatenate([
images_dict['128x128x128']['t1'][:30],
images_dict['128x128x128']['t2'][:30]
], 0)
)
t1_transform_train = pca128.transform(images_dict['128x128x128']['t1'][:30])
t2_transform_train = pca128.transform(images_dict['128x128x128']['t2'][:30])
t1_transform_valid = pca128.transform(images_dict['128x128x128']['t1'][30:])
t2_transform_valid = pca128.transform(images_dict['128x128x128']['t2'][30:])
plt.figure(figsize=(12,6))
plt.scatter(t1_transform_train[:, 0], t1_transform_train[:, 1], c=blues)
plt.scatter(t2_transform_train[:, 0], t2_transform_train[:, 1], c=greens)
plt.scatter(t1_transform_valid[:, 0], t1_transform_valid[:, 1], c=reds)
plt.scatter(t2_transform_valid[:, 0], t2_transform_valid[:, 1], c=cyans)
plt.title('PCA of images resized to 128x128x128')
plt.xlabel('Component 1')
plt.ylabel('Component 2')
plt.legend(handles=[blue_patch, green_patch, red_patch, cyan_patch])
plt.show()
plt.close()
#@title The original T1 images have a total of 9175040 voxels.
from IPython.display import Markdown as md
md("For the 128x128x128 voxel images, the first component of the PCA "
"explains ~{}% of the variance of the images and the second ~{}%. "
"For the 32x32x32 images, the first component explains {}% of the "
"variance and the second {}%".format(
np.round(pca128.explained_variance_ratio_[0] * 100, 2),
np.round(pca128.explained_variance_ratio_[1] * 100, 2),
np.round(pca32.explained_variance_ratio_[0] * 100, 2),
np.round(pca32.explained_variance_ratio_[1] * 100, 2),
))
```
## Basic machine learning classification model
The classification in this tutorial is trivial, so a simple linear model like a logistic regression classifier should be able to learn how to perfectly classify the images for both image sizes.
```
from sklearn.linear_model import LogisticRegression
print('32x32x32')
lr32 = LogisticRegression()
labels = [0 for x in range(len(images_dict['32x32x32']['t1'][:30]))] + \
[1 for x in range(len(images_dict['32x32x32']['t2'][:30]))]
labels_valid = [0 for x in range(len(images_dict['32x32x32']['t1'][30:]))] + \
[1 for x in range(len(images_dict['32x32x32']['t2'][30:]))]
lr32.fit(
np.concatenate([
images_dict['32x32x32']['t1'][:30],
images_dict['32x32x32']['t2'][:30]
], 0),
labels
)
# Predict on the training and validation images
preds_t1_train = lr32.predict(images_dict['32x32x32']['t1'][:30])
preds_t2_train = lr32.predict(images_dict['32x32x32']['t2'][:30])
preds_t1_valid = lr32.predict(images_dict['32x32x32']['t1'][30:])
preds_t2_valid = lr32.predict(images_dict['32x32x32']['t2'][30:])
# Labels: T1 images are 0s and T2 images are 1s
labels_t1_train = [0 for _ in preds_t1_train]
labels_t1_valid = [0 for _ in preds_t1_valid]
labels_t2_train = [1 for _ in preds_t2_train]
labels_t2_valid = [1 for _ in preds_t2_valid]
accuracy = sum([1 if pred == target else 0 for (pred, target) in zip(
np.concatenate((preds_t1_train, preds_t2_train)),
np.concatenate((labels_t1_train, labels_t2_train)))]
) / len(labels)
accuracy_valid = sum([1 if pred == target else 0 for (pred, target) in zip(
np.concatenate((preds_t1_valid, preds_t2_valid)),
np.concatenate((labels_t1_valid, labels_t2_valid)))]
) / len(labels_valid)
print('Train Accuracy: ', accuracy)
print('Valid Accuracy: ', accuracy_valid)
print('128x128x128')
lr128 = LogisticRegression()
labels = [0 for x in range(len(images_dict['128x128x128']['t1'][:30]))] + \
[1 for x in range(len(images_dict['128x128x128']['t2'][:30]))]
labels_valid = [0 for x in range(len(images_dict['128x128x128']['t1'][30:]))] + \
[1 for x in range(len(images_dict['128x128x128']['t2'][30:]))]
lr128.fit(
np.concatenate([
images_dict['128x128x128']['t1'][:30],
images_dict['128x128x128']['t2'][:30]
], 0),
labels
)
preds_t1_train = lr128.predict(images_dict['128x128x128']['t1'][:30])
preds_t2_train = lr128.predict(images_dict['128x128x128']['t2'][:30])
preds_t1_valid = lr128.predict(images_dict['128x128x128']['t1'][30:])
preds_t2_valid = lr128.predict(images_dict['128x128x128']['t2'][30:])
accuracy = sum([1 if pred == target else 0 for (pred, target) in zip(
np.concatenate((preds_t1_train, preds_t2_train)),
np.concatenate((labels_t1_train, labels_t2_train)))]
) / len(labels)
accuracy_valid = sum([1 if pred == target else 0 for (pred, target) in zip(
np.concatenate((preds_t1_valid, preds_t2_valid)),
np.concatenate((labels_t1_valid, labels_t2_valid)))]
) / len(labels_valid)
print('Train Accuracy: ', accuracy)
print('Valid Accuracy: ', accuracy_valid)
```
<a href="https://colab.research.google.com/github/zahraDehghanian97/Poetry_Generator/blob/master/Word_Poem_generator.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import tensorflow as tf
from tensorflow import keras
import numpy as np
import pickle
from nltk.metrics import accuracy, ConfusionMatrix
from nltk.translate.bleu_score import sentence_bleu
seqLength = 20
BATCH_SIZE = 64
BUFFER_SIZE = 100
embedding_dim = 256
rnn_units = 1024
```
# make data ready
```
filepath = "/content/drive/MyDrive/Colab Notebooks/my_shahname_represntation.txt"
with open(filepath, "rb") as f:
corpus , test = pickle.load(f)
corpus = corpus.replace("\t"," \t ").replace("\n", " \n ")
corpusList = [w for w in corpus.split(' ')]
corpus_words = [i for i in corpusList if i]
corpus_words = [w.strip() for w in corpus_words]  # strip whitespace from each token
vocab = sorted(set(corpus_words))
print(len(corpus_words))
vocab_size = len(vocab)
word2idx = {u: i for i, u in enumerate(vocab)}
idx2words = np.array(vocab)
word_as_int = np.array([word2idx[c] for c in corpus_words])
def split_input_target(chunk):
input_text = chunk[:-1]
target_text = chunk[1:]
return input_text, target_text
# examples_per_epoch = len(corpus_words)//(seqLength + 1)
wordDataset = tf.data.Dataset.from_tensor_slices(word_as_int)
sequencesOfWords = wordDataset.batch(seqLength + 1, drop_remainder=True)
dataset = sequencesOfWords.map(split_input_target)
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
```
# LSTM Model
```
def create_model_lstm(vocab_size, embedding_dim, rnn_units, batch_size):
model = tf.keras.Sequential()
model.add(tf.keras.layers.Embedding(vocab_size, embedding_dim,batch_input_shape=[batch_size, None]))
model.add(tf.keras.layers.LSTM(rnn_units,return_sequences=True,stateful=True,recurrent_initializer='glorot_uniform'))
model.add(tf.keras.layers.LSTM(rnn_units,return_sequences=True,stateful=True,recurrent_initializer='glorot_uniform'))
model.add(tf.keras.layers.Dense(vocab_size))
return model
lstm_model = create_model_lstm(vocab_size = len(vocab), embedding_dim=embedding_dim, rnn_units=rnn_units, batch_size=BATCH_SIZE)
lstm_model.compile(optimizer='adam', loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
history = lstm_model.fit(dataset, epochs=50)
main_lstm_model = create_model_lstm(vocab_size = len(vocab), embedding_dim=embedding_dim, rnn_units=rnn_units, batch_size=1)
main_lstm_model.set_weights(lstm_model.get_weights())
# main_lstm_model = tf.keras.models.load_model('/content/drive/MyDrive/Colab Notebooks/word_lstm.h5')
main_lstm_model.summary()
def generate_text(model, start_string):
num_generate = 200
start_string_list =[]
for w in start_string.split(' '):
if w in word2idx :
start_string_list.append(w)
input_eval = [word2idx[s] for s in start_string_list]
input_eval = tf.expand_dims(input_eval, 0)
text_generated = []
model.reset_states()
for i in range(num_generate):
predictions = model(input_eval)
predictions = tf.squeeze(predictions, 0)
predicted_id = tf.random.categorical(predictions, num_samples=1)[-1,0].numpy()
input_eval = tf.expand_dims([predicted_id], 0)
text_generated.append(idx2words[predicted_id])
return (start_string + ' '.join(text_generated))
print(generate_text(main_lstm_model, start_string=u"چنین گفت رستم به اسفندیار"))
main_lstm_model.save("/content/drive/MyDrive/Colab Notebooks/word_lstm.h5")
```
# Test
```
BLEU_scores = []
accuracy_scores = []
poem = test[0]
start = poem[:25]
generated_poem = generate_text(main_lstm_model, start_string=start)
BLEU_scores.append(sentence_bleu(poem, generated_poem))
len_min = min(len(poem),len(generated_poem))
accuracy_scores.append(accuracy(poem[:len_min], generated_poem[:len_min]))
print("-----------------------")
print("start sentence : ",start)
print(generated_poem)
print("BLEU score = ",BLEU_scores[-1])
print("Accuracy score = ",accuracy_scores[-1])
print("Confusion matrix =")
print(ConfusionMatrix(poem[:len_min], generated_poem[:len_min]))
counter = 0
for poem in test :
counter+=1
start = poem[:25]
generated_poem = generate_text(main_lstm_model, start_string=start)
BLEU_scores.append(sentence_bleu(poem, generated_poem))
len_min = min(len(poem),len(generated_poem))
accuracy_scores.append(accuracy(poem[:len_min], generated_poem[:len_min]))
print("-----------------------")
print("sentence number : ",counter)
print("BLEU score = ",BLEU_scores[-1])
print("Accuracy score = ",accuracy_scores[-1])
print("<<------------final report----------->>")
print("number of test set = ",len(test))
print("mean BLEU score = ",np.mean(BLEU_scores))
print("mean Accuracy score = ",np.mean(accuracy_scores))
```
```
# # If you are using Colab, uncomment and run the installs below.
# !pip install torch==0.4.1
# !pip install torchvision==0.2.1
# !pip install numpy==1.14.6
# !pip install matplotlib==2.1.2
# !pip install pillow==5.0.0
# !pip install opencv-python==3.4.3.18
# !pip install torchtext==0.3.1
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.optim as optim
import torch.nn.functional as F
# Use torchtext
from torchtext import data
from torchtext import vocab
from torchtext import datasets
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
# Call .to(device) on the data and the model to use GPU resources when available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device
```
# Text Generation
## Loading the Data
```
tokenize = lambda x: x.split()
# Set up the Field that handles preprocessing
# Field
TEXT = data.Field(sequential=True, tokenize=tokenize, lower=True, batch_first=True)
# Fetch the data
# The Penn Treebank dataset.
train_dataset, val_dataset, test_dataset = datasets.PennTreebank.splits(TEXT)
TEXT.build_vocab(train_dataset, vectors=vocab.GloVe(name='6B', dim=300))
# Total number of words in the vocabulary
vocab_size = len(TEXT.vocab)
print(vocab_size)
# Top 10 words by frequency
print(TEXT.vocab.freqs.most_common(10))
# Words
print(TEXT.vocab.itos[:10])
# Get the embedding vectors
word_embeddings = TEXT.vocab.vectors
# Hyperparameters
embedding_length = 300
hidden_size = 256
batch_size = 32
# BPTTIterator builds iterators for language modelling.
# Each batch has text and target attributes.
train_iter, val_iter, test_iter = data.BPTTIterator.splits((train_dataset, val_dataset, test_dataset)
, batch_size=32, bptt_len=30, repeat=False)
print(len(train_iter))
print(len(val_iter))
print(len(test_iter))
for i, batch in enumerate(train_iter):
print("データの形状確認")
print(batch.text.size())
print(batch.target.size())
# As of 2018/11/24, BPTTIterator does not return batch-first tensors; pull request #462 addresses this.
print("Use permute to put the batch dimension first")
print(batch.text.permute(1, 0).size())
print(batch.target.permute(1, 0).size())
print("データ目の形状とデータを確認")
text = batch.text.permute(1, 0)
target = batch.target.permute(1, 0)
print(text[1,:].size())
print(target[1,:].size())
print(text[1,:].tolist())
print(target[1,:].tolist())
print("データの単語列を表示")
print([TEXT.vocab.itos[data] for data in text[1,:].tolist()])
print([TEXT.vocab.itos[data] for data in target[1,:].tolist()])
break
```
## Defining the Network
```
class LstmLangModel(nn.Module):
def __init__(self, batch_size, hidden_size, vocab_size, embedding_length, weights):
super(LstmLangModel, self).__init__()
self.batch_size = batch_size
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.embed = nn.Embedding(vocab_size, embedding_length)
self.embed.weight.data.copy_(weights)
self.lstm = nn.LSTM(embedding_length, hidden_size, batch_first=True)
self.fc = nn.Linear(hidden_size, vocab_size)
def forward(self, x, h):
x = self.embed(x)
output_seq, (h, c) = self.lstm(x, h)
# Reshape the output to (batch_size*sequence_length, hidden_size)
out = output_seq.reshape(output_seq.size(0)*output_seq.size(1), output_seq.size(2))
out = self.fc(out)
return out, (h, c)
net = LstmLangModel(batch_size, hidden_size, vocab_size, embedding_length, word_embeddings)
net = net.to(device)
# Define the loss function and the optimizer
criterion = nn.CrossEntropyLoss()
optim = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()))
```
## Training
```
num_epochs = 200
train_loss_list = []
# Truncated backpropagation
# Detach the hidden states so backpropagation is cut off partway through the sequence
def detach(states):
return [state.detach() for state in states]
for epoch in range(num_epochs):
train_loss = 0
# Set the initial hidden and cell states
states = (torch.zeros(1, batch_size, hidden_size).to(device),
torch.zeros(1, batch_size, hidden_size).to(device))
#train
net.train()
for i, batch in enumerate(train_iter):
text = batch.text.to(device)
labels = batch.target.to(device)
# Put the batch dimension first to match the LSTM input shape.
text = text.permute(1, 0)
labels = labels.permute(1, 0)
optim.zero_grad()
states = detach(states)
outputs, states = net(text, states)
loss = criterion(outputs, labels.reshape(-1))
train_loss += loss.item()
loss.backward()
optim.step()
avg_train_loss = train_loss / len(train_iter)
print ('Epoch [{}/{}], Loss: {loss:.4f}, Perplexity: {perp:5.2f}'
.format(epoch+1, num_epochs, i+1, loss=avg_train_loss, perp=np.exp(avg_train_loss)))
train_loss_list.append(avg_train_loss)
```
## Generation
```
num_samples = 1000 # Number of words to sample
# Put the model in evaluation mode
net.eval()
with torch.no_grad():
text = ""
# Set the initial hidden and cell states
states = (torch.zeros(1, 1, hidden_size).to(device),
torch.zeros(1, 1, hidden_size).to(device))
# Randomly pick one word ID as the seed
input = torch.multinomial(torch.ones(vocab_size), num_samples=1).unsqueeze(1).to(device)
# print("input word", TEXT.vocab.itos[input])
for i in range(num_samples):
# print("input word", TEXT.vocab.itos[input])
output, states = net(input, states)
word_id = output.max(1)[1].item()
# Feed the sampled word ID as the input for the next time step
input.fill_(word_id)
# Look up the word from its ID
word = TEXT.vocab.itos[word_id]
# Append it to the generated text
word = '\n' if word == '<eos>' else word + ' '
text += word
# Print the generated text
print(text)
```
# Top coding, bottom coding and zero coding
## Outliers
An outlier is a data point which is significantly different from the remaining data. “An outlier is an observation which deviates so much from the other observations as to arouse suspicions that it was generated by a different mechanism.” [D. Hawkins. Identification of Outliers, Chapman and Hall , 1980].
Statistics such as the mean and variance are very susceptible to outliers. In addition, **some Machine Learning models are indeed sensitive to outliers and their performance might be impaired by them**. Thus, it is common practice to engineer the features to minimise the impact of outliers on the performance of these algorithms.
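As a quick illustration of this sensitivity (a minimal sketch with made-up numbers), a single extreme value can shift the mean far more than the median:
```
import numpy as np

values = np.array([102, 98, 101, 99, 100, 1000])  # the last value is an outlier

print("mean without the outlier:", values[:-1].mean())   # 100.0
print("mean with the outlier:   ", values.mean())        # 250.0
print("median with the outlier: ", np.median(values))    # 100.5
```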
### Nature of outliers
- Genuine extremely high or extremely low values
- Introduced due to mechanical error (wrong measurement)
- Introduced by replacing missing values (NA) by a value out of the distribution (as described in previous lectures)
In some cases, the presence of outliers is informative, and therefore they deserve further study. In this course I will tackle the engineering of those values that do not add any particular extra information, and could as well be eliminated.
## How can we pre-process outliers?
- Mean/median imputation or random sampling
- Discretisation
- Discard the outliers: process also called Trimming
- Top-coding, bottom-coding and zero-coding: also known as winsorisation
### Mean/median imputation or random sampling
If we have reason to believe that the outliers are due to mechanical error or problems during measurement, that is, if the outliers are similar in nature to missing data, then any of the methods discussed for missing data can be applied to replace them. Because the number of outliers is by nature small (otherwise they would not be outliers), it is reasonable to use mean/median imputation to replace them.
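The snippet below is a minimal, illustrative sketch of this idea (it is not part of the original lecture code): it flags values outside the inter-quantile boundaries described further down and replaces them with the median, assuming a pandas Series holding a numerical variable.
```
import pandas as pd

def replace_outliers_with_median(series: pd.Series, factor: float = 3) -> pd.Series:
    # boundaries based on the inter-quantile range rule described later in this notebook
    iqr = series.quantile(0.75) - series.quantile(0.25)
    upper = series.quantile(0.75) + factor * iqr
    lower = series.quantile(0.25) - factor * iqr
    # keep values inside the boundaries, replace the rest with the median
    # (note: missing values would also be replaced by the median here)
    return series.where(series.between(lower, upper), series.median())
```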
### Discretisation
Discretisation is the transformation of continuous variables into discrete variables. It involves assigning the variable values into defined groups. For example, for the variable age, we could group the observations (people) into buckets / groups like: 0-20, 21-40, 41-60, > 61. This grouping of the variables in ranges is called discretisation. As you can see, any outlier (extremely high) value of age would be included in the > 61 group, therefore minimising its impact. I will discuss more on the different discretisation methods in the "Discretisation" section of this course.
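A minimal illustrative sketch, assuming pandas and the age buckets from the example above:
```
import pandas as pd

ages = pd.Series([5, 25, 43, 70, 95])
# assign each age to one of the buckets 0-20, 21-40, 41-60, > 61
age_groups = pd.cut(ages, bins=[0, 20, 40, 60, 200],
                    labels=['0-20', '21-40', '41-60', '>61'])
print(age_groups)
```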
### Trimming
Trimming refers to the removal of the extreme values of a sample. In this procedure, the outliers are identified and those observations are removed from the sample. On the downside, these observations may contain useful information for the other variables in the dataset. Thus, we may often choose not to remove them, and instead handle the outliers by top / bottom coding as described below.
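A minimal illustrative sketch of trimming, assuming a DataFrame `df` with a numerical column; the 1.5 IQR factor is the conventional choice discussed later in this notebook:
```
import pandas as pd

def trim_outliers(df: pd.DataFrame, col: str, factor: float = 1.5) -> pd.DataFrame:
    iqr = df[col].quantile(0.75) - df[col].quantile(0.25)
    upper = df[col].quantile(0.75) + factor * iqr
    lower = df[col].quantile(0.25) - factor * iqr
    # keep only the observations inside the boundaries
    return df[df[col].between(lower, upper)]
```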
## Top-coding, bottom-coding and zero-coding.
**Top-coding**, widely used in econometrics and statistics, means capping the maximum of a distribution at an arbitrarily set value. A top-coded variable is one for which data points whose values are above an upper bound are censored. In practical terms, this means that all values above the upper bound will be arbitrarily set to the upper bound.
Top-coding is common practice in survey data before it is released to the public. It is used to preserve the anonymity of respondents. For example, high earners may be easily identifiable by their earnings. By implementing top-coding, that outlier is capped at a certain maximum value and therefore looks like many other observations; it is no longer uniquely identifiable. Top-coding can also be applied to prevent possibly-erroneous outliers from being published.
Bottom-coding is analogous, but on the left side of the distribution. That is, all values below a certain threshold are capped at that threshold. If the threshold is zero, it is known as **zero-coding**, e.g. if amounts below zero are reported as zero. Good examples would be the variables "age" or "earnings". It is not possible to have a negative age or a negative salary, thus it is reasonable to cap the lowest values at zero. Any observation with a value under zero must have been introduced by mistake.
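Before the real-life demonstrations below, here is a small generic sketch of capping with numpy / pandas; the series and the thresholds are made up for illustration only:
```
import numpy as np
import pandas as pd

s = pd.Series([-3, 0, 15, 50, 120, 400])
top_coded = np.where(s > 100, 100, s)      # top-coding at 100
zero_coded = np.where(s < 0, 0, s)         # zero-coding (bottom-coding at 0)
capped = s.clip(lower=0, upper=100)        # both at once with pandas
print(top_coded, zero_coded, capped.tolist())
```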
Top-coding and bottom-coding are indeed used in practice to remove outliers of variables and therefore prevent model over-fitting. For an example in a financial institution, look at my talk in [pydata](https://www.google.co.uk/url?sa=t&rct=j&q=&esrc=s&source=web&cd=2&cad=rja&uact=8&ved=0ahUKEwiEtaG7p6fXAhVI2hoKHWqQBsMQtwIILTAB&url=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DKHGGlozsRtA&usg=AOvVaw13tQ7UEv3w1k_RLsEbB3aB).
#### Note
Top-coding may affect estimates of the standard errors of the variable, or change the variable distribution, by censoring those values at the far end of the tails.
### Identifying outliers
#### Extreme Value Analysis
The most basic form of outlier detection is Extreme Value Analysis of 1-dimensional data. The key for this method is to determine the statistical tails of the underlying distribution of the variable, and then find the values that sit at the very end of the tails.
In the typical scenario, the distribution of the variable is Gaussian and thus outliers will lie outside the mean plus or minus 3 times the standard deviation of the variable.
If the variable is not normally distributed, a general approach is to calculate the quantiles, and then the interquantile range (IQR), as follows:
IQR = 75th quantile - 25th quantile
An outlier will sit outside the following upper and lower boundaries:
Upper boundary = 75th quantile + (IQR * 1.5)
Lower boundary = 25th quantile - (IQR * 1.5)
or for extreme cases:
Upper boundary = 75th quantile + (IQR * 3)
Lower boundary = 25th quantile - (IQR * 3)
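A minimal sketch of both rules, assuming a pandas Series `s`:
```
import pandas as pd

def outlier_boundaries(s: pd.Series):
    # Gaussian assumption: mean plus or minus 3 standard deviations
    gaussian = (s.mean() - 3 * s.std(), s.mean() + 3 * s.std())
    # Skewed distributions: inter-quantile range rule
    iqr = s.quantile(0.75) - s.quantile(0.25)
    iqr_rule = (s.quantile(0.25) - 1.5 * iqr, s.quantile(0.75) + 1.5 * iqr)
    extreme = (s.quantile(0.25) - 3 * iqr, s.quantile(0.75) + 3 * iqr)
    return gaussian, iqr_rule, extreme
```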
Below I will demonstrate top-coding in real-life datasets. We have seen an intuition of how this improves machine learning algorithms in the lecture "Outliers" in the section "Type of problems within variables".
## Real Life example:
### Predicting Survival on the Titanic: understanding society behaviour and beliefs
Perhaps one of the most infamous shipwrecks in history, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 people on board. Interestingly, by analysing the probability of survival based on few attributes like gender, age, and social status, we can make very accurate predictions on which passengers would survive. Some groups of people were more likely to survive than others, such as women, children, and the upper-class. Therefore, we can learn about the society priorities and privileges at the time.
### Lending Club
**Lending Club** is a peer-to-peer Lending company based in the US. They match people looking to invest money with people looking to borrow money. When investors invest their money through Lending Club, this money is passed onto borrowers, and when borrowers pay their loans back, the capital plus the interest passes on back to the investors. It is a win for everybody as they can get typically lower loan rates and higher investor returns.
If you want to learn more about Lending Club follow this link:
https://www.lendingclub.com/
The Lending Club dataset contains complete loan data for all loans issued through the 2007-2015, including the current loan status (Current, Late, Fully Paid, etc.) and latest payment information. Features (aka variables) include credit scores, number of finance inquiries, address including zip codes and state, and collections among others. Collections indicates whether the customer has missed one or more payments and the team is trying to recover their money.
The file is a matrix of about 890 thousand observations and 75 variables. More detail on this dataset can be found in Kaggle's website: https://www.kaggle.com/wendykan/lending-club-loan-data
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from sklearn.model_selection import train_test_split
pd.set_option('display.max_columns', None) # to display the total number columns present in the dataset
```
## Titanic dataset
```
# let's load the titanic dataset
data = pd.read_csv('titanic.csv')
data.head()
```
### Important note on top-coding
Top-coding and bottom-coding, as any other feature pre-processing step, should be determined over the training set, and then transferred onto the test set. This means that we should find the upper and lower bounds in the training set only, and use those bands to cap the values in the test set.
```
# divide dataset into train and test set
X_train, X_test, y_train, y_test = train_test_split(data, data.Survived,
test_size=0.3,
random_state=0)
X_train.shape, X_test.shape
```
There are 2 numerical variables in this dataset, Fare and Age. So let's go ahead and find out whether there are values that we could consider outliers
### Fare
```
# First let's plot a histogram to get an idea of the distribution
fig = X_train.Fare.hist(bins=50)
fig.set_title('Fare Paid Distribution')
fig.set_xlabel('Fare')
fig.set_ylabel('Number of Passengers')
sns.kdeplot(X_train.Fare)
```
Because the distribution of Fare is skewed, we should estimate outliers using the quantile method instead of the Gaussian distribution.
```
# visualising outliers using boxplots and whiskers, which provides the quantiles
# and inter-quantile range, with the outliers sitting outside the error bars.
# All the dots in the plot below are outliers according to the 1.5 IQR rule
fig = sns.boxplot(y='Fare', data=X_train)
fig.set_xlabel('Fare')
fig.set_ylabel('Number of Passengers')
```
The outliers, according to the above plot, lie all at the right side of the distribution. This is, some people paid extremely high prices for their tickets.
Therefore, in this variable, only extremely high values will affect the performance of our machine learning models, so we need to do top-coding. Bottom-coding is not necessary in this case, at least not to improve the performance of the machine learning algorithms.
```
# let's look at the values of the quantiles so we can calculate the upper and lower boundaries for the outliers
X_train.Fare.describe()
# top coding: upper boundary for outliers according to interquantile proximity rule
IQR = X_train.Fare.quantile(0.75) - X_train.Fare.quantile(0.25)
Upper_fence = X_train.Fare.quantile(0.75) + (IQR * 3)
Upper_fence
```
The upper boundary, above which every value is considered an outlier, is a Fare of 100 dollars.
```
# let's look at the actual number of passengers that paid more than USD 100
print('total passengers: {}'.format(X_train.shape[0]))
print('passengers that paid more than 100: {}'.format(X_train[X_train.Fare>100].shape[0]))
print('percentage of outliers: {}'.format(X_train[X_train.Fare>100].shape[0]/float(X_train.shape[0])))
# top-coding: capping the variable Fare at 100
X_train.loc[X_train.Fare>100, 'Fare'] = 100
X_test.loc[X_test.Fare>100, 'Fare'] = 100
X_train.Fare.max(), X_test.Fare.max()
```
This is all we need to remove outliers from a machine learning perspective.
However, note that in the dataset, there are also a few passengers that paid zero for their tickets
```
X_train[X_train.Fare==0].shape
X_train[X_train.Fare==0]
```
The majority of them do not have a Cabin assigned, and could therefore have jumped on the boat illegally. Alternatively, it could be that the information could not be retrieved, so we do not know how much they paid. But we do know that the cheapest ticket was 5 dollars, see below:
```
X_train[X_train.Fare!=0]['Fare'].min()
```
In situations like this, it is best to discuss with the data owner (in business, someone who knows the data well) the nature of the data, and the importance of the variable.
If the 0 values in this case mean that the data could not be retrieved properly, and therefore is in nature an NaN, one could choose to replace them by a random sample or mean/median imputation, or to do bottom-coding.
If the case of zero corresponds otherwise to people jumping on the boat illegally, one may choose to leave them as zero.
### Age
```
# First let's plot the histogram to get an idea of the distribution
fig = X_train.Age.hist(bins=50)
fig.set_title('Age Distribution')
fig.set_xlabel('Age')
fig.set_ylabel('Number of Passengers')
sns.kdeplot(X_train.Age)
```
Although it does not look strictly normal, we could assume normality and use the Gaussian approach to find outliers. See below.
```
# now let's plot the boxplots and whiskers
fig = sns.boxplot(y='Age', data=X_train)
fig.set_xlabel('Age')
fig.set_ylabel('Number of Passengers')
```
Again, for this variable the outliers lie only on the right of the distribution. Therefore we only need to introduce top-coding.
```
# and let's get the numbers to calculate the upper boundary
X_train.Age.describe()
# Assuming normality
Upper_boundary = X_train.Age.mean() + 3* X_train.Age.std()
Upper_boundary
# let's find out whether there are outliers according to the above boundaries
# remember that Age has ~ 20% missing values
total_passengers = float(X_train.shape[0])
print('fraction of passengers with Age recorded: {}'.format(X_train.Age.dropna().shape[0]/total_passengers))
print('passengers older than 73 (Gaussian approach): {}'.format(X_train[X_train.Age>73].shape[0]/total_passengers))
X_train.loc[X_train.Age>73, 'Age'] = 73
X_test.loc[X_test.Age>73, 'Age'] = 73
X_train.Age.max(), X_test.Age.max()
```
In the test set, there were no outliers, as the maximum Age value is 70, below the value we used to cap outliers.
## Loan book from Lending Club
```
# we will examine only the income variable, as this is one that typically shows outliers.
# a few people are high earners, while the rest of the borrowers fall within a normal-ish distribution
data = pd.read_csv('loan.csv', usecols=['annual_inc'], nrows=30000)
data.head()
fig = data.annual_inc.hist(bins=500)
fig.set_xlim(0,500000)
sns.boxplot(y='annual_inc', data=data)
```
As expected, outliers sit on the right of the distribution. Therefore, we will perform top-coding.
```
data.annual_inc.describe()
# because the distribution is not completely normal, I choose to examine outliers with the interquantile
# range
IQR = data.annual_inc.quantile(0.75) - data.annual_inc.quantile(0.25)
Upper_fence = data.annual_inc.quantile(0.75) + (IQR * 1.5)
Upper_fence_ext = data.annual_inc.quantile(0.75) + (IQR * 3)
Upper_fence, Upper_fence_ext
# let's look at the percentage of high earners within each extreme bucket
total_borrowers = float(data.shape[0])
print('total borrowers: {}'.format(data.annual_inc.shape[0]/total_borrowers))
print('borrowers that earn > 146k: {}'.format(data[data.annual_inc>146000].shape[0]/total_borrowers))
print('borrowers that earn > 210k: {}'.format(data[data.annual_inc>210000].shape[0]/total_borrowers))
# top-coding
data['annual_capped'] = np.where(data.annual_inc>210000, 210000, data.annual_inc)
data.describe()
```
We see the effect of capping on the overall distribution of the variable. The standard deviation is smaller, and so is the maximum value.
```
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
data.annual_inc.plot(kind='kde', ax=ax)
data.annual_capped.plot(kind='kde', ax=ax, color = 'red')
lines, labels = ax.get_legend_handles_labels()
labels = ['Income original', 'Income capped']
ax.legend(lines, labels, loc='best')
ax.set_xlim(0,500000)
```
We can observe the effect of top-coding on the variable distribution. The maximum value now corresponds to the value we set as the cap, and we observe a peak at that value, indicating that people who earn more than the cap are now grouped together at the capped maximum salary.
**That is all for this demonstration. I hope you enjoyed the notebook, and see you in the next one.**
```
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
titanic = pd.read_csv('./titanic.csv')
titanic.head(3)
```
## Summary statistics
### Summarizing numerical data
- .mean()
- .median()
- .min()
- .max()
- .var()
- .std()
- .sum()
- .quantile()
```
titanic['Age'].mean()
titanic['Age'].mode()
titanic.Age.min()
titanic.Age.max()
titanic['Age'].var() #<--Return unbiased variance over requested axis.
titanic['Age'].quantile() #<--Return values at the given quantile over requested axis.
titanic['Age'].std()
titanic['Age'].sum()
```
### summarizing dates
### .agg() method
##### on Single column
```
def pct30(column): return column.quantile(0.3)
titanic['Age'].agg(pct30)#<-- applying agg() on a column using simple function
titanic['Age'].agg(lambda x: x.quantile(.3)) #<-- using lambda function
```
##### on multiple column
```
titanic[['Age', 'Fare']].agg(lambda x: x.quantile(0.3))
```
##### multiple summaries
```
def pct30(column): return column.quantile(0.3)
def pct40(column): return column.quantile(0.4)
titanic['Age'].agg([pct30,pct40])
```
### cumulative statistics
- .cumsum()
- .cummax()
- .cummin()
- .cumprod()
```
pd.DataFrame(titanic['Age'].cumsum()).head(4)
```
## Counting
#### Dropping duplicate names
```
titanic.drop_duplicates(subset = "Pclass")
titanic.drop_duplicates(subset = ["Pclass", 'SibSp'])
```
#### .value_counts()
```
pd.DataFrame(titanic['Age'].value_counts())
pd.DataFrame(titanic['Age'].value_counts(sort=True))
pd.DataFrame(titanic['Age'].value_counts(normalize=True))
```
## Group summary statistics
```
titanic[titanic['Sex'] == 'male']['Age'].mean()
titanic[titanic['Sex'] == 'female']['Age'].mean()
titanic.groupby('Sex')['Age'].mean()
titanic.groupby(['Survived', 'Sex'])['Age'].count() # < -- multiple group
titanic.groupby('Sex')['Age'].agg(['count', 'min', 'max'])# <-- multiple stats
titanic.groupby(['Survived', 'Sex'])[['Age', 'SibSp']].mean()
titanic.groupby(['Survived', 'Sex'])[['Age', 'SibSp']].agg(['count', 'min', 'max'])
```
## Pivot tables
**Signature**:
titanic.pivot_table(
values=None,
index=None,
columns=None,
aggfunc='mean',
fill_value=None,
margins=False,
dropna=True,
margins_name='All',
observed=False,
)
```
titanic.groupby('Sex')['Age'].mean()
#pivot and implicitly use the default aggfunc=np.mean
titanic.pivot_table(values = 'Age', index='Sex')
#explicitly define statistics i:e np.median
titanic.pivot_table(values= 'Age', index='Sex', aggfunc=np.median)
#multiple statistics
titanic.pivot_table(values='Age', index='Sex', aggfunc=[np.std, np.median])
```
#### pivot on two variables
```
#in groupby
# titanic.groupby(['Survived','Sex'])['Age'].mean().unstack()
#pivot on two variables
titanic.pivot_table(values='Age', index='Sex', columns='Survived')
```
#### filling missing values in pivot table
```
titanic.pivot_table(values='Age', index='Sex', columns='Survived', fill_value=0)
```
#### summing with pivot table
```
titanic.pivot_table(values='Age',
index='Sex',
columns='Survived',
fill_value=0,
margins=True)
titanic.pivot_table(values='Age',
index='Sex',
columns='Survived',
fill_value=0,
margins=True,
margins_name='mean')
```
# EnKF Assumption Experiments
### Keiran Suchak
Assumptions to test:
* Normality of prior
* Normality of likelihood
* Subsequent normality of posterior
This notebook will make use of the `multivariate_normality()` function from the `pingouin` package to perform multidimensional normality tests.
## Imports
```
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
import pingouin as pg
import seaborn as sns
import sys
%matplotlib inline
sys.path.append('../../../stationsim/')
from ensemble_kalman_filter import EnsembleKalmanFilter, EnsembleKalmanFilterType
from stationsim_gcs_model import Model
np.random.seed(28)
```
## Functions
```
def tidy_dataframe(df, independent_col: str, dependent_cols: list):
output = list()
for i, row in df.iterrows():
for col in dependent_cols:
d = {independent_col: row[independent_col],
'variable': col,
'value': row[col]}
output.append(d)
output = pd.DataFrame(output)
return output
```
## Experiment 0: Testing `pg.multivariate_normality()`
Create a sample of 5000 $x$-$y$ coordinates from a 2-dimensional normal distribution.
```
mean = [0, 0]
cov = [[1, 0], [0, 100]]
x, y = np.random.multivariate_normal(mean, cov, 5000).T
```
Plot samples in $x$-$y$ space.
```
plt.figure()
plt.plot(x, y, 'x')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
```
Test for normality.
```
X = pd.DataFrame({'x': x, 'y': y})
pg.multivariate_normality(X, alpha=0.05)
```
The test did not find sufficient evidence to reject the null hypothesis, i.e. the data are normally distributed.
Let us now consider data drawn from a distribution that is not gaussian.
In this case, we draw the $x$-$y$ coordinates from two uniform distributions, \[0.0, 1.0\).
```
x, y = np.random.random_sample((2, 5000))
```
Plot samples in $x$-$y$ space.
```
plt.figure()
plt.plot(x, y, 'x')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
```
Test for normality.
```
X = pd.DataFrame({'x': x, 'y': y})
pg.multivariate_normality(X, alpha=0.05)
```
The test correctly finds sufficient evidence to reject the null hypothesis that the data are normally distributed.
We can make a couple of functions to generate normally- and uniformly-distributed samples of arbitrary size and check that the test works for different sample sizes.
```
def normal_sample_2d(N):
mean = [0, 0]
cov = [[1, 0], [0, 100]]
x, y = np.random.multivariate_normal(mean, cov, N).T
X = pd.DataFrame({'x': x, 'y': y})
return X
def uniform_sample_2d(N):
x, y = np.random.random_sample((2, N))
X = pd.DataFrame({'x': x, 'y': y})
return X
def test_multidim_normality(X):
t = pg.multivariate_normality(X)
return t.normal
```
Now we can run through a collection of different sample sizes, each time generating that number of random samples from both normal and uniform distributions and testing whether `pg.multivariate_normality()` found the samples to be normally distributed, or whether sufficient evidence was found to reject the null hypothesis.
The sample sizes to be used are `[10, 20, 50, 100, 200, 500, 1000, 2000]`.
This selection has been chosen to observe how the test performs on different scales of sample size.
In each case, the testing process shall be run $20$ times to account for the randomness of the samples and the fact that the test may incorrectly consider normally distributed data to be non-normal (or vice-versa).
```
results = list()
sample_sizes = [10, 20, 50, 100, 200, 500, 1000, 2000]
n_runs = 20
for ss in sample_sizes:
for _ in range(n_runs):
d = {'sample_size': ss}
normal_sample = normal_sample_2d(ss)
uniform_sample = uniform_sample_2d(ss)
d['gaussian'] = test_multidim_normality(normal_sample)
d['non-gaussian'] = test_multidim_normality(uniform_sample)
results.append(d)
```
Let's convert these results into a dataframe.
```
results = pd.DataFrame(results)
results.head()
```
We can now find the proportion of cases in each scenario for which the test correctly accepted/rejected the null hypothesis.
```
proportions = list()
for ss in sample_sizes:
tdf = results.loc[results['sample_size']==ss, ]
d = {'sample_size': ss}
d['gaussian'] = tdf['gaussian'].sum() / len(tdf['gaussian'])
d['non-gaussian'] = tdf['non-gaussian'].sum() / len(tdf['non-gaussian'])
proportions.append(d)
```
Again, converting this to a dataframe.
```
proportions = pd.DataFrame(proportions)
proportions.head()
plt.figure()
plt.semilogx(proportions['sample_size'], proportions['gaussian'],
label='np.random.multivariate_normal')
plt.semilogx(proportions['sample_size'], proportions['non-gaussian'],
label='np.random.random_sample')
plt.xlabel('Sample size')
plt.ylabel('Proportion accepted as gaussian')
plt.legend()
plt.show()
```
From the above figure, we can see that the test correctly identifies data from `np.random.multivariate_normal()` as gaussian the majority of the time for all sample sizes.
We can also see that, for very small sample sizes (i.e. $N<50$), the test typically does not find sufficient evidence to reject the null hypothesis of normality for non-gaussian data.
We should, therefore, ensure that our sample sizes are sufficiently large when using the test with data from the Ensemble Kalman Filter.
It is also worth considering how this scales with the dimensions of the data - when working with state vectors from the Ensemble Kalman Filter, we consider our sample size to be the filter's ensemble size and twice the model population size to be our number of dimensions.
In order to test this, we will need updated versions of the functions used to generate samples - the new versions of these functions should generalise such that we can generate $m$-dimensional data samples.
```
def __convert_to_df(Y, m):
d = {'var_{0}'.format(i): Y[i] for i in range(m)}
X = pd.DataFrame(d)
return X
def normal_sample_d(N, m):
mean = np.zeros(m)
cov = np.identity(m)
Y = np.random.multivariate_normal(mean, cov, N).T
X = __convert_to_df(Y, m)
return X
def uniform_sample_d(N, m):
Y = np.random.random_sample((m, N))
X = __convert_to_df(Y, m)
return X
```
Now that we have constructed the functions, let us test them out with $200$ samples from $10$-dimensional distributions.
We can start by sampling from the multivariate normal distribution:
```
z = normal_sample_d(200, 10)
z
z.shape
```
And just to check, we can plot the first $2$ dimensions.
```
plt.figure()
plt.plot(z['var_0'], z['var_1'], 'x')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
```
Similarly we can sample from the uniform distribution.
```
z = uniform_sample_d(200, 10)
z
```
And we can, again, plot the first $2$ dimensions.
```
plt.figure()
plt.plot(z['var_0'], z['var_1'], 'x')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
```
Well it looks like these functions work!
Now we can make use of them in conjunction with `pg.multivariate_normality()` to see how the normality test responds to different population sizes and ensemble sizes (otherwise referred to as different dimensionalities and sample sizes).
```
results = list()
sample_sizes = [50, 100, 200, 500, 1000]
dimensionalities = list(range(5, 105, 5))
n_runs = 20
for ss in sample_sizes:
for dimensionality in dimensionalities:
print(f'Running sample size={ss}, dimensionality={dimensionality}')
for _ in range(n_runs):
# Make dictionary for gaussian data
normal_sample = normal_sample_d(ss, dimensionality)
test_result = test_multidim_normality(normal_sample)
d = {'sample_size': ss,
'dimensionality': dimensionality,
'kind': 'gaussian',
'result': test_result}
results.append(d)
# Make dictionary for non-gaussian data
uniform_sample = uniform_sample_d(ss, dimensionality)
test_result = test_multidim_normality(uniform_sample)
d = {'sample_size': ss,
'dimensionality': dimensionality,
'kind': 'non-gaussian',
'result': test_result}
results.append(d)
```
Convert results to a dataframe.
```
results = pd.DataFrame(results)
results.head()
results['dimensionality'].unique()
```
We now wish to visualise how the number of sample sets accepted as gaussian varies with the sample size and the dimensionality of the data.
To do this, we first create a filtered dataset where we filter out the rows for which the tests returned false.
```
results_f = results.loc[results['result']==True, ['sample_size', 'dimensionality', 'kind']]
results_f.head()
results_f['dimensionality'].unique()
```
We can now create our kde-plot, segregating the data for gaussian and non-gaussian samples.
```
g = sns.FacetGrid(results_f, col='dimensionality', col_wrap=4)
g.map_dataframe(sns.histplot, x='sample_size', hue='kind', kde=True, log_scale=True)
g.set_axis_labels('Sample size', 'Gaussian sample sets')
g.add_legend()
```
## Experiment 1: Pure forecasting
In this experiment, we will not be assimilating any data, i.e. each step of the model ensemble will only consist of the forecast process.
Following each forecast step, the ensemble will be tested for normality using the `multivariate_normality()` function from the `pingouin` package.
In order to keep the process simple at this stage, a small population of agents will be used, allowing us to use a large ensemble size.
This will act as a preliminary experiment towards demonstrating the normality of the ensemble prior distribution.
```
results = list()
# Set up filter parameters
ensemble_size = 50
pop_size = 2
state_vec_length = 2 * pop_size
# Initialise filter with StationSim and params
filter_params = {'vanilla_ensemble_size': ensemble_size,
'state_vector_length': state_vec_length,
'mode': EnsembleKalmanFilterType.STATE}
model_params = {'pop_total': pop_size,
'station': 'Grand_Central',
'do_print': False}
enkf = EnsembleKalmanFilter(Model, filter_params, model_params,
filtering=False, benchmarking=True)
while enkf.active:
enkf.baseline_step()
results.append(enkf.vanilla_state_ensemble.copy())
len(results)
all_xs = list()
all_ys = list()
for i in range(10, len(results), len(results)//10):
state = results[i]
xs = state[::2]
ys = state[1::2]
all_xs.append(xs)
all_ys.append(ys)
plt.figure()
plt.scatter(xs, ys, s=5, marker='.')
plt.xlim((0, 750))
plt.ylim((0, 750))
plt.show()
norm_results = list()
for state in results:
stateT = state.T
normality = pg.multivariate_normality(stateT, alpha=1)
norm_results.append(normality.normal)
sum(norm_results)
print(all_xs[0])
print(all_ys[0])
for xs in all_xs:
plt.figure()
for x in xs:
plt.hist(x, alpha=0.5)
plt.show()
for ys in all_ys:
plt.figure()
for y in ys:
plt.hist(y, alpha=0.5)
plt.show()
```
# What’s New In Python 3.10
> **See also:**
>
> * [What’s New In Python 3.10](https://docs.python.org/3.10/whatsnew/3.10.html)
```
import sys
assert sys.version_info[:2] >= (3, 10)
```
## Better error messages
### Syntax Errors
* When parsing code that contains unclosed parentheses or brackets the interpreter now includes the location of the unclosed bracket or parenthesis instead of displaying `SyntaxError: unexpected EOF` (a short illustrative snippet follows this list).
* `SyntaxError` exceptions raised by the interpreter will now highlight the full error range of the expression that constitutes the syntax error itself, instead of just where the problem is detected.
* Specialised messages for `SyntaxError` exceptions have been added e.g. for
* missing `:` before blocks
  * unparenthesised tuples in comprehension targets
* missing commas in collection literals and between expressions
* missing `:` and values in dictionary literals
* usage of `=` instead of `==` in comparisons
* usage of `*` in f-strings
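A small illustrative cell for the unclosed-bracket case mentioned in the first bullet; it compiles a string with an unclosed bracket and prints whatever message the interpreter produces, since the exact wording is version-dependent:
```
# trigger the unclosed-bracket case via compile() so the notebook keeps running
try:
    compile("values = [1, 2, 3", "<example>", "exec")
except SyntaxError as err:
    print(err.msg, "at line", err.lineno)
```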
### Indentation Errors
* Many `IndentationError` exceptions now have more context.
### Attribute Errors
* `AttributeError` will offer suggestions of similar attribute names in the object that the exception was raised from.
### Name Errors
* `NameError` will offer suggestions of similar variable names in the function that the exception was raised from.
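A small illustrative cell: running it raises a `NameError`, and on 3.10 the displayed traceback typically includes a `Did you mean: 'greeting'?` suggestion (the exact suggestion text may vary):
```
# deliberate typo to show the improved NameError message
greeting = "hello"
print(greting)
```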
## Structural Pattern Matching
Many functional languages have a `match` expression, for example [Scala](https://www.scala-lang.org/files/archive/spec/2.11/08-pattern-matching.html), [Rust](https://doc.rust-lang.org/reference/expressions/match-expr.html), [F#](https://docs.microsoft.com/en-us/dotnet/fsharp/language-reference/pattern-matching).
A `match` statement takes an expression and compares it to successive patterns given as one or more case blocks. This is superficially similar to a switch statement in C, Java or JavaScript, but much more powerful.
### `match`
The simplest form compares a subject value against one or more literals:
```
def http_error(status):
match status:
case 400:
return "Bad request"
case 401:
return "Unauthorized"
case 403:
return "Forbidden"
case 404:
return "Not found"
case 418:
return "I'm a teapot"
case _:
return "Something else"
```
> **Note:**
>
> Only in this case `_` acts as a wildcard that never fails and **not** as a variable name.
The cases not only check for equality, but rebind variables that match the specified pattern. For example:
```
NOT_FOUND = 404
retcode = 200
match retcode:
case NOT_FOUND:
print('not found')
print(f"Current value of {NOT_FOUND=}")
```
> «If this poorly-designed feature is really added to Python, we lose a principle I’ve always taught students: ‹if you see an undocumented constant, you can always name it without changing the code’s meaning.› The Substitution Principle, learned in algebra? It’ll no longer apply.» – [Brandon Rhodes](https://twitter.com/brandon_rhodes/status/1360226108399099909)
> «… the semantics of this can be quite different from switch. The cases don't simply check equality, they rebind variables that match the specified pattern.» – [Jake VanderPlas](https://twitter.com/jakevdp/status/1359870794877132810)
### Symbolic constants
Patterns may use named constants. These must be dotted names to prevent them from being interpreted as a capture variable:
```
from enum import Enum
class Color(Enum):
RED = 0
GREEN = 1
BLUE = 2
color = Color(2)
match color:
case color.RED:
print("I see red!")
case color.GREEN:
print("Grass is green")
case color.BLUE:
print("I'm feeling the blues :(")
```
> «… "case CONSTANT" actually matching everything and assigning to a variable named CONSTANT» – [Armin Ronacher](https://twitter.com/mitsuhiko/status/1359263136994516999)
> **See also:**
>
> * [Structural pattern matching for Python](https://lwn.net/Articles/827179/)
> * [PEP 622 – Structural Pattern Matching](https://www.python.org/dev/peps/pep-0622) superseded by
> * [PEP 634: Specification](https://www.python.org/dev/peps/pep-0634)
> * [PEP 635: Motivation and Rationale](https://www.python.org/dev/peps/pep-0635)
> * [PEP 636: Tutorial](https://www.python.org/dev/peps/pep-0636)
> * [github.com/gvanrossum/patma/](https://github.com/gvanrossum/patma/)
> * [playground-622.ipynb on binder](https://mybinder.org/v2/gh/gvanrossum/patma/master?urlpath=lab/tree/playground-622.ipynb)
> * [Tobias Kohn: On the Syntax of Pattern Matching in Python](https://tobiaskohn.ch/index.php/2018/09/18/pattern-matching-syntax-in-python/)
# Tutorial: Computing with shapes of landmarks in Kendall shape spaces
Lead author: Nina Miolane.
In this tutorial, we show how to use geomstats to perform a shape data analysis. Specifically, we aim to study the difference between two groups of data:
- optical nerve heads that correspond to normal eyes,
- optical nerve heads that correspond to glaucoma eyes.
We wish to investigate if there is a difference in these two groups, and if this difference is a difference in sizes of the optical nerve heads, or a difference in shapes (where the size has been quotiented out).
<img src="figures/optic_nerves.png" />
## Set up
```
import os
import sys
import warnings
sys.path.append(os.path.dirname(os.getcwd()))
warnings.filterwarnings('ignore')
%matplotlib inline
import matplotlib.colors as colors
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import geomstats.backend as gs
import geomstats.datasets.utils as data_utils
from geomstats.geometry.pre_shape import PreShapeSpace, KendallShapeMetric
```
We import the dataset of the optical nerve heads from 22 images of Rhesus monkeys’ eyes (11 monkeys), available in [[PE2015]](#References).
For each monkey, an experimental glaucoma was introduced in one eye, while the second
eye was kept as control. One seeks to observe differences between the glaucoma and the
control eyes. On each image, 5 anatomical landmarks were recorded:
- 1st landmark: superior aspect of the retina,
- 2nd landmark: side of the retina closest to the temporal bone of the skull,
- 3rd landmark: nose side of the retina,
- 4th landmark: inferior point,
- 5th landmark: optical nerve head deepest point.
Label 0 refers to a normal eye, and Label 1 to an eye with glaucoma.
```
nerves, labels, monkeys = data_utils.load_optical_nerves()
print(nerves.shape)
print(labels)
print(monkeys)
```
We extract the landmarks' sets corresponding to the two eyes' nerves of the first monkey, with their corresponding labels.
```
two_nerves = nerves[monkeys==0]
print(two_nerves.shape)
two_labels = labels[monkeys==0]
print(two_labels)
label_to_str = {0: 'Normal nerve', 1: 'Glaucoma nerve'}
label_to_color = {0: (102/255, 178/255, 255/255, 1.), 1: (255/255, 178/255, 102/255, 1.)}
fig = plt.figure()
ax = Axes3D(fig)
ax.set_xlim((2000, 4000))
ax.set_ylim((1000, 5000))
ax.set_zlim((-600, 200))
for nerve, label in zip(two_nerves, two_labels):
x = nerve[:, 0]
y = nerve[:, 1]
z = nerve[:, 2]
verts = [list(zip(x,y,z))]
poly = Poly3DCollection(verts, alpha=0.5)
color = label_to_color[int(label)]
poly.set_color(colors.rgb2hex(color))
poly.set_edgecolor('k')
ax.add_collection3d(poly)
patch_0 = mpatches.Patch(color=label_to_color[0], label=label_to_str[0], alpha=0.5)
patch_1 = mpatches.Patch(color=label_to_color[1], label=label_to_str[1], alpha=0.5)
plt.legend(handles=[patch_0, patch_1], prop={'size': 14})
plt.show()
```
We first try to detect if there are two groups of optical nerve heads, based on the 3D coordinates of the landmarks sets.
```
from geomstats.geometry.euclidean import EuclideanMetric
nerves_vec = nerves.reshape(22, -1)
eucl_metric = EuclideanMetric(nerves_vec.shape[-1])
eucl_dist = eucl_metric.dist_pairwise(nerves_vec)
plt.figure()
plt.imshow(eucl_dist);
```
We do not see two clear clusters.
We want to investigate if there is a difference between these two groups of shapes - normal nerve versus glaucoma nerve - or if the main difference is merely relative to the global size of the landmarks' sets.
```
m_ambient = 3
k_landmarks = 5
preshape = PreShapeSpace(m_ambient=m_ambient, k_landmarks=k_landmarks)
matrices_metric = preshape.embedding_metric
sizes = matrices_metric.norm(preshape.center(nerves))
plt.figure(figsize=(6, 4))
for label, col in label_to_color.items():
label_sizes = sizes[labels==label]
plt.hist(label_sizes, color=col, label=label_to_str[label], alpha=0.5, bins=10)
plt.axvline(gs.mean(label_sizes), color=col)
plt.legend(fontsize=14)
plt.title('Sizes of optical nerves', fontsize=14);
```
The vertical lines represent the sample mean of each group (normal/glaucoma).
```
plt.figure(figsize=(6, 4))
plt.hist(sizes[labels==1] - sizes[labels==0], alpha=0.5)
plt.axvline(0, color='black')
plt.title('Difference in size of optical nerve between glaucoma and normal eyes', fontsize=14);
```
We perform a hypothesis test, testing if the two samples of sizes have the same average. We use the t-test for related samples, since the sample elements are paired: two eyes for each monkey.
```
from scipy import stats
signif_level = 0.05
tstat, pvalue = stats.ttest_rel(sizes[labels==0], sizes[labels==1])
print(pvalue < signif_level)
```
There is a significant difference in optical nerve head size between the glaucoma and the normal eyes.
We want to investigate if there is a difference in shapes, where the size component has been quotiented out.
We project the data to the Kendall pre-shape space, which:
- centers the nerve landmark sets so that they share the same barycenter,
- normalizes the sizes of the landmarks' sets to 1.
```
nerves_preshape = preshape.projection(nerves)
print(nerves_preshape.shape)
print(preshape.belongs(nerves_preshape))
print(gs.isclose(matrices_metric.norm(nerves_preshape), 1.))
```
In order to quotient out the 3D orientation component, we align the landmark sets in the preshape space.
```
base_point = nerves_preshape[0]
nerves_shape = preshape.align(point=nerves_preshape, base_point=base_point)
```
The Kendall metric is a Riemannian metric that takes this alignment into account. It corresponds to the metric of the Kendall shape space, which is the manifold defined as the preshape space quotiented by the action of the rotations in m_ambient dimensions, here in 3 dimensions.
```
kendall_metric = KendallShapeMetric(m_ambient=m_ambient, k_landmarks=k_landmarks)
```
We can use it to perform a tangent PCA in the Kendall shape space, and determine if we see a difference in the shapes of the optical nerves.
```
from geomstats.learning.pca import TangentPCA
tpca = TangentPCA(kendall_metric)
tpca.fit(nerves_shape)
plt.plot(
tpca.explained_variance_ratio_)
plt.xlabel("Number of principal tangent components", size=14)
plt.ylabel("Fraction of explained variance", size=14);
```
Two principal components already describe around 60% of the variance. We plot the data projected in the tangent space defined by these two principal components.
```
X = tpca.transform(nerves_shape)
plt.figure(figsize=(12, 12))
for label, col in label_to_color.items():
mask = labels == label
plt.scatter(X[mask, 0], X[mask, 1], color=col, s=100, label=label_to_str[label]);
plt.legend(fontsize=14);
for label, x, y in zip(monkeys, X[:, 0], X[:, 1]):
plt.annotate(
label,
xy=(x, y), xytext=(-20, 20),
textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='white', alpha=0.5),
arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
plt.show()
```
The indices represent the monkeys' indices.
In contrast to the above study focusing on the optical nerves' sizes, visual inspection does not reveal any clusters between the glaucoma and normal optical nerves' shapes. We also do not see any obvious pattern between the two optical nerves of the same monkey.
This shows that the difference between the optical nerve heads mainly resides in the overall sizes of the optical nerves.
```
dist_pairwise = kendall_metric.dist_pairwise(nerves_shape)
print(dist_pairwise.shape)
plt.figure()
plt.imshow(dist_pairwise);
```
We try an agglomerative hierarchical clustering to investigate if we can cluster the data in the Kendall shape space.
```
from geomstats.learning.agglomerative_hierarchical_clustering import AgglomerativeHierarchicalClustering
clustering = AgglomerativeHierarchicalClustering(distance='precomputed', n_clusters=2)
clustering.fit(dist_pairwise)
predicted_labels = clustering.labels_
print('True labels:', labels)
print('Predicted labels:', predicted_labels)
accuracy = gs.sum(labels==predicted_labels) / len(labels)
print(f'Accuracy: {accuracy:.2f}')
```
The accuracy is barely above the accuracy of a random classifier, which would assign 0 or 1 with probability 0.5 to each of the shapes. This confirms that the difference that exists between the two groups is mostly due to the landmark sets' size and not their shapes.
## References
.. [PE2015] Patrangenaru and L. Ellingson. Nonparametric Statistics on Manifolds and Their Applications to Object Data, 2015. https://doi.org/10.1201/b18969
# Portfolio Optimization
- Notes on portfolio optimization theory
- A **portfolio** is an investment spread across a variety of assets
- **Diversification** lowers volatility and risk (don't put all your eggs in one basket)
- **What is asset allocation?** Constructing the portfolio that maximizes return relative to risk
---
#### Tactical Asset Allocation (TAA)
- Maximizes risk-adjusted return over the short term
- Smart Beta...
#### Strategic Asset Allocation (SAA)
- Maximizes risk-adjusted return over the long term
- In general, risk and return are proportional
- The goal of an SAA model is to build a portfolio with low risk (volatility) and high returns
- Markowitz, Black-Litterman Model...
---
#### Average return (Expectation)
- When the return spans periods, such as a monthly average return, use the geometric mean
- $E(R) = \sqrt[N]{R_1 \times R_2 \times \cdots \times R_N}$
- $E(R) = \frac{1}{N}\sum_{i=1}^{N}R_i$
#### Volatility (Variance)
- Volatility (= risk) is the variance, which measures how far the returns spread around their expected value
- $\sigma^2 = Var(R) = \frac{1}{N-1}\sum_{i=1}^{N}(R_i-\bar{R})^2$
#### Covariance
- Measures how two or more random variables vary around their means together
- $Cov(R^1, R^2) = E[(R^1-\bar{R^1})(R^2-\bar{R^2})] = \frac{1}{N-1}\sum_{i=1}^{N}(R^1_i-\bar{R^1})(R^2_i-\bar{R^2})$
#### Correlation Coefficient
- The covariance normalized so that it is not affected by the absolute scale of the variables, taking values between -1 and 1
- $\rho = \cfrac{Cov(X,Y)}{Std(X)Std(Y)}, (-1\leq\rho\leq1)$
---
## Measuring portfolio expected return and risk
#### Defining a portfolio
- The investment weight of each asset within a given budget
- Choose the asset classes to invest in, then compute the return, volatility and correlations of the chosen assets
- The goal is to construct the portfolio with the highest return relative to its volatility
- $w = portfolio = [w_1, w_2, ... , w_N]^T, where \sum_{i=1}^{N}w_i = 1$
#### Portfolio expected return (Weighted Average)
- The sum of each asset's expected return multiplied by its portfolio weight
- $w = portfolio = [w_1, w_2, ... , w_N]^T, where \sum_{i=1}^{N}w_i = 1$
- $\mu_p = portfolio \times expectation = [w_1, w_2, ... , w_N][R_1, R_2, ... , R_N]^T$
#### Portfolio volatility (= risk)
- $\sigma_p^2 = [w_1, w_2, ... , w_N]
\begin{bmatrix}
\sigma_{11} & \sigma_{12} & \cdots & \sigma_{1n} \\
\vdots & \vdots & \ddots & \vdots \\
\sigma_{n1} & \sigma_{n2} & \cdots & \sigma_{n^2}
\end{bmatrix}
\begin{bmatrix}
w_{1} \\
w_{2} \\
\vdots \\
w_{N} \\
\end{bmatrix}
$
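A minimal numerical sketch of the two formulas above; the weights, returns and covariance matrix are made-up illustrative numbers, not data used later in this notebook:
```
import numpy as np

w = np.array([0.6, 0.4])                      # portfolio weights (sum to 1)
mu = np.array([0.05, 0.08])                   # expected returns of the two assets
sigma = np.array([[0.02, 0.001],
                  [0.001, 0.04]])             # covariance matrix
portfolio_return = w @ mu                     # weighted average of returns
portfolio_variance = w @ sigma @ w            # w^T * Sigma * w
print(portfolio_return, portfolio_variance, np.sqrt(portfolio_variance))
```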
```
import math
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style('darkgrid')
%matplotlib inline
def get_dataset(code, start, end):
df = pd.read_pickle("../dataset/{}.p".format(code))[::-1]
df = df[df['date'].between(start, end, inclusive=True)]
df = df.drop(['diff'], axis=1).set_index('date').sort_index()
return df
# KAKAO 2017-01-01 ~ 2018-03-30
# NAVER 2017-01-01 ~ 2018-03-30
kakao = get_dataset('035720', '2017-01-01', '2018-03-30')
naver = get_dataset('035420', '2017-01-01', '2018-03-30')
```
## NAVER vs KAKAO
- Closing price chart for the period 2017-01-01 to 2018-03-30
- Can we compare the volatility of the two stocks from the chart below?
```
plt.figure(figsize=(12,8))
plt.xlabel("Time Period")
plt.ylabel("Stock Price")
kakao_price = kakao['close']
naver_price = naver['close']
plt.plot(naver_price)
plt.plot(kakao_price)
plt.legend(['NAVER', 'KAKAO'], loc='upper left')
plt.figure(figsize=(12,8))
plt.xlabel("Time Period")
plt.ylabel("Stock Price")
kakao_price = kakao['close']
mean = kakao_price.replace(kakao_price, kakao_price.mean())
plt.plot(kakao_price)
plt.plot(mean)
plt.legend(['KAKAO', 'MEAN'], loc='upper left')
```
## Daily percent change
```
plt.figure(figsize=(12,8))
real_returns = kakao_price.pct_change()
plt.bar(real_returns.index,real_returns)
```
## Mean-Variance of a Single Stock
```
def income(start, end):
return round((end - start) / start * 100, 2)
def geometric_mean(iterable):
iterable = [i for i in iterable if int(i) is not 0]
a = np.log(iterable)
return np.exp(a.sum()/len(a))
point = kakao_price[0]
result = kakao_price.apply(lambda d: income(point, d))
result.head()
print("Mean of daily income: {}".format(np.mean(result)))
print("Geometric Mean of daily income: {}".format(geometric_mean(result)))
print("Varience of daily income: {}".format(np.var(result)))
print("Standard Deviation of daily income: {}".format(np.std(result)))
```
## Correlation
- r close to -1 indicates a negative correlation, and r close to +1 a positive correlation
- r between -0.1 and +0.1 indicates a correlation that can mostly be ignored
```
naver_price.corr(kakao_price, method='pearson')
```
## Mean-Variance of a Portfolio
- NAVER (30%), KAKAO (30%), Celltrion (20%), SK Innovation (20%)
```
def init_portfolio(stock, ratio, start, end):
dfs = []
for each in stock:
df = get_dataset(each, start, end)['close']
point = df[0]
result = df.apply(lambda d: income(point, d))
dfs.append(result)
return pd.concat(dfs, axis=1, keys=stock)
def port_mean_var(avg_ret_, var_covar_, w_):
port_ret = np.dot(w_, avg_ret_)
port_std = np.dot(np.dot(w_, var_covar_), w_.T)
return port_ret, port_std
stock = ['035420', '035720', '068270', '096770']
ratio = [0.3, 0.3, 0.2, 0.2]
df = init_portfolio(stock, ratio, '2017-01-01', '2018-03-30')
df.head()
avg_ret = df.mean()
var_covar = df.cov()
w = np.array(ratio).T
mean, var = port_mean_var(avg_ret, var_covar, w)
print("Mean of portfolio: {}".format(mean))
print("Varience of portfolio: {}".format(var))
```
# Improved feature engineering
*Anders Poirel - 11-02-2020*
Ideas I'll be building on
- separating the data by city (the data has a different structure in each city, which avoids needing a more complex model that captures feature interactions)
- using the lifecycle of the mosquito: new mosquitoes become adults 1-3 weeks after eggs are laid in water, so we could expect a lot of cases if the previous 2-3 weeks (roughly a month) were humid (a short illustrative sketch follows the note below)
**Note**: I realized I used *median* absolute error instead of mean absolute error in the previous notebook, which explains why my CV scores were so far from the test set scores!
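As an aside, the lagged-precipitation features motivated by the mosquito lifecycle can also be built with `pandas.Series.shift`; this is only an illustrative sketch (the DataFrame and column name are assumptions) and not the approach used below:
```
import pandas as pd

def add_precip_lags(df: pd.DataFrame, col: str = 'precipitation_amt_mm') -> pd.DataFrame:
    # assumes rows are already sorted in time order, one row per week
    out = df.copy()
    for lag in (2, 3, 4):
        out[f'precip_{lag}'] = out[col].shift(lag).fillna(0.0)
    return out
```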
```
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import sklearn
from os.path import join
DATA_PATH = '../data/raw/'
```
## Acquiring the data
```
X_test_o = pd.read_csv(join(DATA_PATH, 'dengue_features_test.csv'))
X_train_o = pd.read_csv(join(DATA_PATH, 'dengue_features_train.csv'))
y_train_o = pd.read_csv(join(DATA_PATH, 'dengue_labels_train.csv'))
```
### Preprocessing
```
X_train = pd.get_dummies(X_train_o, columns = ['city'], drop_first = True)
X_test = pd.get_dummies(X_test_o, columns = ['city'], drop_first = True)
X_train = X_train.drop('week_start_date', axis = 1)
X_test = X_test.drop('week_start_date', axis = 1)
```
Drop features that have correlation 1 with other features
```
X_train.drop(
['reanalysis_sat_precip_amt_mm', 'reanalysis_dew_point_temp_k',
'reanalysis_tdtr_k'],
axis = 1,
inplace = True
)
X_test.drop(
['reanalysis_sat_precip_amt_mm', 'reanalysis_dew_point_temp_k',
'reanalysis_tdtr_k'],
axis = 1,
inplace = True
)
y_train = y_train_o.copy()
y_train.head()
```
### Precipitation at several time lags
First, we split the data by city:
```
X_train_sj = X_train[X_train['city_sj'] == 1]
X_train_iq = X_train[X_train['city_sj'] == 0]
X_test_sj = X_test[X_test['city_sj'] == 1]
X_test_iq = X_test[X_test['city_sj'] == 0]
y_train_sj = y_train[y_train['city'] == 'sj']['total_cases']
y_train_iq = y_train[y_train['city'] == 'iq']['total_cases']
def precip_n_weeks(k, n, precips):
if k - n < 0:
return 0.0
else:
return precips[k - n]
train_precip_sj = X_train_sj['precipitation_amt_mm']
train_precip_iq = X_train_iq['precipitation_amt_mm']
test_precip_sj = X_test_sj['precipitation_amt_mm']
test_precip_iq = X_test_iq['precipitation_amt_mm']
```
We re-index the series for Iquitos so that they start from 0 and our code can run properly
```
iq_train_index = list(range(len(train_precip_iq)))
iq_test_index = list(range(len(test_precip_iq)))
train_precip_iq.index = iq_train_index
test_precip_iq.index = iq_test_index
X_train_sj['precip_2'] = [precip_n_weeks(k, 2, train_precip_sj)
for k in range(len(train_precip_sj))]
X_train_sj['precip_3'] = [precip_n_weeks(k, 3, train_precip_sj)
for k in range(len(train_precip_sj))]
X_train_sj['precip_4'] = [precip_n_weeks(k, 4, train_precip_sj)
for k in range(len(train_precip_sj))]
X_test_sj['precip_2'] = [precip_n_weeks(k, 2, test_precip_sj)
for k in range(len(test_precip_sj))]
X_test_sj['precip_3'] = [precip_n_weeks(k, 3, test_precip_sj)
for k in range(len(test_precip_sj))]
X_test_sj['precip_4'] = [precip_n_weeks(k, 4, test_precip_sj)
for k in range(len(test_precip_sj))]
X_train_iq['precip_2'] = [precip_n_weeks(k, 2, train_precip_iq)
for k in range(len(train_precip_iq))]
X_train_iq['precip_3'] = [precip_n_weeks(k, 3, train_precip_iq)
for k in range(len(train_precip_iq))]
X_train_iq['precip_4'] = [precip_n_weeks(k, 4, train_precip_iq)
for k in range(len(train_precip_iq))]
X_test_iq['precip_2'] = [precip_n_weeks(k, 2, test_precip_iq)
for k in range(len(test_precip_iq))]
X_test_iq['precip_3'] = [precip_n_weeks(k, 3, test_precip_iq)
for k in range(len(test_precip_iq))]
X_test_iq['precip_4'] = [precip_n_weeks(k, 4, test_precip_iq)
for k in range(len(test_precip_iq))]
```
Let's check that this worked as intended:
```
X_test_sj.head(30)
```
## Building the models
```
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.linear_model import LinearRegression, ElasticNet
from sklearn.model_selection import (cross_validate, TimeSeriesSplit,
RandomizedSearchCV)
```
#### ElasticNet with penalty
San Jose:
```
en_sj = Pipeline([
('scale', StandardScaler()),
('impute_m', SimpleImputer()),
('en', LinearRegression())
])
cv_res_sj = cross_validate(
estimator = en_sj,
X = X_train_sj,
y = y_train_sj,
cv = TimeSeriesSplit(n_splits = 10),
scoring = 'neg_mean_absolute_error',
n_jobs = -1
)
en_sj_score = np.mean(cv_res_sj['test_score'])
en_sj_score
en_sj.fit(X_train_sj, y_train_sj)
y_pred_sj = en_sj.predict(X_train_sj)
```
Iquitos:
```
en_iq = Pipeline([
('scale', StandardScaler()),
('impute_m', SimpleImputer()),
('en', ElasticNet(alpha = 10))
])
cv_res_iq = cross_validate(
estimator = en_iq,
X = X_train_iq,
y = y_train_iq,
cv = TimeSeriesSplit(n_splits = 10),
scoring = 'neg_mean_absolute_error',
n_jobs = -1
)
en_iq_score = np.mean(cv_res_iq['test_score'])
en_iq_score
y_train_iq.mean()
```
Something is really strange here... both models have large MAEs (close to the mean values of the targets for each)
```
plt.style.use('default')
```
We get the date data for each city:
```
sj_dates = X_train_o[X_train_o['city'] == 'sj']['week_start_date']
iq_dates = X_train_o[X_train_o['city'] == 'iq']['week_start_date']
ax = plt.axes()
ax.plot(sj_dates, y_pred_sj)
ax.plot(sj_dates, y_train_sj)
```
It appears that the model is predicting very close to the mean
### Building a submission
```
submission = pd.read_csv(join(DATA_PATH, 'submission_format.csv'))
# predict with the per-city models fitted above
# (assumes the submission rows are ordered San Juan first, then Iquitos, as in the feature file)
en_iq.fit(X_train_iq, y_train_iq)
y_pred = np.concatenate([en_sj.predict(X_test_sj), en_iq.predict(X_test_iq)])
submission['total_cases'] = np.round(y_pred).astype(int)
submission
submission.to_csv('../models/baseline.csv', index = False)
```
## Facial Filters
Using your trained facial keypoint detector, you can now do things like add filters to a person's face, automatically. In this optional notebook, you can play around with adding sunglasses to detected faces in an image by using the keypoints detected around a person's eyes. Check out the `images/` directory to see what other .png's have been provided for you to try, too!
<img src="images/face_filter_ex.png" width=60% height=60%/>
Let's start this process by looking at a sunglasses .png that we'll be working with!
```
# import necessary resources
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import cv2
# load in sunglasses image with cv2 and IMREAD_UNCHANGED
sunglasses = cv2.imread('images/sunglasses.png', cv2.IMREAD_UNCHANGED)
# plot our image
plt.imshow(sunglasses)
# print out its dimensions
print('Image shape: ', sunglasses.shape)
```
## The 4th dimension
You'll note that this image actually has *4 color channels*, not just 3 as your average RGB image does. This is due to the flag we set `cv2.IMREAD_UNCHANGED`, which tells cv2 to read in the additional (alpha) color channel.
#### Alpha channel
It has the usual red, blue, and green channels any color image has, and the 4th channel represents the **transparency level of each pixel** in the image; this is often called the **alpha** channel. Here's how the transparency channel works: the lower the value, the more transparent, or see-through, the pixel will become. The lower bound (completely transparent) is zero here, so any pixels set to 0 will not be seen; these look like white background pixels in the image above, but they are actually totally transparent.
This transparency channel allows us to place this rectangular image of sunglasses on an image of a face and still see the face area that is technically covered by the transparent background of the sunglasses image!
Let's check out the alpha channel of our sunglasses image in the next Python cell. Because many of the pixels in the background of the image have an alpha value of 0, we'll need to explicitly print out non-zero values if we want to see them.
```
# print out the sunglasses transparency (alpha) channel
alpha_channel = sunglasses[:,:,3]
print ('The alpha channel looks like this (black pixels = transparent): ')
plt.imshow(alpha_channel, cmap='gray')
# just to double check that there are indeed non-zero values
# let's find and print out every value greater than zero
values = np.where(alpha_channel != 0)
print ('The non-zero values of the alpha channel are: ')
print (values)
```
#### Overlaying images
This means that when we place this sunglasses image on top of another image, we can use the transparency channel as a filter:
* If the pixels are non-transparent (alpha_channel > 0), overlay them on the new image
#### Keypoint locations
In doing this, it's helpful to understand which keypoint belongs to the eyes, mouth, etc., so in the image below we also print the index of each facial keypoint directly on the image so you can tell which keypoints are for the eyes, eyebrows, and so on.
<img src="images/landmarks_numbered.jpg" width=50% height=50%/>
It may be useful to use keypoints that correspond to the edges of the face to define the width of the sunglasses, and the locations of the eyes to define the placement.
Next, we'll load in an example image. Below, you've been given an image and set of keypoints from the provided training set of data, but you can use your own CNN model to generate keypoints for *any* image of a face (as in Notebook 3) and go through the same overlay process!
```
# load in training data
key_pts_frame = pd.read_csv('data/training_frames_keypoints.csv')
# print out some stats about the data
print('Number of images: ', key_pts_frame.shape[0])
# helper function to display keypoints
def show_keypoints(image, key_pts):
"""Show image with keypoints"""
plt.imshow(image)
plt.scatter(key_pts[:, 0], key_pts[:, 1], s=20, marker='.', c='m')
# a selected image
n = 120
image_name = key_pts_frame.iloc[n, 0]
image = mpimg.imread(os.path.join('data/training/', image_name))
key_pts = key_pts_frame.iloc[n, 1:].to_numpy()
key_pts = key_pts.astype('float').reshape(-1, 2)
print('Image name: ', image_name)
plt.figure(figsize=(5, 5))
show_keypoints(image, key_pts)
plt.show()
```
Next, you'll see an example of placing sunglasses on the person in the loaded image.
Note that the keypoints are numbered off-by-one in the numbered image above, and so `key_pts[0,:]` corresponds to the first point (1) in the labelled image.
```
# Display sunglasses on top of the image in the appropriate place
# copy of the face image for overlay
image_copy = np.copy(image)
# top-left location for sunglasses to go
# 17 = edge of left eyebrow
x = int(key_pts[17, 0])
y = int(key_pts[17, 1])
# height and width of sunglasses
# h = length of nose
h = int(abs(key_pts[27,1] - key_pts[34,1]))
# w = left to right eyebrow edges
w = int(abs(key_pts[17,0] - key_pts[26,0]))
# read in sunglasses
sunglasses = cv2.imread('images/sunglasses.png', cv2.IMREAD_UNCHANGED)
# resize sunglasses
new_sunglasses = cv2.resize(sunglasses, (w, h), interpolation = cv2.INTER_CUBIC)
# get region of interest on the face to change
roi_color = image_copy[y:y+h,x:x+w]
# find all non-transparent pts
ind = np.argwhere(new_sunglasses[:,:,3] > 0)
# for each non-transparent point, replace the original image pixel with that of the new_sunglasses
for i in range(3):
roi_color[ind[:,0],ind[:,1],i] = new_sunglasses[ind[:,0],ind[:,1],i]
# set the area of the image to the changed region with sunglasses
image_copy[y:y+h,x:x+w] = roi_color
# display the result!
plt.imshow(image_copy)
```
#### Further steps
Look in the `images/` directory to see other available .png's for overlay! Also, you may notice that the overlay of the sunglasses is not entirely perfect; you're encouraged to play around with the scale of the width and height of the glasses and investigate how to perform [image rotation](https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.html) in OpenCV so as to match an overlay with any facial pose.
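For example, here is a rough sketch of rotating the resized sunglasses with OpenCV before overlaying them; the eye-corner indices used to compute the tilt angle are assumptions, so check the numbered keypoint image above for the exact indices:
```
# rough sketch: rotate the resized sunglasses to follow the tilt of the face
# assumption: keypoints 36 and 45 are the outer eye corners (verify in the numbered image above)
left_eye = key_pts[36]
right_eye = key_pts[45]
angle = np.degrees(np.arctan2(right_eye[1] - left_eye[1], right_eye[0] - left_eye[0]))
center = (new_sunglasses.shape[1] // 2, new_sunglasses.shape[0] // 2)
M = cv2.getRotationMatrix2D(center, -angle, 1.0)
rotated_sunglasses = cv2.warpAffine(new_sunglasses, M,
                                    (new_sunglasses.shape[1], new_sunglasses.shape[0]))
```
The rotated image keeps its alpha channel, so the same transparency mask can be applied when placing it on the face.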
# Acme: Quickstart
## Guide to installing Acme and training your first D4PG agent.
# <a href="https://colab.research.google.com/github/deepmind/acme/blob/master/examples/quickstart.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## Select your environment library
Note: `dm_control` requires a valid Mujoco license.
```
environment_library = 'gym' # @param ['dm_control', 'gym']
```
## Add your Mujoco license here
Note: only required for `dm_control`.
```
mjkey = """
""".strip()
if not mjkey and environment_library == 'dm_control':
raise ValueError(
      'A Mujoco license is required for `dm_control`. If you do not have one, '
      'consider selecting `gym` from the dropdown menu in the cell above.')
```
## Installation
### Install Acme
```
!pip install dm-acme
!pip install dm-acme[reverb]
!pip install dm-acme[tf]
```
### Install the environment library
Without a valid license you won't be able to use the `dm_control` environments but can still follow this colab using the `gym` environments.
If you have a personal Mujoco license (_not_ an institutional one), you may
need to follow the instructions at https://research.google.com/colaboratory/local-runtimes.html to run a Jupyter kernel on your local machine.
This will allow you to install `dm_control` by following instructions in
https://github.com/deepmind/dm_control and using a personal MuJoCo license.
```
#@test {"skip": true}
if environment_library == 'dm_control':
mujoco_dir = "$HOME/.mujoco"
# Install OpenGL dependencies
!apt-get update && apt-get install -y --no-install-recommends \
libgl1-mesa-glx libosmesa6 libglew2.0
# Get MuJoCo binaries
!wget -q https://www.roboti.us/download/mujoco200_linux.zip -O mujoco.zip
!unzip -o -q mujoco.zip -d "$mujoco_dir"
# Copy over MuJoCo license
!echo "$mjkey" > "$mujoco_dir/mjkey.txt"
# Install dm_control
!pip install dm_control
# Configure dm_control to use the OSMesa rendering backend
%env MUJOCO_GL=osmesa
# Check that the installation succeeded
try:
from dm_control import suite
env = suite.load('cartpole', 'swingup')
pixels = env.physics.render()
except Exception as e:
raise RuntimeError(
'Something went wrong during installation. Check the shell output above '
'for more information. If you do not have a valid Mujoco license, '
'consider selecting `gym` in the dropdown menu at the top of this '
'Colab.') from e
else:
del suite, env, pixels
elif environment_library == 'gym':
!pip install gym
```
### Install visualization packages
```
!sudo apt-get install -y xvfb ffmpeg
!pip install imageio
!pip install PILLOW
!pip install pyvirtualdisplay
```
## Import Modules
```
import IPython
from acme import environment_loop
from acme import specs
from acme import wrappers
from acme.agents.tf import d4pg
from acme.tf import networks
from acme.tf import utils as tf2_utils
from acme.utils import loggers
import numpy as np
import sonnet as snt
# Import the selected environment lib
if environment_library == 'dm_control':
from dm_control import suite
elif environment_library == 'gym':
import gym
# Imports required for visualization
import pyvirtualdisplay
import imageio
import base64
# Set up a virtual display for rendering.
display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start()
```
## Load an environment
We can now load an environment. In what follows we'll create an environment and grab the environment's specifications.
```
if environment_library == 'dm_control':
environment = suite.load('cartpole', 'balance')
elif environment_library == 'gym':
environment = gym.make('MountainCarContinuous-v0')
environment = wrappers.GymWrapper(environment) # To dm_env interface.
else:
raise ValueError(
"Unknown environment library: {};".format(environment_library) +
"choose among ['dm_control', 'gym'].")
# Make sure the environment outputs single-precision floats.
environment = wrappers.SinglePrecisionWrapper(environment)
# Grab the spec of the environment.
environment_spec = specs.make_environment_spec(environment)
```
## Create a D4PG agent
```
#@title Build agent networks
# Get total number of action dimensions from action spec.
num_dimensions = np.prod(environment_spec.actions.shape, dtype=int)
# Create the shared observation network; here simply a state-less operation.
observation_network = tf2_utils.batch_concat
# Create the deterministic policy network.
policy_network = snt.Sequential([
networks.LayerNormMLP((256, 256, 256), activate_final=True),
networks.NearZeroInitializedLinear(num_dimensions),
networks.TanhToSpec(environment_spec.actions),
])
# Create the distributional critic network.
critic_network = snt.Sequential([
# The multiplexer concatenates the observations/actions.
networks.CriticMultiplexer(),
networks.LayerNormMLP((512, 512, 256), activate_final=True),
networks.DiscreteValuedHead(vmin=-150., vmax=150., num_atoms=51),
])
# Create a logger for the agent and environment loop.
agent_logger = loggers.TerminalLogger(label='agent', time_delta=10.)
env_loop_logger = loggers.TerminalLogger(label='env_loop', time_delta=10.)
# Create the D4PG agent.
agent = d4pg.D4PG(
environment_spec=environment_spec,
policy_network=policy_network,
critic_network=critic_network,
observation_network=observation_network,
sigma=1.0,
logger=agent_logger,
checkpoint=False
)
# Create a loop connecting this agent to the environment created above.
env_loop = environment_loop.EnvironmentLoop(
environment, agent, logger=env_loop_logger)
```
## Run a training loop
```
# Run `num_episodes` training episodes.
# Rerun this cell until the agent has learned the given task.
env_loop.run(num_episodes=100)
```
## Visualize an evaluation loop
### Helper functions for rendering and visualization
```
# Create a simple helper function to render a frame from the current state of
# the environment.
if environment_library == 'dm_control':
def render(env):
return env.physics.render(camera_id=0)
elif environment_library == 'gym':
def render(env):
return env.environment.render(mode='rgb_array')
else:
raise ValueError(
"Unknown environment library: {};".format(environment_library) +
"choose among ['dm_control', 'gym'].")
def display_video(frames, filename='temp.mp4'):
"""Save and display video."""
# Write video
with imageio.get_writer(filename, fps=60) as video:
for frame in frames:
video.append_data(frame)
# Read video and display the video
video = open(filename, 'rb').read()
b64_video = base64.b64encode(video)
video_tag = ('<video width="320" height="240" controls alt="test" '
'src="data:video/mp4;base64,{0}">').format(b64_video.decode())
return IPython.display.HTML(video_tag)
```
### Run and visualize the agent in the environment for an episode
```
timestep = environment.reset()
frames = [render(environment)]
while not timestep.last():
# Simple environment loop.
action = agent.select_action(timestep.observation)
timestep = environment.step(action)
# Render the scene and add it to the frame stack.
frames.append(render(environment))
# Save and display a video of the behaviour.
display_video(np.array(frames))
```
```
# default_exp models.XCMPlus
```
# XCM (An Explainable Convolutional Neural Network for Multivariate Time Series Classification)
> This is an unofficial PyTorch implementation by Ignacio Oguiza ([email protected]), based on the paper by Fauvel et al. (2020) listed in the references below.
**References:**
* Fauvel, K., Lin, T., Masson, V., Fromont, É., & Termier, A. (2020). XCM: An Explainable Convolutional Neural Network for Multivariate Time Series Classification. arXiv preprint arXiv:2009.04796.
* Official XCM PyTorch implementation: not available as of Nov 27th, 2020
```
#export
from tsai.imports import *
from tsai.utils import *
from tsai.models.layers import *
from tsai.models.utils import *
from tsai.models.explainability import *
#export
# This is an unofficial PyTorch implementation by Ignacio Oguiza - [email protected] based on:
# Fauvel, K., Lin, T., Masson, V., Fromont, É., & Termier, A. (2020). XCM: An Explainable Convolutional Neural Network for
# Multivariate Time Series Classification. arXiv preprint arXiv:2009.04796.
# Official XCM PyTorch implementation: not available as of Nov 27th, 2020
class XCMPlus(nn.Sequential):
def __init__(self, c_in:int, c_out:int, seq_len:Optional[int]=None, nf:int=128, window_perc:float=1., flatten:bool=False, custom_head:callable=None,
concat_pool:bool=False, fc_dropout:float=0., bn:bool=False, y_range:tuple=None, **kwargs):
window_size = int(round(seq_len * window_perc, 0))
backbone = _XCMPlus_Backbone(c_in, c_out, seq_len=seq_len, nf=nf, window_perc=window_perc)
self.head_nf = nf
self.c_out = c_out
self.seq_len = seq_len
if custom_head: head = custom_head(self.head_nf, c_out, seq_len, **kwargs)
else: head = self.create_head(self.head_nf, c_out, seq_len, flatten=flatten, concat_pool=concat_pool,
fc_dropout=fc_dropout, bn=bn, y_range=y_range)
super().__init__(OrderedDict([('backbone', backbone), ('head', head)]))
def create_head(self, nf, c_out, seq_len=None, flatten=False, concat_pool=False, fc_dropout=0., bn=False, y_range=None):
if flatten:
nf *= seq_len
layers = [Flatten()]
else:
if concat_pool: nf *= 2
layers = [GACP1d(1) if concat_pool else GAP1d(1)]
layers += [LinBnDrop(nf, c_out, bn=bn, p=fc_dropout)]
if y_range: layers += [SigmoidRange(*y_range)]
return nn.Sequential(*layers)
def show_gradcam(self, x, y=None, detach=True, cpu=True, apply_relu=True, cmap='inferno', figsize=None, **kwargs):
att_maps = get_attribution_map(self, [self.backbone.conv2dblock, self.backbone.conv1dblock], x, y=y, detach=detach, cpu=cpu, apply_relu=apply_relu)
att_maps[0] = (att_maps[0] - att_maps[0].min()) / (att_maps[0].max() - att_maps[0].min())
att_maps[1] = (att_maps[1] - att_maps[1].min()) / (att_maps[1].max() - att_maps[1].min())
figsize = ifnone(figsize, (10, 10))
fig = plt.figure(figsize=figsize, **kwargs)
ax = plt.axes()
plt.title('Observed variables')
im = ax.imshow(att_maps[0], cmap=cmap)
cax = fig.add_axes([ax.get_position().x1+0.01,ax.get_position().y0,0.02,ax.get_position().height])
plt.colorbar(im, cax=cax)
plt.show()
fig = plt.figure(figsize=figsize, **kwargs)
ax = plt.axes()
plt.title('Time')
im = ax.imshow(att_maps[1], cmap=cmap)
cax = fig.add_axes([ax.get_position().x1+0.01,ax.get_position().y0,0.02,ax.get_position().height])
plt.colorbar(im, cax=cax)
plt.show()
class _XCMPlus_Backbone(Module):
def __init__(self, c_in:int, c_out:int, seq_len:Optional[int]=None, nf:int=128, window_perc:float=1.):
window_size = int(round(seq_len * window_perc, 0))
self.conv2dblock = nn.Sequential(*[Unsqueeze(1), Conv2d(1, nf, kernel_size=(1, window_size), padding='same'), BatchNorm(nf), nn.ReLU()])
self.conv2d1x1block = nn.Sequential(*[nn.Conv2d(nf, 1, kernel_size=1), nn.ReLU(), Squeeze(1)])
self.conv1dblock = nn.Sequential(*[Conv1d(c_in, nf, kernel_size=window_size, padding='same'), BatchNorm(nf, ndim=1), nn.ReLU()])
self.conv1d1x1block = nn.Sequential(*[nn.Conv1d(nf, 1, kernel_size=1), nn.ReLU()])
self.concat = Concat()
self.conv1d = nn.Sequential(*[Conv1d(c_in + 1, nf, kernel_size=window_size, padding='same'), BatchNorm(nf, ndim=1), nn.ReLU()])
def forward(self, x):
x1 = self.conv2dblock(x)
x1 = self.conv2d1x1block(x1)
x2 = self.conv1dblock(x)
x2 = self.conv1d1x1block(x2)
out = self.concat((x2, x1))
out = self.conv1d(out)
return out
from tsai.data.all import *
from tsai.models.XCM import *
dsid = 'NATOPS'
X, y, splits = get_UCR_data(dsid, split_data=False)
tfms = [None, Categorize()]
dls = get_ts_dls(X, y, splits=splits, tfms=tfms)
model = XCMPlus(dls.vars, dls.c, dls.len)
learn = Learner(dls, model, metrics=accuracy)
xb, yb = dls.one_batch()
bs, c_in, seq_len = xb.shape
c_out = len(np.unique(yb))
model = XCMPlus(c_in, c_out, seq_len, fc_dropout=.5)
test_eq(model(xb).shape, (bs, c_out))
model = XCMPlus(c_in, c_out, seq_len, concat_pool=True)
test_eq(model(xb).shape, (bs, c_out))
model = XCMPlus(c_in, c_out, seq_len)
test_eq(model(xb).shape, (bs, c_out))
test_eq(count_parameters(XCMPlus(c_in, c_out, seq_len)), count_parameters(XCM(c_in, c_out, seq_len)))
model
model.show_gradcam(xb[0], yb[0])
bs = 16
n_vars = 3
seq_len = 12
c_out = 10
xb = torch.rand(bs, n_vars, seq_len)
new_head = partial(conv_lin_3d_head, d=(5, 2))
net = XCMPlus(n_vars, c_out, seq_len, custom_head=new_head)
print(net(xb).shape)
net.head
bs = 16
n_vars = 3
seq_len = 12
c_out = 2
xb = torch.rand(bs, n_vars, seq_len)
net = XCMPlus(n_vars, c_out, seq_len)
change_model_head(net, create_pool_plus_head, concat_pool=False)
print(net(xb).shape)
net.head
#hide
out = create_scripts(); beep(out)
```
## Linear least-squares and a bland dense network
We're going to use the MIT-BIH datasets to train and test a basic feedforward network and see how it does. We'll compare the results against a linear least-squares fit.
We'll use two different inputs: a mostly unprocessed version of the dataset, and a version in the frequency domain obtained by applying the FFT.
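As a rough sketch of what the frequency-domain transform might look like (the actual `tools.train.dataset_fft` helper may differ), each beat can be mapped to the magnitudes of its real FFT:
```
# rough sketch of an FFT feature transform; the real tools.train.dataset_fft may differ
import numpy as np

def to_fft_features(signals):
    """Map each row (a time-domain beat) to the magnitudes of its real FFT."""
    return np.abs(np.fft.rfft(signals, axis=1))
```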
```
import datetime
import os
import logging
import numpy as np
import tensorflow as tf
import tools.plot as plot
import tools.train as train
import tools.models as models
## Read in data
files = ("../data/mitbih_train.csv", "../data/mitbih_test.csv")
inputs, labels, sparse_labels, df = train.preprocess(*files, fft=False)
inputs_fft = train.dataset_fft(inputs)
train.class_count(df)
```
Let's look at a few random samples of the training data:
```
plot.plot_ecg(files[0], 125, 1)
```
### Least-squares
Let's try least-squares regression with numpy.
```
lstsq_soln = np.linalg.lstsq(inputs["train"], labels["train"], rcond=None)
lstsq_soln_fft = np.linalg.lstsq(inputs_fft["train"], labels["train"], rcond=None)
print("Rank of training dataset:", lstsq_soln[2])
print("Rank of training dataset after (real) FFT:", lstsq_soln_fft[2])
```
Now let's see how accurate it is.
```
def lstsq_accuracy(inputs, labels, coeffs):
predict = {}
accuracy = {}
for key in inputs:
predict[key] = np.argmax(np.dot(inputs[key], coeffs), axis=1)
num_correct = np.sum(
labels[key][range(labels[key].shape[0]), predict[key]] == 1
)
accuracy[key] = num_correct / labels[key].shape[0]
print("Training accuracy:", accuracy["train"])
print("Test accuracy:", accuracy["test"])
return predict
print("Regular least-squares")
predict = lstsq_accuracy(inputs, labels, lstsq_soln[0])
plot.plot_cm(sparse_labels["test"], predict["test"], classes=np.arange(5), normalize=True)
print("After FFT")
predict_fft = lstsq_accuracy(inputs_fft, labels, lstsq_soln_fft[0])
plot.plot_cm(sparse_labels["test"], predict_fft["test"], classes=np.arange(5), normalize=True)
```
### Dense feed-forward network
Let's try an unregularized, bland feed-forward network with a couple of hidden layers.
```
# Tensorboard logging
rightnow = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
nofftpath = os.path.join("..", "logs", rightnow, "nofft")
config = {
"optimizer": "Nadam",
"loss": "categorical_crossentropy",
"batch_size": 200,
"val_split": 0.05,
"epochs": 300,
"verbose": 0,
"patience": 20,
"logdir": nofftpath,
}
inputsize = inputs["train"].shape[1]
ncategories = labels["train"].shape[1]
hiddenlayers = [(100, "relu")]
# Suppress tensorflow warnings about internal deprecations
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
print("Unprocessed data")
model = models.create_dense(inputsize, hiddenlayers, ncategories)
history = train.train_print(model, inputs, labels, config)
plot.plot_fit_history(history)
test_pred = np.argmax(model.predict(inputs["test"]), axis=1)
plot.plot_cm(
sparse_labels["test"],
test_pred,
classes=np.array(["N", "S", "V", "F", "Q"]),
normalize=True,
)
# Tensorboard logging
fftpath = os.path.join("..", "logs", rightnow, "fft")
config_fft = config
config_fft["logdir"] = fftpath
print("After FFT")
model_fft = models.create_dense(inputs_fft["train"].shape[1], hiddenlayers, ncategories)
history_fft = train.train_print(model_fft, inputs_fft, labels, config_fft)
plot.plot_fit_history(history_fft)
test_pred_fft = np.argmax(model_fft.predict(inputs_fft["test"]), axis=1)
plot.plot_cm(
sparse_labels["test"],
test_pred_fft,
classes=np.array(["N", "S", "V", "F", "Q"]),
normalize=True,
)
```
The results are not very consistent: the final test accuracy varies noticeably from run to run, and it is not clear whether the FFT actually improves the training accuracy.
### Data Visualization
#### `matplotlib` - from the documentation:
https://matplotlib.org/3.1.1/tutorials/introductory/pyplot.html
`matplotlib.pyplot` is a collection of command style functions that make matplotlib work like MATLAB. <br>
Each pyplot function makes some change to a figure: e.g., creates a figure, creates a plotting area in a figure, plots some lines in a plotting area, decorates the plot with labels, etc.
In `matplotlib.pyplot` various states are preserved across function calls, so that it keeps track of things like the current figure and plotting area, and the plotting functions are directed to the current axes.<br>
(Note that "axes" in most places in the documentation refers to the axes part of a figure, and not the strict mathematical term for more than one axis.)
```
%matplotlib inline
import matplotlib.pyplot as plt
```
Call signatures:
```
plot([x], y, [fmt], data=None, **kwargs)
plot([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs)
```
Quick plot
The main usage of `plt` is the `plot()` and `show()` functions
```
plt.plot()
plt.show()
```
Plotting a list
```
plt.plot([8, 24, 27, 42])
plt.ylabel('numbers')
plt.show()
# Plot the two lists, add axes labels
x=[4,5,6,7]
y=[2,5,1,7]
```
`matplotlib` can use *format strings* to quickly declare the type of plots you want. Here are *some* of those formats:
|**Character**|**Description**|
|:-----------:|:--------------|
|'--'|Dashed line|
|':'|Dotted line|
|'o'|Circle marker|
|'^'|Upwards triangle marker|
|'b'|Blue|
|'c'|Cyan|
|'g'|Green|
```
plt.plot([3, 4, 9, 20], 'gs')
plt.axis([-1, 4, 0, 22])
plt.show()
plt.plot([3, 4, 9, 20], 'b^--', linewidth=2, markersize=12)
plt.show()
plt.plot([3, 4, 9, 20], color='blue', marker='^', linestyle='dashed', linewidth=2, markersize=12)
plt.show()
# Plot a list with 10 numbers with a magenta dotted line and circles for points.
import numpy as np
# evenly sampled time
time = np.arange(0, 7, 0.3)
# gene expression
ge = np.arange(1, 8, 0.3)
# red dashes, blue squares and green triangles
plt.plot(time, ge, 'r--', time, ge**2, 'bs', time, ge**3, 'g^')
plt.show()
```
The `linestyle` (or `ls`) property accepts values such as `'-' | '--' | '-.' | ':'`.
```
lines = plt.plot([1, 2, 3])
plt.setp(lines)
names = ['A', 'B', 'C', 'D']
values = [7, 20, 33, 44]
values1 = np.random.rand(100)
plt.figure(figsize=(9, 3))
plt.subplot(131)
plt.bar(names, values)
plt.subplot(132)
plt.scatter(names, values)
plt.subplot(133)
plt.hist(values1)
plt.suptitle('Categorical Plotting')
plt.show()
import pandas as pd
df_iris = pd.read_csv('https://raw.githubusercontent.com/uiuc-cse/data-fa14/gh-pages/data/iris.csv')
df_iris.head()
x1 = df_iris.petal_length
y1 = df_iris.petal_width
x2 = df_iris.sepal_length
y2 = df_iris.sepal_width
plt.plot(x1, y1, 'g^', x2, y2, 'bs')
plt.show()
```
#### Histogram
```
help(plt.hist)
n, bins, patches = plt.hist(df_iris.petal_length, bins=20,facecolor='#8303A2', alpha=0.8, rwidth=.8, align='mid')
# Add a title
plt.title('Iris dataset petal length')
# Add y axis label
plt.ylabel('number of plants')
```
#### Boxplot
```
help(plt.boxplot)
plt.boxplot(df_iris.petal_length)
# Add a title
plt.title('Iris dataset petal length')
# Add y axis label
plt.ylabel('petal length')
```
The biggest issue with `matplotlib` isn't its lack of power...it is that it is too much power. With great power, comes great responsibility. When you are quickly exploring data, you don't want to have to fiddle around with axis limits, colors, figure sizes, etc. Yes, you *can* make good figures with `matplotlib`, but you probably won't.
https://python-graph-gallery.com/matplotlib/
Pandas works off of `matplotlib` by default. You can easily start visualizing dataframes and series with a simple command.
#### Using pandas `.plot()`
Pandas abstracts away some of those initial issues with data visualization. However, the result is still a `matplotlib` plot.<br><br>
Every plot returned from `pandas` can be modified with `matplotlib`.
```
df_iris.plot.box()
plt.show()
# Plot the histogram of the petal lengths
# Plot the histograms of all 4 numerical characteristics in a plot
df_iris.groupby("species")['petal_length'].mean().plot(kind='bar')
plt.show()
df_iris.plot(x='petal_length', y='petal_width', kind = "scatter")
plt.savefig('output.png')
```
https://github.com/pandas-dev/pandas/blob/v0.25.0/pandas/plotting/_core.py#L504-L1533
#### Multiple Plots
```
df_iris.petal_length.plot(kind='density')
df_iris.sepal_length.plot(kind='density')
df_iris.petal_width.plot(kind='density')
```
`matplotlib` allows users to define the regions of their plotting canvas. If the user intends to create a canvas with multiple plots, they would use the `subplot()` function. The `subplot` function sets the number of rows and columns the canvas will have **AND** sets the current index of where the next subplot will be rendered.
```
plt.figure(1)
# Plot all three columns from df in different subplots
# Rows first index (top-left)
plt.subplot(3, 1, 1)
df_iris.petal_length.plot(kind='density')
plt.subplot(3, 1, 2)
df_iris.sepal_length.plot(kind='density')
plt.subplot(3, 1, 3)
df_iris.petal_width.plot(kind='density')
# Some plot configuration
plt.subplots_adjust(top=.92, bottom=.08, left=.1, right=.95, hspace=.25, wspace=.35)
plt.show()
# Temporary styles
with plt.style.context(('ggplot')):
plt.figure(1)
# Plot all three columns from df in different subplots
# Rows first index (top-left)
plt.subplot(3, 1, 1)
df_iris.petal_length.plot(kind='density')
plt.subplot(3, 1, 2)
df_iris.sepal_length.plot(kind='density')
plt.subplot(3, 1, 3)
df_iris.petal_width.plot(kind='density')
# Some plot configuration
plt.subplots_adjust(top=.92, bottom=.08, left=.1, right=.95, hspace=.25, wspace=.35)
plt.show()
# Plot the histograms of the petal length and width and sepal length and width
# Display them on the columns of a figure with 2X2 subplots
# color them red, green, blue and yellow, respectivelly
```
### `seaborn` - dataset-oriented plotting
Seaborn is a library that specializes in making *prettier* `matplotlib` plots of statistical data. <br>
It is built on top of matplotlib and closely integrated with pandas data structures.
https://seaborn.pydata.org/introduction.html<br>
https://python-graph-gallery.com/seaborn/
```
import seaborn as sns
```
`seaborn` lets users *style* their plotting environment.
```
sns.set(style='whitegrid')
```
However, you can always use `matplotlib`'s `plt.style`
```
#dir(sns)
sns.scatterplot(x='petal_length',y='petal_width',data=df_iris)
plt.show()
sns.scatterplot(x='petal_length',y='petal_width', hue = "species",data=df_iris)
plt.show()
```
#### Violin plot
Fancier box plot that gets rid of the need for 'jitter' to show the inherent distribution of the data points
```
columns = ['petal_length', 'petal_width', 'sepal_length']
fig, axes = plt.subplots(figsize=(10, 10))
sns.violinplot(data=df_iris.loc[:,columns], ax=axes)
axes.set_ylabel('number')
axes.set_xlabel('columns', )
plt.show()
```
#### Distplot
```
sns.set(style='darkgrid', palette='muted')
# 1 row, 3 columns
f, axes = plt.subplots(4,1, figsize=(10,10), sharex=True)
sns.despine(left=True)
# Regular displot
sns.distplot(df_iris.petal_length, ax=axes[0])
# Change the color
sns.distplot(df_iris.petal_width, kde=False, ax=axes[1], color='orange')
# Show the Kernel density estimate
sns.distplot(df_iris.sepal_width, hist=False, kde_kws={'shade':True}, ax=axes[2], color='purple')
# Show the rug
sns.distplot(df_iris.sepal_length, hist=False, rug=True, ax=axes[3], color='green')
```
#### FacetGrid
```
sns.set()
columns = ['species', 'petal_length', 'petal_width']
facet_column = 'species'
g = sns.FacetGrid(df_iris.loc[:,columns], col=facet_column, hue=facet_column, col_wrap=5)
g.map(plt.scatter, 'petal_length', 'petal_width')
sns.relplot(x="petal_length", y="petal_width", col="species",
hue="species", style="species", size="species",
data=df_iris)
plt.show()
```
https://jakevdp.github.io/PythonDataScienceHandbook/04.14-visualization-with-seaborn.html
### `plotnine` - R ggplot2 in python
plotnine is an implementation of a grammar of graphics in Python, based on ggplot2. The grammar allows users to compose plots by explicitly mapping data to the visual objects that make up the plot.
Plotting with a grammar is powerful: it makes custom (and otherwise complex) plots easy to think about and then create, while simple plots remain simple.
```
!pip install plotnine
```
https://plotnine.readthedocs.io/en/stable/
```
from plotnine import *
ggplot(data=df_iris) + aes(x="petal_length", y = "petal_width") + geom_point()
# add transparency - to avoid over plotting
ggplot(data=df_iris) + aes(x="petal_length", y = "petal_width") + geom_point(alpha=0.7)
# change point size
ggplot(data=df_iris) + aes(x="petal_length", y = "petal_width") + geom_point(size = 0.7, alpha=0.7)
# more parameters
ggplot(data=df_iris) + aes(x="petal_length", y = "petal_width") + geom_point() + scale_x_log10() + xlab("Petal Length")
n = "3"
ft = "length and width"
title = 'species : %s, petal : %s' % (n,ft)
ggplot(data=df_iris) +aes(x='petal_length',y='petal_width',color="species") + geom_point(size=0.7,alpha=0.7) + facet_wrap('~species',nrow=3) + theme(figure_size=(9,5)) + ggtitle(title)
p = ggplot(data=df_iris) + aes(x='petal_length') + geom_histogram(binwidth=1,color='black',fill='grey')
p
ggsave(plot=p, filename='hist_plot_with_plotnine.png')
```
http://cmdlinetips.com/2018/05/plotnine-a-python-library-to-use-ggplot2-in-python/ <br>
https://www.rstudio.com/wp-content/uploads/2015/03/ggplot2-cheatsheet.pdf
Exercises:
* Use ggplot to plot `sepal_length` as boxplots separated by species, add new axis labels, and put the y axis on a log10 scale.
* Write a function that takes a row of the dataframe as a parameter and, depending on the species, returns:
  * setosa: the `petal_length`
  * versicolor: the `petal_width`
  * virginica: the `sepal_length`
* Apply this function to every row in the dataset in a for loop and save the result in an array.
* Use ggplot to make a histogram of the values (a possible solution sketch follows).
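A possible solution sketch for these exercises (column and species names follow the `df_iris` dataframe used above):
```
# Boxplots of sepal_length by species, with new labels and a log10 y axis
(ggplot(data=df_iris) + aes(x='species', y='sepal_length') + geom_boxplot()
 + scale_y_log10() + xlab('Species') + ylab('Sepal length (log10 scale)'))

# Function returning a different measurement depending on the species of a row
def pick_value(row):
    if row['species'] == 'setosa':
        return row['petal_length']
    elif row['species'] == 'versicolor':
        return row['petal_width']
    else:  # virginica
        return row['sepal_length']

# Apply the function to every row and collect the results
values = []
for _, row in df_iris.iterrows():
    values.append(pick_value(row))

# Histogram of the collected values
df_values = pd.DataFrame({'value': values})
ggplot(data=df_values) + aes(x='value') + geom_histogram(binwidth=0.5, color='black', fill='grey')
```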
# 2A.eco - Python and SQL logic - corrected exercises
Corrected SQL exercises.
```
from jyquickhelper import add_notebook_menu
add_notebook_menu()
```
SQL lets you create tables and search, add, modify, or delete data in databases.
Pretty much what you will soon be doing every day. It is a data management language, not a language for cleaning, analysis, or advanced statistics.
SQL statements are written in a way that resembles ordinary English sentences. This deliberate resemblance is meant to make the language easier to learn and read. It is nevertheless important to respect a specific order for the different clauses.
In this tutorial, we will write SQL commands through Python.
For more details on SQL and the available commands, see [SQL, PRINCIPES DE BASE](http://www.xavierdupre.fr/app/ensae_teaching_cs/helpsphinx/ext2a/sql_doc.html).
## Connecting to a database
Unlike the tables we usually work with, a database is not directly visible by opening Excel or a text editor. To see what a database contains, you need a different kind of software.
For this tutorial, we recommend installing SQLiteSpy (available at [SqliteSpy](http://www.yunqa.de/delphi/products/sqlitespy/index)) or [sqlite_bro](https://pypi.python.org/pypi/sqlite_bro) if you want to see what the data looks like before using it with Python.
```
import sqlite3
# on va se connecter à une base de données SQL vide
# SQLite stocke la BDD dans un simple fichier
filepath = "./DataBase.db"
open(filepath, 'w').close() #crée un fichier vide
CreateDataBase = sqlite3.connect(filepath)
QueryCurs = CreateDataBase.cursor()
```
The cursor() method is a bit special:
It acts as a kind of intermediate memory buffer, meant to temporarily store the data being processed, as well as the operations you perform on them, before their final transfer to the database. As long as the .commit() method has not been called, no statement is actually applied to the database.
--------------------
Now that we are connected to the database, we are going to create a table containing several variables of different types:
- ID will be the primary key of the table
- Name, City, Country will be text
- Price will be a real number
```
# On définit une fonction de création de table
def CreateTable(nom_bdd):
QueryCurs.execute('''CREATE TABLE IF NOT EXISTS ''' + nom_bdd + '''
(id INTEGER PRIMARY KEY, Name TEXT,City TEXT, Country TEXT, Price REAL)''')
# On définit une fonction qui permet d'ajouter des observations dans la table
def AddEntry(nom_bdd, Nom,Ville,Pays,Prix):
QueryCurs.execute('''INSERT INTO ''' + nom_bdd + '''
(Name,City,Country,Price) VALUES (?,?,?,?)''',(Nom,Ville,Pays,Prix))
def AddEntries(nom_bdd, data):
""" data : list with (Name,City,Country,Price) tuples to insert
"""
QueryCurs.executemany('''INSERT INTO ''' + nom_bdd + '''
(Name,City,Country,Price) VALUES (?,?,?,?)''',data)
### On va créer la table clients
CreateTable('Clients')
AddEntry('Clients','Toto','Munich','Germany',5.2)
AddEntries('Clients',
[('Bill','Berlin','Germany',2.3),
('Tom','Paris','France',7.8),
('Marvin','Miami','USA',15.2),
('Anna','Paris','USA',7.8)])
# on va "commit" c'est à dire qu'on va valider la transaction.
# > on va envoyer ses modifications locales vers le référentiel central - la base de données SQL
CreateDataBase.commit()
```
### Viewing the table
To see what the table contains, we run a first SELECT asking for the whole table.
```
QueryCurs.execute('SELECT * FROM Clients')
Values = QueryCurs.fetchall()
print(Values)
```
### Moving to pandas
Nothing could be simpler: there are several ways to do it.
```
import pandas as pd
# méthode SQL Query
df1 = pd.read_sql_query('SELECT * FROM Clients', CreateDataBase)
print("En utilisant la méthode read_sql_query \n", df1.head(), "\n")
#méthode DataFrame en utilisant la liste issue de .fetchall()
df2 = pd.DataFrame(Values, columns=['ID','Name','City','Country','Price'])
print("En passant par une DataFrame \n", df2.head())
```
## Comparing SQL and pandas
### SELECT
In SQL, column selection is done with comma-separated column names, or * to select every column.
```
# en SQL
QueryCurs.execute('SELECT ID,City FROM Clients LIMIT 2')
Values = QueryCurs.fetchall()
print(Values)
```
In pandas, column selection is done by passing a list of column names.
```
#sur la table
df2[['ID','City']].head(2)
```
### WHERE
In SQL, WHERE is used to filter tables according to certain conditions.
```
QueryCurs.execute('SELECT * FROM Clients WHERE City=="Paris"')
print(QueryCurs.fetchall())
```
With pandas, there are several ways to do this:
- with a boolean mask
- using the 'query' method
```
df2[df2['City'] == "Paris"]
df2.query('City == "Paris"')
```
To combine several conditions, we use:
- & in Python, AND in SQL
- | in Python, OR in SQL
```
QueryCurs.execute('SELECT * FROM Clients WHERE City=="Paris" AND Country == "USA"')
print(QueryCurs.fetchall())
df2.query('City == "Paris" & Country == "USA"')
df2[(df2['City'] == "Paris") & (df2['Country'] == "USA")]
```
## GROUP BY
In pandas, SQL's GROUP BY operation is performed with a similar method: groupby().
groupby() is used to group observations according to the values of certain variables, while applying an aggregation function to other variables.
```
QueryCurs.execute('SELECT Country, count(*) FROM Clients GROUP BY Country')
print(QueryCurs.fetchall())
```
Careful: in pandas, the count() function does not do the same thing as in SQL. count() applies to every column and counts all non-null observations.
```
df2.groupby('Country').count()
```
To reproduce the SQL behaviour, use the size() method instead.
```
df2.groupby('Country').size()
```
More sophisticated functions can also be applied during a groupby.
```
QueryCurs.execute('SELECT Country, AVG(Price), count(*) FROM Clients GROUP BY Country')
print(QueryCurs.fetchall())
```
With pandas, we can call the classic numpy functions.
```
import numpy as np
df2.groupby('Country').agg({'Price': np.mean, 'Country': np.size})
```
Or use lambda functions.
```
# par exemple calculer le prix moyen et le multiplier par 2
df2.groupby('Country')['Price'].apply(lambda x: 2*x.mean())
QueryCurs.execute('SELECT Country, 2*AVG(Price) FROM Clients GROUP BY Country').fetchall()
QueryCurs.execute('SELECT * FROM Clients WHERE Country == "Germany"')
print(QueryCurs.fetchall())
QueryCurs.execute('SELECT * FROM Clients WHERE City=="Berlin" AND Country == "Germany"')
print(QueryCurs.fetchall())
QueryCurs.execute('SELECT * FROM Clients WHERE Price BETWEEN 7 AND 20')
print(QueryCurs.fetchall())
```
## Saving a SQL table in another format
We use the csv package, with the 'w' option for 'write'.
We create the "writer" object, which comes from the csv package.
This object has two methods:
- writerow for the column names: takes a list
- writerows for the rows: takes a collection of lists
```
data = QueryCurs.execute('SELECT * FROM Clients')
import csv
with open('./output.csv', 'w') as file:
writer = csv.writer(file)
writer.writerow(['id','Name','City','Country','Price'])
writer.writerows(data)
```
We can also go through a pandas DataFrame and use .to_csv(), as sketched below.
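For instance, a minimal sketch reusing the connection opened above:
```
# same export via pandas: read the table into a DataFrame, then write it to CSV
df_export = pd.read_sql_query('SELECT * FROM Clients', CreateDataBase)
df_export.to_csv('./output_pandas.csv', index=False)
```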
```
QueryCurs.execute('''DROP TABLE Clients''')
#QueryCurs.close()
```
## Exercise
In this exercise, we will work with the tables of the World database.
First of all, connect to the database using sqlite3 and connect.
Link to the database: [World.db3](https://github.com/sdpython/ensae_teaching_cs/raw/master/src/ensae_teaching_cs/data/data_sql/World.db3) or
```
from ensae_teaching_cs.data import simple_database
name = simple_database()
```
```
#Se connecter à la base de données WORLD
CreateDataBase = sqlite3.connect("./World.db3")
QueryCurs = CreateDataBase.cursor()
```
Familiarize yourself with the database: what are the tables? What variables do these tables contain?
- use the PRAGMA statement to get information about the tables
```
# pour obtenir la liste des tables dans la base de données
tables = QueryCurs.execute("SELECT name FROM sqlite_master WHERE type='table';").fetchall()
# on veut voir les colonnes de chaque table ainsi que la première ligne
for table in tables :
print("Table :", table[0])
schema = QueryCurs.execute("PRAGMA table_info({})".format(table[0])).fetchall()
print("Colonnes", ["{}".format(x[1]) for x in schema])
print("1ère ligne", QueryCurs.execute('SELECT * FROM {} LIMIT 1'.format(table[0])).fetchall(), "\n")
```
## Question 1
- Which 10 countries have the most languages?
- Which language is present in the largest number of countries?
```
QueryCurs.execute("""SELECT CountryCode, COUNT(*) as NB
FROM CountryLanguage
GROUP BY CountryCode
ORDER BY NB DESC
LIMIT 10""").fetchall()
QueryCurs.execute('''SELECT Language, COUNT(*) as NB
FROM CountryLanguage
GROUP BY Language
ORDER BY -NB
LIMIT 1''').fetchall()
```
## Question 2
- What are the different forms of government among the world's countries?
- Which 3 forms of government account for the largest total population?
```
QueryCurs.execute('''SELECT DISTINCT GovernmentForm FROM Country''').fetchall()
QueryCurs.execute('''SELECT GovernmentForm, SUM(Population) as Pop_Totale_Gouv
FROM Country
GROUP BY GovernmentForm
ORDER BY Pop_Totale_Gouv DESC
LIMIT 3
''').fetchall()
```
## Question 3
- How many countries have Elisabeth II at the head of their government?
- What proportion of Her Majesty's subjects do not speak English?
- 78% or 83%?
```
QueryCurs.execute('''SELECT HeadOfState, Count(*)
FROM Country
WHERE HeadOfState = "Elisabeth II" ''').fetchall()
# la population totale
population_queen_elisabeth = QueryCurs.execute('''SELECT HeadOfState, SUM(Population)
FROM Country
WHERE HeadOfState = "Elisabeth II"''').fetchall()
# La part de la population parlant anglais
Part_parlant_anglais= QueryCurs.execute('''SELECT Language, SUM(Percentage*0.01*Population)
FROM
Country
LEFT JOIN
CountryLanguage
ON Country.Code = CountryLanguage.CountryCode
WHERE HeadOfState = "Elisabeth II"
AND Language = "English"
''').fetchall()
# La réponse est 78% d'après ces données
Part_parlant_anglais[0][1]/population_queen_elisabeth[0][1]
## on trouve 83% si on ne fait pas attention au fait que dans certaines zones, 0% de la population parle anglais
## La population totale n'est alors pas la bonne, comme dans cet exemple
QueryCurs.execute('''SELECT Language,
SUM(Population_pays*0.01*Percentage) as Part_parlant_anglais, SUM(Population_pays) as Population_totale
FROM (SELECT Language, Code, Percentage, SUM(Population) as Population_pays
FROM
Country
LEFT JOIN
CountryLanguage
ON Country.Code = CountryLanguage.CountryCode
WHERE HeadOfState = "Elisabeth II" AND Language == "English"
GROUP BY Code)''').fetchall()
```
Conclusion: it is better to write two simple, readable queries to get the right result than a single query that does everything in one pass but whose correctness will take a long time to verify...
## Question 4 - moving to pandas
Create a DataFrame containing the following information per country:
- the name
- the country code
- the number of spoken languages
- the number of official languages
- the population
- the GNP
- the life expectancy
**Hint: use the pd.read_sql_query command**
What does the correlation matrix of these variables tell us?
```
df = pd.read_sql_query('''SELECT Code, Name, Population, GNP , LifeExpectancy,
COUNT(*) as Nb_langues_parlees, SUM(IsOfficial) as Nb_langues_officielles
FROM Country
INNER JOIN CountryLanguage ON Country.Code = CountryLanguage.CountryCode
GROUP BY Country.Code''',
CreateDataBase)
df.head()
df.corr()
```
# OpenVINO benchmarking with 2D U-Net
In this tutorial, we will use the Intel® Distribution of OpenVINO™ Toolkit to benchmark a 2D U-Net model.
This tutorial assumes that you have already downloaded and installed [Intel® OpenVINO™](https://software.intel.com/en-us/openvino-toolkit/choose-download) on your computer.
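Note: before running the Model Optimizer (`mo_tf.py`) or the benchmark tool later in this notebook, the OpenVINO environment variables typically need to be initialized in your terminal session. The path below is the default install location and may differ on your machine:
```
source /opt/intel/openvino/bin/setupvars.sh
```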
In order to use Intel® OpenVINO™, we need to do a few steps:
1. Convert our Keras model to a TensorFlow SavedModel.
1. Freeze the TensorFlow SavedModel.
1. Use the OpenVINO Model Optimizer to convert the frozen model to the OpenVINO Intermediate Representation (IR) format.
1. Benchmark using the OpenVINO benchmark tool: `/opt/intel/openvino/deployment_tools/tools/benchmark_tool/benchmark_app.py`
```
import keras
import os
import tensorflow as tf
import numpy as np
import keras as K
import shutil, sys
def dice_coef(y_true, y_pred, axis=(1, 2), smooth=1):
"""
Sorenson (Soft) Dice
\frac{ 2 \times \left | T \right | \cap \left | P \right |}{ \left | T \right | + \left | P \right | }
where T is ground truth mask and P is the prediction mask
"""
intersection = tf.reduce_sum(y_true * y_pred, axis=axis)
union = tf.reduce_sum(y_true + y_pred, axis=axis)
numerator = tf.constant(2.) * intersection + smooth
denominator = union + smooth
coef = numerator / denominator
return tf.reduce_mean(coef)
def soft_dice_coef(target, prediction, axis=(1, 2), smooth=0.01):
"""
Sorenson (Soft) Dice - Don't round the predictions
\frac{ 2 \times \left | T \right | \cap \left | P \right |}{ \left | T \right | + \left | P \right | }
where T is ground truth mask and P is the prediction mask
"""
intersection = tf.reduce_sum(target * prediction, axis=axis)
union = tf.reduce_sum(target + prediction, axis=axis)
numerator = tf.constant(2.) * intersection + smooth
denominator = union + smooth
coef = numerator / denominator
return tf.reduce_mean(coef)
def dice_coef_loss(target, prediction, axis=(1, 2), smooth=1.):
"""
Sorenson (Soft) Dice loss
Using -log(Dice) as the loss since it is better behaved.
Also, the log allows avoidance of the division which
can help prevent underflow when the numbers are very small.
"""
intersection = tf.reduce_sum(prediction * target, axis=axis)
p = tf.reduce_sum(prediction, axis=axis)
t = tf.reduce_sum(target, axis=axis)
numerator = tf.reduce_mean(intersection + smooth)
denominator = tf.reduce_mean(t + p + smooth)
dice_loss = -tf.log(2.*numerator) + tf.log(denominator)
return dice_loss
def combined_dice_ce_loss(y_true, y_pred, axis=(1, 2), smooth=1.,
weight=0.9):
"""
Combined Dice and Binary Cross Entropy Loss
"""
return weight*dice_coef_loss(y_true, y_pred, axis, smooth) + \
(1-weight)*K.losses.binary_crossentropy(y_true, y_pred)
inference_filename = "unet_decathlon_4_8814_128x128_randomcrop-any-input.h5"
model_filename = os.path.join("/home/ubuntu/models/unet", inference_filename)
# Load model
print("Loading Model... ")
model = K.models.load_model(model_filename, custom_objects={
"combined_dice_ce_loss": combined_dice_ce_loss,
"dice_coef_loss": dice_coef_loss,
"soft_dice_coef": soft_dice_coef,
"dice_coef": dice_coef})
print("Model loaded successfully from: " + model_filename)
sess = keras.backend.get_session()
sess.run(tf.global_variables_initializer())
import shutil, sys
output_directory = "/home/ubuntu/models/unet/output"
print("Freezing the graph.")
keras.backend.set_learning_phase(0)
signature = tf.saved_model.signature_def_utils.predict_signature_def(
inputs={'input': model.input}, outputs={'output': model.output})
#If directory exists, delete it and let builder rebuild the TF model.
if os.path.isdir(output_directory):
print (output_directory, "exists already. Deleting the folder")
shutil.rmtree(output_directory)
builder = tf.saved_model.builder.SavedModelBuilder(output_directory)
builder.add_meta_graph_and_variables(sess=sess,
tags=[tf.saved_model.tag_constants.SERVING],
signature_def_map={
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:signature
}, saver=tf.train.Saver())
builder.save()
print("TensorFlow protobuf version of model is saved in:", output_directory)
print("Model input name = ", model.input.op.name)
print("Model input shape = ", model.input.shape)
print("Model output name = ", model.output.op.name)
print("Model output shape = ", model.output.shape)
output_frozen_model_dir = "/home/ubuntu/models/unet/frozen_model"
output_frozen_graph = output_frozen_model_dir+'/saved_model_frozen.pb'
if not os.path.isdir(output_frozen_model_dir):
os.mkdir(output_frozen_model_dir)
else:
print('Directory', output_frozen_model_dir, 'already exists. Deleting it and re-creating it')
shutil.rmtree(output_frozen_model_dir)
os.mkdir(output_frozen_model_dir)
from tensorflow.python.tools.freeze_graph import freeze_graph
_ = freeze_graph(input_graph="",
input_saver="",
input_binary=False,
input_checkpoint="",
restore_op_name="save/restore_all",
filename_tensor_name="save/Const:0",
clear_devices=True,
initializer_nodes="",
input_saved_model_dir=output_directory,
output_node_names=model.output.op.name,
output_graph=output_frozen_graph)
print("TensorFlow Frozen model model is saved in:", output_frozen_graph)
output_frozen_model_dir = "/home/ubuntu/models/unet/frozen_model"
output_frozen_graph = output_frozen_model_dir+'/saved_model_frozen.pb'
if not os.path.exists(output_frozen_graph):
print(output_frozen_graph + ' doesn\'t exist. Please make sure you have a trained keras to TF frozen model')
!mo_tf.py \
--input_model '/home/ubuntu/models/unet/frozen_model/saved_model_frozen.pb' \
--input_shape=[1,160,160,4] \
--data_type FP32 \
--output_dir /home/ubuntu/models/unet/IR_models/FP32 \
--model_name saved_model
```
#### Run the following command in the terminal
```
mo_tf.py \
--input_model '/home/ubuntu/models/unet/frozen_model/saved_model_frozen.pb' \
--input_shape=[1,160,160,4] \
--data_type FP32 \
--output_dir /home/ubuntu/models/unet/IR_models/FP32 \
--model_name saved_model
```
#### Sample Output:
```
(tensorflow_p36) ubuntu@ip-172-31-46-30:~$ mo_tf.py \
> --input_model '/home/ubuntu/models/unet/frozen_model/saved_model_frozen.pb' \
> --input_shape=[1,160,160,4] \
> --data_type FP32 \
> --output_dir /home/ubuntu/models/unet/IR_models/FP32 \
> --model_name saved_model
Model Optimizer arguments:
Common parameters:
- Path to the Input Model: /home/ubuntu/models/unet/frozen_model/saved_model_frozen.pb
- Path for generated IR: /home/ubuntu/models/unet/IR_models/FP32
- IR output name: saved_model
- Log level: ERROR
- Batch: Not specified, inherited from the model
- Input layers: Not specified, inherited from the model
- Output layers: Not specified, inherited from the model
- Input shapes: [1,160,160,4]
- Mean values: Not specified
- Scale values: Not specified
- Scale factor: Not specified
- Precision of IR: FP32
- Enable fusing: True
- Enable grouped convolutions fusing: True
- Move mean values to preprocess section: False
- Reverse input channels: False
TensorFlow specific parameters:
- Input model in text protobuf format: False
- Path to model dump for TensorBoard: None
- List of shared libraries with TensorFlow custom layers implementation: None
- Update the configuration file with input/output node names: None
- Use configuration file used to generate the model with Object Detection API: None
- Operations to offload: None
- Patterns to offload: None
- Use the config file: None
Model Optimizer version: 2020.1.0-61-gd349c3ba4a
[ SUCCESS ] Generated IR version 10 model.
[ SUCCESS ] XML file: /home/ubuntu/models/unet/IR_models/FP32/saved_model.xml
[ SUCCESS ] BIN file: /home/ubuntu/models/unet/IR_models/FP32/saved_model.bin
[ SUCCESS ] Total execution time: 6.41 seconds.
[ SUCCESS ] Memory consumed: 443 MB.
```
## Benchmark
Benchmark using the following command:
```
python3 /opt/intel/openvino/deployment_tools/tools/benchmark_tool/benchmark_app.py \
-m /home/ubuntu/models/unet/IR_models/FP32/saved_model.xml \
-nireq 1 -nstreams 1
```
#### Sample Output
```
[Step 1/11] Parsing and validating input arguments
[Step 2/11] Loading Inference Engine
[ INFO ] InferenceEngine:
API version............. 2.1.37988
[ INFO ] Device info
CPU
MKLDNNPlugin............ version 2.1
Build................... 37988
[Step 3/11] Reading the Intermediate Representation network
[Step 4/11] Resizing network to match image sizes and given batch
[ INFO ] Network batch size: 1, precision: MIXED
[Step 5/11] Configuring input of the model
[Step 6/11] Setting device configuration
[Step 7/11] Loading the model to the device
[Step 8/11] Setting optimal runtime parameters
[Step 9/11] Creating infer requests and filling input blobs with images
[ INFO ] Network input 'MRImages' precision FP32, dimensions (NCHW): 1 4 160 160
[ WARNING ] No input files were given: all inputs will be filled with random values!
[ INFO ] Infer Request 0 filling
[ INFO ] Fill input 'MRImages' with random values (some binary data is expected)
[Step 10/11] Measuring performance (Start inference asyncronously, 1 inference requests using 1 streams for CPU, limits: 60000 ms duration)
[Step 11/11] Dumping statistics report
Count: 11079 iterations
Duration: 60014.36 ms
Latency: 5.11 ms
Throughput: 184.61 FPS
```
# My Project
In addition to being a place to experiment, this project has been structured to build and serve your model in a Flask application. The purpose is to allow data science exploration to easily transition into deployed services and applications on the OpenShift platform. After saving this project to git, it can be built on the OpenShift platform to serve models.
Your dependencies will live in `requirements.txt` and your prediction function will live in `prediction.py`. As a Python based s2i application, this project can be configured and built upon to fit your needs.
### Project Organization
```
.
├── README.md
├── LICENSE
├── requirements.txt <- Used to install packages for s2i application
├── 0_start_here.ipynb <- Instructional notebook
├── 1_run_flask.ipynb <- Notebook for running flask locally to test
├── 2_test_flask.ipynb <- Notebook for testing flask requests
├── .gitignore <- standard python gitignore
├── .s2i <- hidden folder for advanced s2i configuration
│ └── environment <- s2i environment settings
├── gunicorn_config.py <- configuration for gunicorn when run in OpenShift
├── prediction.py <- the predict function called from Flask
└── wsgi.py <- basic Flask application
```
### Basic Flow
1. Install and manage dependencies in `requirements.txt`.
1. Experiment as usual.
1. Extract your prediction into the `prediction.py` file.
1. Update any dependencies.
1. Run and test your application locally.
1. Save to git.
For a complete overview, please read the [README.md](./README.md)
## Install Dependencies
```
import sys
!{sys.executable} -m pip install -r requirements.txt
```
## Experiment
Experiment with data and create your prediction function. Create any serialized models needed.
```
def predict(args_dict):
return {'prediction': 'not implemented'}
predict({'keys': 'values'})
```
## Create a Predict Function
Extract the prediction logic into a standalone python file, `prediction.py` in a `predict` function. Also, make sure `requirements.txt` is updated with any additional packages you've used and need for prediction.
```
def predict(args_dict):
return {'prediction': 'not implemented'}
```
## Test Predict Function
```
from prediction import predict
predict({'keys': 'values'})
```
### Run Flask
Run flask in a separate notebook ([1_run_flask.ipynb](./1_run_flask.ipynb)) to create a local service to try it out. You must run the application in a separate notebook since it will use the kernel until stopped.
```
!FLASK_ENV=development FLASK_APP=wsgi.py flask run
```
### Test the Flask Endpoint
Test your new service endpoint in this notebook or from a separate notebook ([2_test_flask.ipynb](./2_test_flask.ipynb)). You can use `curl` or the Python `requests` library, as shown below.
```
!curl -X POST -H "Content-Type: application/json" --data '{"data": "hello world"}' http://localhost:5000/predictions
import requests
import json
response = requests.post('http://127.0.0.1:5000/predictions', '{"hello":"world"}')
response.json()
```
### Save Your Project to Git (and Build)
Now that you've created and tested your prediction and service endpoint, push the code up to git. This can be built as an s2i application on OpenShift.
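For example, a minimal sketch of these final steps — the file list, branch, and application names below are placeholders for your own repository and OpenShift project:
```
# commit and push your changes (adjust the file list and branch name to your repository)
!git add prediction.py requirements.txt wsgi.py
!git commit -m "Add prediction function"
!git push origin main

# then, from a terminal logged in to your OpenShift cluster, an s2i build could look like:
# oc new-app python:3.8~https://github.com/<your-org>/<your-repo>.git --name my-model-service
```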
# 2.4 Implementing the network model, 2.5 Implementing the forward function
In this file, we build the SSD network model and its forward-pass function.
# 2.4 Learning goals
1. Understand the four modules that make up the SSD network model
2. Be able to build the SSD network model
3. Understand how the default boxes of various sizes used by SSD are implemented
# 2.5 Learning goals
1. Understand Non-Maximum Suppression
2. Understand the forward pass of the Detect class used at SSD inference time
3. Be able to implement the SSD forward pass
# Prerequisites
None in particular
```
# パッケージのimport
from math import sqrt
from itertools import product
import pandas as pd
import torch
from torch.autograd import Function
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
```
# Implementing the vgg module
```
# 34層にわたる、vggモジュールを作成
def make_vgg():
layers = []
in_channels = 3 # 色チャネル数
# vggモジュールで使用する畳み込み層やマックスプーリングのチャネル数
cfg = [64, 64, 'M', 128, 128, 'M', 256, 256,
256, 'MC', 512, 512, 512, 'M', 512, 512, 512]
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
elif v == 'MC':
# ceilは出力サイズを、計算結果(float)に対して、切り上げで整数にするモード
# デフォルトでは出力サイズを計算結果(float)に対して、切り下げで整数にするfloorモード
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
layers += [pool5, conv6,
nn.ReLU(inplace=True), conv7, nn.ReLU(inplace=True)]
return nn.ModuleList(layers)
# 動作確認
vgg_test = make_vgg()
print(vgg_test)
```
# Implementing the extras module
```
# 8層にわたる、extrasモジュールを作成
def make_extras():
layers = []
in_channels = 1024 # vggモジュールから出力された、extraに入力される画像チャネル数
# extraモジュールの畳み込み層のチャネル数を設定するコンフィギュレーション
cfg = [256, 512, 128, 256, 128, 256, 128, 256]
layers += [nn.Conv2d(in_channels, cfg[0], kernel_size=(1))]
layers += [nn.Conv2d(cfg[0], cfg[1], kernel_size=(3), stride=2, padding=1)]
layers += [nn.Conv2d(cfg[1], cfg[2], kernel_size=(1))]
layers += [nn.Conv2d(cfg[2], cfg[3], kernel_size=(3), stride=2, padding=1)]
layers += [nn.Conv2d(cfg[3], cfg[4], kernel_size=(1))]
layers += [nn.Conv2d(cfg[4], cfg[5], kernel_size=(3))]
layers += [nn.Conv2d(cfg[5], cfg[6], kernel_size=(1))]
layers += [nn.Conv2d(cfg[6], cfg[7], kernel_size=(3))]
# 活性化関数のReLUは今回はSSDモデルの順伝搬のなかで用意することにし、
# extraモジュールでは用意していません
return nn.ModuleList(layers)
# 動作確認
extras_test = make_extras()
print(extras_test)
```
# Implementing the loc and conf modules
```
# デフォルトボックスのオフセットを出力するloc_layers、
# デフォルトボックスに対する各クラスの信頼度confidenceを出力するconf_layersを作成
def make_loc_conf(num_classes=21, bbox_aspect_num=[4, 6, 6, 6, 4, 4]):
loc_layers = []
conf_layers = []
# VGGの22層目、conv4_3(source1)に対する畳み込み層
loc_layers += [nn.Conv2d(512, bbox_aspect_num[0]
* 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(512, bbox_aspect_num[0]
* num_classes, kernel_size=3, padding=1)]
# VGGの最終層(source2)に対する畳み込み層
loc_layers += [nn.Conv2d(1024, bbox_aspect_num[1]
* 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(1024, bbox_aspect_num[1]
* num_classes, kernel_size=3, padding=1)]
# extraの(source3)に対する畳み込み層
loc_layers += [nn.Conv2d(512, bbox_aspect_num[2]
* 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(512, bbox_aspect_num[2]
* num_classes, kernel_size=3, padding=1)]
# extraの(source4)に対する畳み込み層
loc_layers += [nn.Conv2d(256, bbox_aspect_num[3]
* 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(256, bbox_aspect_num[3]
* num_classes, kernel_size=3, padding=1)]
# extraの(source5)に対する畳み込み層
loc_layers += [nn.Conv2d(256, bbox_aspect_num[4]
* 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(256, bbox_aspect_num[4]
* num_classes, kernel_size=3, padding=1)]
# extraの(source6)に対する畳み込み層
loc_layers += [nn.Conv2d(256, bbox_aspect_num[5]
* 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(256, bbox_aspect_num[5]
* num_classes, kernel_size=3, padding=1)]
return nn.ModuleList(loc_layers), nn.ModuleList(conf_layers)
# 動作確認
loc_test, conf_test = make_loc_conf()
print(loc_test)
print(conf_test)
```
# Implementing the L2Norm layer
```
# convC4_3からの出力をscale=20のL2Normで正規化する層
class L2Norm(nn.Module):
def __init__(self, input_channels=512, scale=20):
super(L2Norm, self).__init__() # 親クラスのコンストラクタ実行
self.weight = nn.Parameter(torch.Tensor(input_channels))
self.scale = scale # 係数weightの初期値として設定する値
self.reset_parameters() # パラメータの初期化
self.eps = 1e-10
def reset_parameters(self):
'''結合パラメータを大きさscaleの値にする初期化を実行'''
init.constant_(self.weight, self.scale) # weightの値がすべてscale(=20)になる
def forward(self, x):
'''38×38の特徴量に対して、512チャネルにわたって2乗和のルートを求めた
38×38個の値を使用し、各特徴量を正規化してから係数をかけ算する層'''
# 各チャネルにおける38×38個の特徴量のチャネル方向の2乗和を計算し、
# さらにルートを求め、割り算して正規化する
# normのテンソルサイズはtorch.Size([batch_num, 1, 38, 38])になります
norm = x.pow(2).sum(dim=1, keepdim=True).sqrt()+self.eps
x = torch.div(x, norm)
# 係数をかける。係数はチャネルごとに1つで、512個の係数を持つ
# self.weightのテンソルサイズはtorch.Size([512])なので
# torch.Size([batch_num, 512, 38, 38])まで変形します
weights = self.weight.unsqueeze(
0).unsqueeze(2).unsqueeze(3).expand_as(x)
out = weights * x
return out
```
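As a hedged sanity check (an addition), after `L2Norm` the channel-wise L2 norm at every spatial location should be approximately equal to the scale of 20, since the weights are initialized to that value:
```
import torch

l2norm = L2Norm()
feat = torch.randn(1, 512, 38, 38)  # dummy conv4_3-like feature map
out = l2norm(feat)
# the norm over the channel dimension should be close to 20 at every (h, w) position
print(out.pow(2).sum(dim=1).sqrt()[0, :2, :2])
```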
# Implement the default boxes
```
# Class that produces the default boxes
class DBox(object):
    def __init__(self, cfg):
        super(DBox, self).__init__()

        # Initial settings
        self.image_size = cfg['input_size']  # image size of 300
        # [38, 19, ...] sizes of the feature maps of each source
        self.feature_maps = cfg['feature_maps']
        self.num_priors = len(cfg["feature_maps"])  # number of sources = 6
        self.steps = cfg['steps']  # [8, 16, ...] pixel size of the DBoxes
        self.min_sizes = cfg['min_sizes']
        # [30, 60, ...] pixel size of the small square DBoxes (strictly speaking, their area)
        self.max_sizes = cfg['max_sizes']
        # [60, 111, ...] pixel size of the large square DBoxes (strictly speaking, their area)
        self.aspect_ratios = cfg['aspect_ratios']  # aspect ratios of the rectangular DBoxes

    def make_dbox_list(self):
        '''Create the DBoxes'''
        mean = []
        # 'feature_maps': [38, 19, 10, 5, 3, 1]
        for k, f in enumerate(self.feature_maps):
            for i, j in product(range(f), repeat=2):  # all ordered pairs (i, j) with i, j < f, i.e. f**2 cells
                # image size of the feature map
                # 300 / 'steps': [8, 16, 32, 64, 100, 300],
                f_k = self.image_size / self.steps[k]

                # center coordinates x, y of the DBox, normalized to the range 0-1
                cx = (j + 0.5) / f_k
                cy = (i + 0.5) / f_k

                # small DBox with aspect ratio 1 [cx, cy, width, height]
                # 'min_sizes': [30, 60, 111, 162, 213, 264]
                s_k = self.min_sizes[k]/self.image_size
                mean += [cx, cy, s_k, s_k]

                # large DBox with aspect ratio 1 [cx, cy, width, height]
                # 'max_sizes': [60, 111, 162, 213, 264, 315],
                s_k_prime = sqrt(s_k * (self.max_sizes[k]/self.image_size))
                mean += [cx, cy, s_k_prime, s_k_prime]

                # DBoxes with the other aspect ratios [cx, cy, width, height]
                for ar in self.aspect_ratios[k]:
                    mean += [cx, cy, s_k*sqrt(ar), s_k/sqrt(ar)]
                    mean += [cx, cy, s_k/sqrt(ar), s_k*sqrt(ar)]

        # convert the DBoxes into a tensor of shape torch.Size([8732, 4])
        output = torch.Tensor(mean).view(-1, 4)

        # clamp the sizes to the range [0, 1] so the DBoxes do not stick out of the image
        output.clamp_(max=1, min=0)

        return output

# Check the behaviour
# Configuration for SSD300
ssd_cfg = {
    'num_classes': 21,  # total number of classes, including the background class
    'input_size': 300,  # input image size
    'bbox_aspect_num': [4, 6, 6, 6, 4, 4],  # number of DBox aspect ratios per source
    'feature_maps': [38, 19, 10, 5, 3, 1],  # image size of each source
    'steps': [8, 16, 32, 64, 100, 300],  # determines the size of the DBoxes
    'min_sizes': [30, 60, 111, 162, 213, 264],  # determines the size of the DBoxes
    'max_sizes': [60, 111, 162, 213, 264, 315],  # determines the size of the DBoxes
    'aspect_ratios': [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
}

# Create the DBoxes
dbox = DBox(ssd_cfg)
dbox_list = dbox.make_dbox_list()

# Inspect the DBox output
pd.DataFrame(dbox_list.numpy())
```
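A brief, hedged check of the result (an addition): the list should contain exactly 8,732 boxes and, because of the clamp, every coordinate should lie in [0, 1]:
```
print(dbox_list.shape)                                 # expected: torch.Size([8732, 4])
print(dbox_list.min().item(), dbox_list.max().item())  # both within [0, 1] after clamping
```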
# Implement the SSD class
```
# Create the SSD class
class SSD(nn.Module):

    def __init__(self, phase, cfg):
        super(SSD, self).__init__()

        self.phase = phase  # specify 'train' or 'inference'
        self.num_classes = cfg["num_classes"]  # number of classes = 21

        # Build the SSD network
        self.vgg = make_vgg()
        self.extras = make_extras()
        self.L2Norm = L2Norm()
        self.loc, self.conf = make_loc_conf(
            cfg["num_classes"], cfg["bbox_aspect_num"])

        # Create the DBoxes
        dbox = DBox(cfg)
        self.dbox_list = dbox.make_dbox_list()

        # At inference time the class "Detect" is used
        if phase == 'inference':
            self.detect = Detect()

# Sanity check
ssd_test = SSD(phase="train", cfg=ssd_cfg)
print(ssd_test)
```
# From here on: Section 2.5, implementing the forward pass
# Implement the decode function
```
# Function that converts DBoxes into BBoxes using the offset information
def decode(loc, dbox_list):
    """
    Convert DBoxes into BBoxes using the offset information.

    Parameters
    ----------
    loc: [8732,4]
        Offset information predicted by the SSD model.
    dbox_list: [8732,4]
        DBox information.

    Returns
    -------
    boxes : [xmin, ymin, xmax, ymax]
        BBox information.
    """

    # DBoxes are stored as [cx, cy, width, height]
    # loc is stored as [Δcx, Δcy, Δwidth, Δheight]

    # Compute the BBoxes from the offset information
    boxes = torch.cat((
        dbox_list[:, :2] + loc[:, :2] * 0.1 * dbox_list[:, 2:],
        dbox_list[:, 2:] * torch.exp(loc[:, 2:] * 0.2)), dim=1)
    # boxes has shape torch.Size([8732, 4])

    # Convert the BBox coordinates from [cx, cy, width, height] to [xmin, ymin, xmax, ymax]
    boxes[:, :2] -= boxes[:, 2:] / 2  # convert to the (xmin, ymin) coordinates
    boxes[:, 2:] += boxes[:, :2]  # convert to the (xmax, ymax) coordinates

    return boxes
```
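To see what `decode` does, here is a small, hedged example (an addition): with all offsets set to zero the BBoxes are simply the DBoxes converted from center-size format to corner format:
```
zero_loc = torch.zeros_like(dbox_list)  # pretend the network predicts no offsets
boxes = decode(zero_loc, dbox_list)
print(dbox_list[:2])  # [cx, cy, w, h]
print(boxes[:2])      # the same boxes expressed as [xmin, ymin, xmax, ymax]
```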
# Implement a function that performs Non-Maximum Suppression
```
# Function that performs Non-Maximum Suppression
def nm_suppression(boxes, scores, overlap=0.45, top_k=200):
    """
    Perform Non-Maximum Suppression.
    Remove the BBoxes in `boxes` that overlap too much (more than `overlap`).

    Parameters
    ----------
    boxes : [number of BBoxes above the confidence threshold (0.01), 4]
        BBox information.
    scores : [number of BBoxes above the confidence threshold (0.01)]
        conf information.

    Returns
    -------
    keep : list
        Indexes that passed NMS, stored in descending order of conf.
    count : int
        Number of BBoxes that passed NMS.
    """

    # Prepare the template for the return value
    count = 0
    keep = scores.new(scores.size(0)).zero_().long()
    # keep: torch.Size([number of BBoxes above the confidence threshold]), all elements 0

    # Compute the area of each BBox
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    area = torch.mul(x2 - x1, y2 - y1)

    # Copies of boxes, used later as templates when computing the IoU overlap between BBoxes
    tmp_x1 = boxes.new()
    tmp_y1 = boxes.new()
    tmp_x2 = boxes.new()
    tmp_y2 = boxes.new()
    tmp_w = boxes.new()
    tmp_h = boxes.new()

    # Sort the scores in ascending order
    v, idx = scores.sort(0)

    # Take the indexes of the top_k (200) BBoxes (there may be fewer than 200)
    idx = idx[-top_k:]

    # Loop as long as idx still has elements
    while idx.numel() > 0:
        i = idx[-1]  # index of the current maximum conf

        # Store the index with the maximum conf at the current end of keep.
        # The BBoxes that overlap strongly with this one will be removed next.
        keep[count] = i
        count += 1

        # If this was the last BBox, leave the loop
        if idx.size(0) == 1:
            break

        # The index with the maximum conf has been stored in keep, so drop it from idx
        idx = idx[:-1]

        # -------------------
        # Extract and remove the BBoxes that overlap strongly with the one just stored in keep
        # -------------------
        # Gather the BBoxes up to the reduced idx into the variables given as `out`
        torch.index_select(x1, 0, idx, out=tmp_x1)
        torch.index_select(y1, 0, idx, out=tmp_y1)
        torch.index_select(x2, 0, idx, out=tmp_x2)
        torch.index_select(y2, 0, idx, out=tmp_y2)

        # Clamp all BBoxes to the region covered by the current BBox (index i)
        tmp_x1 = torch.clamp(tmp_x1, min=x1[i])
        tmp_y1 = torch.clamp(tmp_y1, min=y1[i])
        tmp_x2 = torch.clamp(tmp_x2, max=x2[i])
        tmp_y2 = torch.clamp(tmp_y2, max=y2[i])

        # Resize w and h to the tensor size with one index removed
        tmp_w.resize_as_(tmp_x2)
        tmp_h.resize_as_(tmp_y2)

        # Width and height of the clamped BBoxes
        tmp_w = tmp_x2 - tmp_x1
        tmp_h = tmp_y2 - tmp_y1

        # Set negative widths and heights to 0
        tmp_w = torch.clamp(tmp_w, min=0.0)
        tmp_h = torch.clamp(tmp_h, min=0.0)

        # Area of the clamped (intersection) regions
        inter = tmp_w*tmp_h

        # IoU = intersection / (area(a) + area(b) - intersection)
        rem_areas = torch.index_select(area, 0, idx)  # original area of each remaining BBox
        union = (rem_areas - inter) + area[i]  # area of the union (OR) of the two regions
        IoU = inter/union

        # Keep only the idx whose IoU is smaller than overlap
        idx = idx[IoU.le(overlap)]  # le performs the Less-than-or-Equal-to comparison
        # idx with IoU larger than overlap frame the same object as the BBox stored in keep
        # first, so they are removed

    # Once the while loop exits, we are done
    return keep, count
```
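Here is a small, hedged example of the function on three hand-made boxes (an addition): two of them overlap heavily, so only the higher-scoring one of that pair survives:
```
boxes = torch.tensor([[0.10, 0.10, 0.40, 0.40],
                      [0.12, 0.11, 0.41, 0.42],   # overlaps strongly with the first box
                      [0.60, 0.60, 0.90, 0.90]])  # a separate object
scores = torch.tensor([0.9, 0.8, 0.7])
keep, count = nm_suppression(boxes, scores, overlap=0.45)
print(keep[:count])  # expected: tensor([0, 2]); the second box is suppressed
```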
# Implement the Detect class
```
# At inference time, outputs BBoxes with the overlaps removed, based on the conf and loc outputs of SSD
class Detect(Function):

    def __init__(self, conf_thresh=0.01, top_k=200, nms_thresh=0.45):
        self.softmax = nn.Softmax(dim=-1)  # used to normalize conf with the softmax function
        self.conf_thresh = conf_thresh  # only DBoxes with conf higher than conf_thresh=0.01 are considered
        self.top_k = top_k  # the top_k = 200 highest-conf boxes are used in nm_suppression
        self.nms_thresh = nms_thresh  # in nm_suppression, an IoU larger than nms_thresh=0.45 means the BBoxes cover the same object

    def forward(self, loc_data, conf_data, dbox_list):
        """
        Run the forward computation.

        Parameters
        ----------
        loc_data: [batch_num, 8732, 4]
            Offset information.
        conf_data: [batch_num, 8732, num_classes]
            Detection confidences.
        dbox_list: [8732, 4]
            DBox information.

        Returns
        -------
        output : torch.Size([batch_num, 21, 200, 5])
            (batch_num, class, top-200 by conf, BBox information)
        """

        # Get the sizes
        num_batch = loc_data.size(0)  # mini-batch size
        num_dbox = loc_data.size(1)  # number of DBoxes = 8732
        num_classes = conf_data.size(2)  # number of classes = 21

        # Normalize conf with the softmax function
        conf_data = self.softmax(conf_data)

        # Prepare the output container of shape [minibatch, 21, 200, 5]
        output = torch.zeros(num_batch, num_classes, self.top_k, 5)

        # Reorder conf_data from [batch_num, 8732, num_classes] to [batch_num, num_classes, 8732]
        conf_preds = conf_data.transpose(2, 1)

        # Loop over the mini-batch
        for i in range(num_batch):

            # 1. Compute the corrected BBoxes [xmin, ymin, xmax, ymax] from loc and the DBoxes
            decoded_boxes = decode(loc_data[i], dbox_list)

            # Make a copy of conf
            conf_scores = conf_preds[i].clone()

            # Loop over the image classes (skip the background class at index 0, start from index 1)
            for cl in range(1, num_classes):

                # 2. Extract the BBoxes whose conf exceeds the threshold
                # Build a mask of which conf values exceed the threshold and
                # obtain the indexes of those values as c_mask
                c_mask = conf_scores[cl].gt(self.conf_thresh)
                # gt stands for Greater than; values above the threshold become 1, the rest 0
                # conf_scores: torch.Size([21, 8732])
                # c_mask: torch.Size([8732])

                # scores has shape torch.Size([number of BBoxes above the threshold])
                scores = conf_scores[cl][c_mask]

                # If no conf exceeds the threshold, i.e. scores=[], do nothing
                if scores.nelement() == 0:  # nelement gives the total number of elements
                    continue

                # Reshape c_mask so that it can be applied to decoded_boxes
                l_mask = c_mask.unsqueeze(1).expand_as(decoded_boxes)
                # l_mask: torch.Size([8732, 4])

                # Apply l_mask to decoded_boxes
                boxes = decoded_boxes[l_mask].view(-1, 4)
                # decoded_boxes[l_mask] becomes one-dimensional,
                # so reshape it with view into (number of BBoxes above the threshold, 4)

                # 3. Run Non-Maximum Suppression to remove overlapping BBoxes
                ids, count = nm_suppression(
                    boxes, scores, self.nms_thresh, self.top_k)
                # ids: indexes that passed Non-Maximum Suppression, in descending order of conf
                # count: number of BBoxes that passed Non-Maximum Suppression

                # Store the results that survived Non-Maximum Suppression in output
                output[i, cl, :count] = torch.cat((scores[ids[:count]].unsqueeze(1),
                                                   boxes[ids[:count]]), 1)

        return output  # torch.Size([1, 21, 200, 5])
```
# Implement the SSD class
```
# Create the SSD class
class SSD(nn.Module):

    def __init__(self, phase, cfg):
        super(SSD, self).__init__()

        self.phase = phase  # specify 'train' or 'inference'
        self.num_classes = cfg["num_classes"]  # number of classes = 21

        # Build the SSD network
        self.vgg = make_vgg()
        self.extras = make_extras()
        self.L2Norm = L2Norm()
        self.loc, self.conf = make_loc_conf(
            cfg["num_classes"], cfg["bbox_aspect_num"])

        # Create the DBoxes
        dbox = DBox(cfg)
        self.dbox_list = dbox.make_dbox_list()

        # At inference time the class "Detect" is used
        if phase == 'inference':
            self.detect = Detect()

    def forward(self, x):
        sources = list()  # stores the inputs source1-6 for loc and conf
        loc = list()  # stores the outputs of loc
        conf = list()  # stores the outputs of conf

        # Compute vgg up to conv4_3
        for k in range(23):
            x = self.vgg[k](x)

        # Feed the conv4_3 output into L2Norm to create source1 and append it to sources
        source1 = self.L2Norm(x)
        sources.append(source1)

        # Compute vgg to the end to create source2 and append it to sources
        for k in range(23, len(self.vgg)):
            x = self.vgg[k](x)
        sources.append(x)

        # Compute the convolutions and ReLUs of extras
        # and append source3-6 to sources
        for k, v in enumerate(self.extras):
            x = F.relu(v(x), inplace=True)
            if k % 2 == 1:  # append to sources after conv -> ReLU -> conv -> ReLU
                sources.append(x)

        # Apply the corresponding convolution once to each of source1-6.
        # zip lets the for loop pick up elements from several lists at once;
        # there are six sources, so the loop runs six times
        for (x, l, c) in zip(sources, self.loc, self.conf):
            # Permute reorders the dimensions
            loc.append(l(x).permute(0, 2, 3, 1).contiguous())
            conf.append(c(x).permute(0, 2, 3, 1).contiguous())
            # l(x) and c(x) perform the convolutions.
            # Their output size is [batch_num, 4*number of aspect ratios, feature map height, feature map width].
            # The number of aspect ratios differs per source, which is inconvenient,
            # so permute reorders the dimensions to
            # [minibatch, feature map height, feature map width, 4*number of aspect ratios]
            # (Note)
            # torch.contiguous() rearranges the elements contiguously in memory.
            # The view function is used later, and view requires the tensor
            # to be laid out contiguously in memory.

        # Reshape loc and conf further:
        # loc becomes torch.Size([batch_num, 34928])
        # conf becomes torch.Size([batch_num, 183372])
        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)

        # Reshape loc and conf once more:
        # loc becomes torch.Size([batch_num, 8732, 4])
        # conf becomes torch.Size([batch_num, 8732, 21])
        loc = loc.view(loc.size(0), -1, 4)
        conf = conf.view(conf.size(0), -1, self.num_classes)

        # Finally, produce the output
        output = (loc, conf, self.dbox_list)

        if self.phase == "inference":  # at inference time
            # run the forward pass of the class "Detect";
            # the return value has shape torch.Size([batch_num, 21, 200, 5])
            return self.detect(output[0], output[1], output[2])

        else:  # at training time
            return output
            # the return value is the tuple (loc, conf, dbox_list)
```
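As a final, hedged sanity check (not part of the original notebook), pushing a dummy batch through the training-phase network should yield the expected output shapes:
```
ssd_train = SSD(phase="train", cfg=ssd_cfg)
dummy_images = torch.randn(2, 3, 300, 300)  # batch of two 300x300 RGB images
loc_out, conf_out, dboxes = ssd_train(dummy_images)
print(loc_out.shape)   # expected: torch.Size([2, 8732, 4])
print(conf_out.shape)  # expected: torch.Size([2, 8732, 21])
print(dboxes.shape)    # torch.Size([8732, 4])
```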
That concludes the implementation.
Pointwise Local Reconstruction Error
====================================
Example for the usage of `skcosmo.metrics.pointwise_local_reconstruction_error` as the pointwise local reconstruction error (LFRE) on the degenerate CH4 manifold. We apply the local reconstruction measure to the degenerate CH4 manifold dataset. This dataset was specifically constructed to be representable by 4-body features (bispectrum) but not by 3-body features (power spectrum). In other words, the dataset contains environments which are different but have the same 3-body features. For more details about the dataset please refer to [Pozdnyakov 2020](https://doi.org/10.1103/PhysRevLett.125.166001).
The skcosmo dataset already contains the 3- and 4-body features computed with [librascal](https://github.com/lab-cosmo/librascal), so we can load them and compare them with the LFRE.
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rc('font', size=20)
from skcosmo.datasets import load_degenerate_CH4_manifold
from skcosmo.metrics import pointwise_local_reconstruction_error
# load features
degenerate_manifold = load_degenerate_CH4_manifold()
power_spectrum_features = degenerate_manifold.data.SOAP_power_spectrum
bispectrum_features = degenerate_manifold.data.SOAP_bispectrum
print(degenerate_manifold.DESCR)
n_local_points = 20
print("Computing pointwise LFRE...")
# local reconstruction error of power spectrum features using bispectrum features
power_spectrum_to_bispectrum_pointwise_lfre = pointwise_local_reconstruction_error(
power_spectrum_features,
bispectrum_features,
n_local_points,
train_idx = np.arange(0, len(power_spectrum_features), 2),
test_idx = np.arange(0, len(power_spectrum_features)),
estimator=None,
n_jobs=4,
)
# local reconstruction error of bispectrum features using power spectrum features
bispectrum_to_power_spectrum_pointwise_lfre = pointwise_local_reconstruction_error(
bispectrum_features,
power_spectrum_features,
n_local_points,
train_idx = np.arange(0, len(power_spectrum_features), 2),
test_idx = np.arange(0, len(power_spectrum_features)),
estimator=None,
n_jobs=4,
)
print("Computing pointwise LFRE finished.")
print(
"LFRE(3-body, 4-body) = ",
np.linalg.norm(power_spectrum_to_bispectrum_pointwise_lfre)/np.sqrt(len(power_spectrum_to_bispectrum_pointwise_lfre))
)
print(
"LFRE(4-body, 3-body) = ",
np.linalg.norm(bispectrum_to_power_spectrum_pointwise_lfre)/np.sqrt(len(power_spectrum_to_bispectrum_pointwise_lfre))
)
fig, (ax34, ax43) = plt.subplots(
1, 2, constrained_layout=True, figsize=(16, 7.5), sharey="row", sharex=True
)
vmax = 0.5
X, Y = np.meshgrid(np.linspace(0.7, 0.9, 9), np.linspace(-0.1, 0.1, 9))
pcm = ax34.contourf(
X,
Y,
power_spectrum_to_bispectrum_pointwise_lfre[81:].reshape(9, 9).T,
vmin=0,
vmax=vmax,
)
ax43.contourf(
X,
Y,
bispectrum_to_power_spectrum_pointwise_lfre[81:].reshape(9, 9).T,
vmin=0,
vmax=vmax,
)
ax34.axhline(y=0, color="red", linewidth=5)
ax43.axhline(y=0, color="red", linewidth=5)
ax34.set_ylabel(r"v/$\pi$")
ax34.set_xlabel(r"u/$\pi$")
ax43.set_xlabel(r"u/$\pi$")
ax34.set_title(r"$X^-$ LFRE(3-body, 4-body)")
ax43.set_title(r"$X^-$ LFRE(4-body, 3-body)")
cbar = fig.colorbar(pcm, ax=[ax34, ax43], label="LFRE", location="bottom")
plt.show()
```
The environments span a manifold which is described by the coordinates $v/\pi$ and $u/\pi$ (please refer to [Pozdnyakov 2020](https://doi.org/10.1103/PhysRevLett.125.166001) for a concrete understanding of the manifold). The LFRE is shown for each environment in the manifold in the two contour plots. It can be seen that the reconstruction error of the 4-body features using the 3-body features (the left plot) is most significant along the degenerate line (the horizontal red line). This agrees with the fact that the 3-body features remain the same on the degenerate line and can therefore not reconstruct the 4-body features. On the other hand, the 4-body features can perfectly reconstruct the 3-body features, as seen in the right plot.
```
#import ray
import pyrofiler as pyrof
from pyrofiler.pyrofiler import Profiler
from pyrofiler import callbacks
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import sys
from multiprocessing import Pool, Array
from multiprocessing.dummy import Pool as ThreadPool
import os
sns.set_style('whitegrid')
np.random.seed(42)
def work(arg):
    i, x, y, par_vars, result_idx = arg
    patch = sliced_contract(x, y, par_vars, i)
    sl = target_slice(result_idx, par_vars, i)
    # write the computed patch into the shared result array
    # (os.global_C is allocated before pool.map is called)
    os.global_C[sl[0]] = patch

pool = ThreadPool(processes=2**7)
```
# Slice specified nodes in dimspec
```
def _none_slice():
    return slice(None)

def _get_idx(x, idxs, slice_idx, shapes=None):
    if shapes is None:
        shapes = [2]*len(idxs)
    point = np.unravel_index(slice_idx, shapes)
    get_point = {i: p for i, p in zip(idxs, point)}
    if x in idxs:
        p = get_point[x]
        return slice(p, p+1)
    else:
        return _none_slice()

def _slices_for_idxs(idxs, *args, shapes=None, slice_idx=0):
    """Return array of slices along idxs"""
    slices = []
    for indexes in args:
        _slice = [_get_idx(x, idxs, slice_idx, shapes) for x in indexes]
        slices.append(tuple(_slice))
    return slices
def log_log_scale():
    plt.yscale('log')
    plt.xscale('log')

def minorticks():
    plt.minorticks_on()
    plt.grid(which='minor', alpha=0.5, linestyle='-', axis='both')
```
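To illustrate what these helpers return, here is a small, hedged example (an addition): slicing the parallelised indices 0 and 2 at `slice_idx=3` (binary `11`) pins both of them to their second position while leaving all other dimensions untouched:
```
slices = _slices_for_idxs([0, 2], [0, 1, 2], [2, 3], slice_idx=3)
print(slices)
# [(slice(1, 2), slice(None), slice(1, 2)), (slice(1, 2), slice(None))]
```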
# Test parallelism
### Example task
```
def get_example_task(A=8, B=10, C=7, dim1=0):
    shape1 = [2]*(A+B)
    shape2 = [2]*(A+C)
    for i in range(dim1):
        shape1[-i] = 1
        shape2[-i] = 1
    T1 = np.random.randn(*shape1)
    T2 = np.random.randn(*shape2)
    common = list(range(A))
    idxs1 = common + list(range(A, A+B))
    idxs2 = common + list(range(A+B, A+B+C))
    return (T1, idxs1), (T2, idxs2)

x, y = get_example_task(A=9)
x[1], y[1]
```
## Serial invocation
```
def contract(A, B):
    a, idxa = A
    b, idxb = B
    contract_idx = set(idxa) & set(idxb)
    result_idx = set(idxa + idxb)
    print('contract result idx', result_idx)
    C = np.einsum(a, idxa, b, idxb, result_idx)
    return C

def sliced_contract(x, y, idxs, num):
    slices = _slices_for_idxs(idxs, x[1], y[1], slice_idx=num)
    a = x[0][slices[0]]
    b = y[0][slices[1]]
    with pyrof.timing(f'\tcontract sliced {num}'):
        C = contract((a, x[1]), (b, y[1]))
    return C

def target_slice(result_idx, idxs, num):
    slices = _slices_for_idxs(idxs, result_idx, slice_idx=num)
    return slices

with pyrof.timing('contract'):
    C = contract(x, y)
```
### Maybe squash dimensions to fit into einsum?
```
def __contract_bound(A, B):
    a, idxa = A
    b, idxb = B
    contract_idx = set(idxa) & set(idxb)

    def glue_first(shape):
        sh = [shape[0] * shape[1]] + list(shape[2:])
        return sh

    result_idx = set(idxa + idxb)
    _map_a = {k: v for k, v in zip(idxa, a.shape)}
    _map_b = {k: v for k, v in zip(idxb, b.shape)}
    _map = {**_map_a, **_map_b}
    print(_map)
    result_idx = sorted(tuple(_map.keys()))
    target_shape = tuple([_map[i] for i in result_idx])
    _dimlen = len(result_idx)
    _maxdims = 22
    print('dimlen', _dimlen)
    new_a, new_b = a.shape, b.shape
    if _dimlen > _maxdims:
        _contr_dim = _dimlen - _maxdims
        print(len(new_a), len(new_b))
        for i in range(_contr_dim):
            idxa = idxa[1:]
            idxb = idxb[1:]
            new_a = glue_first(new_a)
            new_b = glue_first(new_b)
        _map_a = {k: v for k, v in zip(idxa, a.shape)}
        _map_b = {k: v for k, v in zip(idxb, b.shape)}
        _map = {**_map_a, **_map_b}
        print(_map)
        result_idx = sorted(tuple(_map.keys()))
        print(len(new_a), len(new_b))
    a = a.reshape(new_a)
    b = b.reshape(new_b)
    print(a.shape, b.shape)
    print(idxa, idxb)
    print('btsh', result_idx, target_shape)
    C = np.einsum(a, idxa, b, idxb, result_idx)
    return C.reshape(*target_shape)

def __add_dims(x, dims, ofs):
    arr, idxs = x
    arr = arr.reshape(list(arr.shape) + [1]*dims)
    md = max(idxs)
    return arr, idxs + list(range(md+ofs, ofs+md+dims))
```
### Many var parallelisation
```
prof_seq = Profiler()
prof_seq.use_append()

contract_idx = set(x[1]) & set(y[1])
result_idx = set(x[1] + y[1])

for i in range(1):
    _ = contract(x, y)

for rank in range(1, 7):
    with prof_seq.timing('Single thread'):
        C = contract(x, y)

    par_vars = list(range(rank))
    target_shape = C.shape

    with prof_seq.timing('One patch: total'):
        i = 0
        with prof_seq.timing('One patch: compute'):
            patch = sliced_contract(x, y, par_vars, i)
        C_par = np.empty(target_shape)
        with prof_seq.timing('One patch: assign'):
            _slice = target_slice(result_idx, par_vars, i)
            C_par[_slice[0]] = patch
```
## Plot parallelisation theoretical speedup
```
prof_seq.data
threads = 2**np.arange(1,7)
C_size = sys.getsizeof(C)
for k in prof_seq.data:
    plt.plot(threads, prof_seq.data[k], label=k)
plt.loglog(basex=2, basey=2)
from matplotlib.ticker import FormatStrFormatter
plt.title(f'Single node parallelization one batch test. Task size: {C_size:e}')
plt.xlabel('Thread count')
plt.ylabel('Time')
minorticks()
plt.legend()
plt.savefig('figures/node_par_seqtest.pdf')
plt.close()
```
## Use unix tools
### Threading
```
x,y = get_example_task(A=20, B=9, C=8, dim1=2)
contract_idx = set(x[1]) & set(y[1])
result_idx = set(x[1] + y[1])
prof_thread = Profiler()
prof_thread.use_append()
for i in range(1):
    C = contract(x, y)
C_size = sys.getsizeof(C)
target_shape = C.shape
C = None
for rank in range(1, 7):
    if rank == 1:
        with prof_thread.timing('Single thread'):
            C = contract(x, y)
        C = None

    with prof_thread.timing('Multithread: total'):
        par_vars = list(range(rank))
        threads = 2**len(par_vars)
        os.global_C = np.empty(target_shape)
        with prof_thread.timing('Multithread: work'):
            _ = pool.map(work, ((i, x, y, par_vars, result_idx) for i in range(threads)))
#assert np.array_equal(C, os.global_C)
_data = prof_thread.data
print(_data)
_data_knl = {'Single thread': [1.3409993648529053, 1.3587844371795654, 1.3243846893310547, 1.336273193359375, 1.3332529067993164, 1.3412296772003174], 'Multithread: work': [0.7453043460845947, 0.5046432018280029, 0.39226293563842773, 0.40014123916625977, 0.5875647068023682, 1.0763416290283203], 'Multithread: total': [0.7459092140197754, 0.5054154396057129, 0.3927571773529053, 0.4007418155670166, 0.588019847869873, 1.0771734714508057]}
_data_biggest = {'Single thread': [27.42847204208374, 26.855594873428345, 26.628530979156494, 26.862286806106567, 26.71247911453247, 27.049968957901], 'Multithread: work': [14.236661434173584, 7.511402368545532, 4.950175762176514, 3.012814521789551, 2.351712703704834, 1.994131088256836], 'Multithread: total': [14.23719048500061, 7.512014150619507, 4.950707912445068, 3.0133090019226074, 2.3522441387176514, 1.9946098327636719]}
#_data = _data_biggest
threads = 2**np.arange(1,7)
for k in _data:
    plt.plot(threads, _data[k], label=k)
plt.loglog(basex=2, basey=2)
plt.yscale('linear')
from matplotlib.ticker import FormatStrFormatter
plt.title(f'Single node parallelization test. Task size: {C_size:e}')
plt.xlabel('Thread count')
plt.ylabel('Time')
minorticks()
plt.legend()
plt.savefig('figures/node_par_threadtest_biggest.pdf')
#plt.rcParams.update({"xtick.bottom" : True, "ytick.left" : True})
sns.set_style('whitegrid')
#sns.set()
_data_block = {
'28':{'Single thread': [4.890172481536865], 'Multithread: work': [5.31355881690979, 2.839036464691162, 1.6587004661560059, 1.4607517719268799, 1.1708364486694336, 1.3796212673187256], 'Multithread: total': [5.31405234336853, 2.839534282684326, 1.659132957458496, 1.4612171649932861, 1.1718018054962158, 1.380187749862671]}
,'29': {'Single thread': [12.708141088485718], 'Multithread: work': [12.543375015258789, 6.445459604263306, 3.702291250228882, 2.225062131881714, 1.7111496925354004, 1.9049854278564453], 'Multithread: total': [12.543986320495605, 6.445924997329712, 3.7027952671051025, 2.2256860733032227, 1.7118234634399414, 1.905548095703125]}
, '30': {'Single thread': [26.65827775001526], 'Multithread: work': [26.532104015350342, 13.471351146697998, 7.361323356628418, 4.6045496463775635, 2.9114484786987305, 2.138317108154297], 'Multithread: total': [26.532758712768555, 13.471930980682373, 7.363482475280762, 4.605044364929199, 2.91215181350708, 2.1388139724731445]}
, '31': {'Single thread': [54.215914249420166], 'Multithread: work': [53.743674755096436, 27.541589498519897, 15.45585584640503, 8.812772750854492, 5.398884296417236, 4.5649192333221436], 'Multithread: total': [53.74607563018799, 27.542162895202637, 15.456344604492188, 8.814988851547241, 5.399648427963257, 4.5654377937316895]}
, '32': {'Single thread': [107.05718398094177], 'Multithread: work': [106.85966396331787, 55.66744685173035, 31.097278356552124, 18.133748292922974, 10.42065167427063, 9.078657865524292], 'Multithread: total': [106.86018991470337, 55.669677734375, 31.099481344223022, 18.13595175743103, 10.421445369720459, 9.080750703811646]}
}
threads = 2**np.arange(1,7)
fig, axs = plt.subplots(1,1, figsize=(6,6))
colors = (plt.cm.gnuplot2(x) for x in np.linspace(.8,.2,len(_data_block)))
for size, _data in _data_block.items():
    singl = _data['Single thread']
    total = _data['Multithread: total']
    c = next(colors)
    plt.plot(threads, total, '-D', color=c, label=f'Tensor size {2**(4+int(size))/1e9:.2f}Gb')
    plt.plot(threads, singl*len(threads), '--', alpha=.3, color=c)
#from matplotlib.ticker import FormatStrFormatter
plt.loglog(basex=2, basey=2)
#plt.yscale('linear')
plt.grid()
#minorticks()
ax = plt.gca()
#ax.yaxis.set_minor_locator(plt.ticker.LogLocator(base=10.0, subs='all'))
#ax.yaxis.set_minor_formatter(plt.ticker.NullFormatter())
plt.title(f'Single node contraction parallelization for different sizes')
plt.xlabel('Thread count')
plt.ylabel('Time')
plt.grid(True,which="both")
handles, labels = plt.gca().get_legend_handles_labels()
plt.legend(handles[::-1], labels[::-1], loc='upper right')
plt.savefig('figures/node_par_threadtest_gener_jlse.pdf')
#plt.rcParams.update({"xtick.bottom" : True, "ytick.left" : True})
sns.set_style('whitegrid')
#sns.set()
_data_block = {
'27':{'Single thread': [4.890172481536865], 'Multithread: work': [5.31355881690979, 2.839036464691162, 1.6587004661560059, 1.4607517719268799, 1.1708364486694336, 1.3796212673187256], 'Multithread: total': [5.31405234336853, 2.839534282684326, 1.659132957458496, 1.4612171649932861, 1.1718018054962158, 1.380187749862671]}
,'30': {'Single thread': [37.403658866882324], 'Multithread: work': [39.51915979385376, 21.37852430343628, 11.835341453552246, 7.165068864822388, 4.922534942626953, 4.410918235778809], 'Multithread: total': [39.519590854644775, 21.378950595855713, 11.83582329750061, 7.1655051708221436, 4.923001050949097, 4.411387205123901
]}
}
threads = 2**np.arange(1,7)
fig, axs = plt.subplots(1,1, figsize=(6,6))
colors = (plt.cm.gnuplot2(x) for x in np.linspace(.8,.2,len(_data_block)))
for size, _data in _data_block.items():
    singl = _data['Single thread']
    total = _data['Multithread: total']
    c = next(colors)
    plt.plot(threads, total, '-D', color=c, label=f'Tensor size {2**(4+int(size))/1e9:.2f}Gb')
    plt.plot(threads, singl*len(threads), '--', alpha=.3, color=c)
#from matplotlib.ticker import FormatStrFormatter
plt.loglog(basex=2, basey=2)
#plt.yscale('linear')
plt.grid()
#minorticks()
ax = plt.gca()
#ax.yaxis.set_minor_locator(plt.ticker.LogLocator(base=10.0, subs='all'))
#ax.yaxis.set_minor_formatter(plt.ticker.NullFormatter())
plt.title(f'Single node contraction parallelization for different sizes')
plt.xlabel('Thread count')
plt.ylabel('Time')
plt.grid(True,which="both")
handles, labels = plt.gca().get_legend_handles_labels()
plt.legend(handles[::-1], labels[::-1], loc='upper right')
plt.savefig('figures/node_par_threadtest_gener_theta.pdf')
```
### Multiprocessing
```
flat_size = len(C.flatten())
with pyrof.timing('init array'):
    os.global_C = np.empty(target_shape)
#os.global_C = tonumpyarray(Array('d', flat_size))
#us.global_C = os.global_C.reshape(target_shape)
pool = Pool(processes=threads)
print('inited pool')
with pyrof.timing('parallel work'):
    print('started work')
    _ = pool.map(work, range(threads))
C_size = sys.getsizeof(os.global_C)
print(f'result size: {C_size:e}')
assert np.array_equal(C, os.global_C)
del os.global_C
```
```
import datetime as dt
import numpy as np
import pandas as pd
import panel as pn
pn.extension('tabulator')
```
The ``Tabulator`` widget allows displaying and editing a pandas DataFrame. The `Tabulator` is a largely backward compatible replacement for the [`DataFrame`](./DataFrame.ipynb) widget and will eventually replace it. It is built on the [Tabulator](http://tabulator.info/) library, which provides for a wide range of features.
For more information about listening to widget events and laying out widgets refer to the [widgets user guide](../../user_guide/Widgets.ipynb). Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Param.ipynb).
#### Parameters:
For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb).
##### Core
* **``aggregators``** (``dict``): A dictionary mapping from index name to an aggregator to be used for `hierarchical` multi-indexes (valid aggregators include 'min', 'max', 'mean' and 'sum'). If separate aggregators for different columns are required the dictionary may be nested as `{index_name: {column_name: aggregator}}`
* **``configuration``** (``dict``): A dictionary mapping used to specify tabulator options not explicitly exposed by panel.
* **``editors``** (``dict``): A dictionary mapping from column name to a bokeh `CellEditor` instance or tabulator editor specification.
* **``embed_content``** (``boolean``): Whether to embed the `row_content` or to dynamically fetch it when a row is expanded.
* **``expanded``** (``list``): The currently expanded rows as a list of integer indexes.
* **``filters``** (``list``): A list of client-side filter definitions that are applied to the table.
* **``formatters``** (``dict``): A dictionary mapping from column name to a bokeh `CellFormatter` instance or tabulator formatter specification.
* **``groupby``** (`list`): Groups rows in the table by one or more columns.
* **``header_align``** (``dict`` or ``str``): A mapping from column name to header alignment or a fixed header alignment, which should be one of `'left'`, `'center'`, `'right'`.
* **``header_filters``** (``boolean``/``dict``): A boolean enabling filters in the column headers or a dictionary providing filter definitions for specific columns.
* **``hierarchical``** (boolean, default=False): Whether to render multi-indexes as hierarchical index (note hierarchical must be enabled during instantiation and cannot be modified later)
* **``hidden_columns``** (`list`): List of columns to hide.
* **``layout``** (``str``, `default='fit_data_table'`): Describes the column layout mode with one of the following options `'fit_columns'`, `'fit_data'`, `'fit_data_stretch'`, `'fit_data_fill'`, `'fit_data_table'`.
* **``frozen_columns``** (`list`): List of columns to freeze, preventing them from scrolling out of frame. Column can be specified by name or index.
* **``frozen_rows``**: (`list`): List of rows to freeze, preventing them from scrolling out of frame. Rows can be specified by positive or negative index.
* **``page``** (``int``, `default=1`): Current page, if pagination is enabled.
* **``page_size``** (``int``, `default=20`): Number of rows on each page, if pagination is enabled.
* **``pagination``** (`str`, `default=None`): Set to `'local'` or `'remote'` to enable pagination; by default pagination is disabled with the value set to `None`.
* **``row_content``** (``callable``): A function that receives the expanded row as input and should return a Panel object to render into the expanded region below the row.
* **``row_height``** (``int``, `default=30`): The height of each table row.
* **``selection``** (``list``): The currently selected rows as a list of integer indexes.
* **``selectable``** (`boolean` or `str` or `int`, `default=True`): Defines the selection mode:
    * `True`: Selects rows on click. To select multiple use Ctrl-select, to select a range use Shift-select
    * `False`: Disables selection
    * `'checkbox'`: Adds a column of checkboxes to toggle selections
    * `'checkbox-single'`: Same as `'checkbox'` but the header does not allow select/deselect all
    * `'toggle'`: Selection toggles when clicked
    * `int`: The maximum number of selectable rows
* **``selectable_rows``** (`callable`): A function that should return a list of integer indexes given a DataFrame indicating which rows may be selected.
* **``show_index``** (``boolean``, `default=True`): Whether to show the index column.
* **``text_align``** (``dict`` or ``str``): A mapping from column name to alignment or a fixed column alignment, which should be one of `'left'`, `'center'`, `'right'`.
* **`theme`** (``str``, `default='simple'`): The CSS theme to apply (note that changing the theme will restyle all tables on the page), which should be one of `'default'`, `'site'`, `'simple'`, `'midnight'`, `'modern'`, `'bootstrap'`, `'bootstrap4'`, `'materialize'`, `'bulma'`, `'semantic-ui'`, or `'fast'`.
* **``titles``** (``dict``): A mapping from column name to a title to override the name with.
* **``value``** (``pd.DataFrame``): The pandas DataFrame to display and edit
* **``widths``** (``dict``): A dictionary mapping from column name to column width in the rendered table.
##### Display
* **``disabled``** (``boolean``): Whether the widget is editable
* **``name``** (``str``): The title of the widget
##### Properties
* **``current_view``** (``DataFrame``): The current view of the table that is displayed, i.e. after sorting and filtering are applied
* **``selected_dataframe``** (``DataFrame``): A DataFrame reflecting the currently selected rows.
___
The ``Tabulator`` widget renders a DataFrame using an interactive grid, which allows directly editing the contents of the dataframe in place, with any changes being synced with Python. The `Tabulator` will usually determine the appropriate formatter based on the type of the data:
```
df = pd.DataFrame({
'int': [1, 2, 3],
'float': [3.14, 6.28, 9.42],
'str': ['A', 'B', 'C'],
'bool': [True, False, True],
'date': [dt.date(2019, 1, 1), dt.date(2020, 1, 1), dt.date(2020, 1, 10)],
'datetime': [dt.datetime(2019, 1, 1, 10), dt.datetime(2020, 1, 1, 12), dt.datetime(2020, 1, 10, 13)]
}, index=[1, 2, 3])
df_widget = pn.widgets.Tabulator(df)
df_widget
```
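To tie a few of the core options listed above together, here is a small, hedged sketch (not from the original guide) that hides a column, renames headers, fixes a column width and centers a column, all using parameters documented in the list above:
```
pn.widgets.Tabulator(
    df,
    hidden_columns=['datetime'],                   # hide the datetime column
    titles={'str': 'String', 'bool': 'Boolean'},   # override column titles
    widths={'float': 100},                         # fixed width for the float column
    text_align={'int': 'center'},                  # center the int column
    show_index=False,                              # drop the index column
)
```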
## Formatters
By default the widget will pick bokeh ``CellFormatter`` and ``CellEditor`` types appropriate to the dtype of the column. These may be overridden by explicit dictionaries mapping from the column name to the editor or formatter instance. For example, below we create a ``NumberFormatter`` to customize the formatting of the float values and a ``BooleanFormatter`` to render the ``bool`` column:
```
from bokeh.models.widgets.tables import NumberFormatter, BooleanFormatter
bokeh_formatters = {
'float': NumberFormatter(format='0.00000'),
'bool': BooleanFormatter(),
}
pn.widgets.Tabulator(df, formatters=bokeh_formatters)
```
The list of valid Bokeh formatters includes:
* [BooleanFormatter](https://docs.bokeh.org/en/latest/docs/reference/models/widgets.tables.html#bokeh.models.widgets.tables.BooleanFormatter)
* [DateFormatter](https://docs.bokeh.org/en/latest/docs/reference/models/widgets.tables.html#bokeh.models.widgets.tables.DateFormatter)
* [NumberFormatter](https://docs.bokeh.org/en/latest/docs/reference/models/widgets.tables.html#bokeh.models.widgets.tables.NumberFormatter)
* [HTMLTemplateFormatter](https://docs.bokeh.org/en/latest/docs/reference/models/widgets.tables.html#bokeh.models.widgets.tables.HTMLTemplateFormatter)
* [StringFormatter](https://docs.bokeh.org/en/latest/docs/reference/models/widgets.tables.html#bokeh.models.widgets.tables.StringFormatter)
* [ScientificFormatter](https://docs.bokeh.org/en/latest/docs/reference/models/widgets.tables.html#bokeh.models.widgets.tables.ScientificFormatter)
However in addition to the formatters exposed by Bokeh it is also possible to provide valid formatters built into the Tabulator library. These may be defined either as a string or as a dictionary declaring the 'type' and other arguments, which are passed to Tabulator as the `formatterParams`:
```
tabulator_formatters = {
'float': {'type': 'progress', 'max': 10},
'bool': {'type': 'tickCross'}
}
pn.widgets.Tabulator(df, formatters=tabulator_formatters)
```
The list of valid Tabulator formatters can be found in the [Tabulator documentation](http://tabulator.info/docs/4.9/format#format-builtin).
## Editors
Just like the formatters, the `Tabulator` will natively understand the Bokeh `Editor` types. However, in the background it will replace most of them with equivalent editors natively supported by the tabulator library:
```
from bokeh.models.widgets.tables import CheckboxEditor, NumberEditor, SelectEditor, DateEditor, TimeEditor
bokeh_editors = {
'float': NumberEditor(),
'bool': CheckboxEditor(),
'str': SelectEditor(options=['A', 'B', 'C', 'D']),
}
pn.widgets.Tabulator(df[['float', 'bool', 'str']], editors=bokeh_editors)
```
Therefore it is often preferable to use one of the [Tabulator editors](http://tabulator.info/docs/5.0/edit#edit) directly. Note that in addition to the standard Tabulator editors the Tabulator widget also supports `'date'` and `'datetime'` editors:
```
from bokeh.models.widgets.tables import CheckboxEditor, NumberEditor, SelectEditor
bokeh_editors = {
'float': {'type': 'number', 'max': 10, 'step': 0.1},
'bool': {'type': 'tickCross', 'tristate': True, 'indeterminateValue': None},
'str': {'type': 'autocomplete', 'values': True},
'date': 'date',
'datetime': 'datetime'
}
edit_table = pn.widgets.Tabulator(df, editors=bokeh_editors)
edit_table
```
When editing a cell the data stored on the `Tabulator.value` is updated and you can listen to any changes using the usual `.param.watch(callback, 'value')` mechanism. However if you need to know precisely which cell was changed you may also attach an `on_edit` callback which will be passed a `TableEditEvent` containing the:
- `column`: Name of the edited column
- `row`: Integer index of the edited row
- `value`: The updated value
```
edit_table.on_edit(lambda e: print(e.column, e.row, e.value))
```
### Column layouts
By default the `Tabulator` widget will adjust the sizes of both the columns and the table based on the contents, reflecting the default value of the parameter: `layout="fit_data_table"`. Alternative modes allow manually specifying the widths of the columns, giving each column equal widths, or adjusting just the size of the columns.
#### Manual column widths
To manually adjust column widths provide explicit `widths` for each of the columns:
```
custom_df = pd._testing.makeMixedDataFrame()
pn.widgets.Tabulator(custom_df, widths={'index': 70, 'A': 50, 'B': 50, 'C': 70, 'D': 130})
```
You can also declare a single width for all columns this way:
```
pn.widgets.Tabulator(custom_df, widths=130)
```
#### Autosize columns
To automatically adjust the columns depending on their content set `layout='fit_data'`:
```
pn.widgets.Tabulator(custom_df, layout='fit_data', width=400)
```
To ensure that the table fits all the data but also stretches to fill all the available space, set `layout='fit_data_stretch'`:
```
pn.widgets.Tabulator(custom_df, layout='fit_data_stretch', width=400)
```
The `'fit_data_fill'` option on the other hand won't stretch the last column but still fill the space:
```
pn.widgets.Tabulator(custom_df, layout='fit_data_fill', width=400)
```
Perhaps the most useful of these options is `layout='fit_data_table'` (and therefore the default) since this will automatically size both the columns and the table:
```
pn.widgets.Tabulator(custom_df, layout='fit_data_table')
```
#### Equal size
The simplest option is simply to allocate each column equal amount of size:
```
pn.widgets.Tabulator(custom_df, layout='fit_columns', width=650)
```
## Alignment
The content of a column or its header can be horizontally aligned with `text_align` and `header_align`. These two parameters accept either a string that globally defines the alignment or a dictionary that declares which particular columns are meant to be aligned and how.
```
pn.widgets.Tabulator(df, header_align='center', text_align={'str': 'right', 'bool': 'center'}, widths=200)
```
## Styling
The ability to style the contents of a table based on its content and other considerations is very important. Thankfully pandas provides a powerful [styling API](https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html), which can be used in conjunction with the `Tabulator` widget. Specifically the `Tabulator` widget exposes a `.style` attribute just like a `pandas.DataFrame` which lets the user apply custom styling using methods like `.apply` and `.applymap`. For a detailed guide to styling see the [Pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html).
Here we will demonstrate with a simple example, starting with a basic table:
```
style_df = pd.DataFrame(np.random.randn(10, 5), columns=list('ABCDE'))
styled = pn.widgets.Tabulator(style_df)
```
Next we define two functions which apply styling cell-wise (`color_negative_red`) and column-wise (`highlight_max`), which we then apply to the `Tabulator` using the `.style` API and then display the `styled` table:
```
def color_negative_red(val):
    """
    Takes a scalar and returns a string with
    the css property `'color: red'` for negative
    strings, black otherwise.
    """
    color = 'red' if val < 0 else 'black'
    return 'color: %s' % color

def highlight_max(s):
    '''
    highlight the maximum in a Series yellow.
    '''
    is_max = s == s.max()
    return ['background-color: yellow' if v else '' for v in is_max]
styled.style.applymap(color_negative_red).apply(highlight_max)
styled
```
## Theming
The Tabulator library ships with a number of themes, which are defined as CSS stylesheets. For that reason changing the theme on one table will affect all tables on the page, and it will usually be preferable to set the theme once at the class level like this:
```python
pn.widgets.Tabulator.theme = 'default'
```
For a full list of themes see the [Tabulator documentation](http://tabulator.info/docs/4.9/theme), however the default themes include:
- `'simple'`
- `'default'`
- `'midnight'`
- `'site'`
- `'modern'`
- `'bootstrap'`
- `'bootstrap4'`
- `'materialize'`
- `'semantic-ui'`
- `'bulma'`
## Selection
The `selection` parameter controls which rows in the table are selected and can be set from Python and updated by selecting rows on the frontend:
```
sel_df = pd.DataFrame(np.random.randn(10, 5), columns=list('ABCDE'))
select_table = pn.widgets.Tabulator(sel_df, selection=[0, 3, 7])
select_table
```
Once initialized, the ``selection`` parameter will return the integer indexes of the selected rows, while the ``selected_dataframe`` property will return a new DataFrame containing just the selected rows:
```
select_table.selection = [1, 4, 9]
select_table.selected_dataframe
```
The `selectable` parameter declares how the selections work.
- `True`: Selects rows on click. To select multiple use Ctrl-select, to select a range use Shift-select
- `False`: Disables selection
- `'checkbox'`: Adds a column of checkboxes to toggle selections
- `'checkbox-single'`: Same as `'checkbox'` but disables (de)select-all in the header
- `'toggle'`: Selection toggles when clicked
- Any positive `int`: A number that sets the maximum number of selectable rows
```
pn.widgets.Tabulator(sel_df, selection=[0, 3, 7], selectable='checkbox')
```
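The integer mode can be sketched as follows (an added example): with `selectable=2` at most two rows can be selected at the same time:
```
# Allow at most two rows to be selected at a time
pn.widgets.Tabulator(sel_df, selectable=2)
```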
Additionally we can also disable selection for specific rows by providing a `selectable_rows` function. The function must accept a DataFrame and return a list of integer indexes indicating which rows are selectable, e.g. here we disable selection for every second row:
```
pn.widgets.Tabulator(sel_df, selectable_rows=lambda df: list(range(0, len(df), 2)))
```
### Freezing rows and columns
Sometimes your table will be larger than can be displayed in a single viewport, in which case scroll bars will be enabled. In such cases, you might want to make sure that certain information is always visible. This is where the `frozen_columns` and `frozen_rows` options come in.
#### Frozen columns
When you have a large number of columns and can't fit them all on the screen you might still want to make sure that certain columns do not scroll out of view. The `frozen_columns` option makes this possible by specifying a list of columns that should be frozen, e.g. `frozen_columns=['index']` will freeze the index column:
```
wide_df = pd._testing.makeCustomDataframe(10, 10, r_idx_names=['index'])
pn.widgets.Tabulator(wide_df, frozen_columns=['index'], width=400)
```
#### Frozen rows
Another common scenario is when you have certain rows with special meaning, e.g. aggregates that summarize the information in the rest of the table. In this case you may want to freeze those rows so they do not scroll out of view. You can achieve this by setting a list of `frozen_rows` by integer index (which can be positive or negative, where negative values are relative to the end of the table):
```
date_df = pd._testing.makeTimeDataFrame().iloc[:10]
agg_df = pd.concat([date_df, date_df.median().to_frame('Median').T, date_df.mean().to_frame('Mean').T])
agg_df.index= agg_df.index.map(str)
pn.widgets.Tabulator(agg_df, frozen_rows=[-2, -1], width=400)
```
## Row contents
A table can only display so much information without becoming difficult to scan. We may want to render additional information to a table row to provide additional context. To make this possible you can provide a `row_content` function which is given the table row as an argument and should return a panel object that will be rendered into an expanding region below the row. By default the contents are fetched dynamically whenever a row is expanded, however using the `embed_content` parameter we can embed all the content.
Below we create a periodic table of elements where the Wikipedia page for each element will be rendered into the expanded region:
```
from bokeh.sampledata.periodic_table import elements
periodic_df = elements[['atomic number', 'name', 'atomic mass', 'metal', 'year discovered']].set_index('atomic number')
content_fn = lambda row: pn.pane.HTML(
f'<iframe src="http://en.wikipedia.org/wiki/{row["name"]}?printable=yes" width="100%" height="300px"></iframe>',
sizing_mode='stretch_width'
)
periodic_table = pn.widgets.Tabulator(
periodic_df, height=500, layout='fit_columns', sizing_mode='stretch_width',
row_content=content_fn, embed_content=True
)
periodic_table
```
The currently expanded rows can be accessed (and set) on the `expanded` parameter:
```
periodic_table.expanded
```
## Grouping
Another useful option is the ability to group specific rows together, which can be achieved using the `groups` parameter. The `groups` parameter should be composed of a dictionary mapping from the group titles to the column names:
```
pn.widgets.Tabulator(date_df, groups={'Group 1': ['A', 'B'], 'Group 2': ['C', 'D']})
```
## Groupby
In addition to grouping columns we can also group rows by the values along one or more columns:
```
from bokeh.sampledata.autompg import autompg
pn.widgets.Tabulator(autompg, groupby=['yr', 'origin'], height=240)
```
### Hierarchical Multi-index
The `Tabulator` widget can also render a hierarchical multi-index and aggregate over specific categories. If a DataFrame with a hierarchical multi-index is supplied and `hierarchical` is enabled, the widget will group data by the categories in the order they are defined in. Additionally, for each group in the multi-index an aggregator may be provided which will aggregate over the values in that category.
For example we may load population data for locations around the world broken down by sex and age-group. If we specify aggregators over the 'AgeGrp' and 'Sex' indexes we can see the aggregated values for each of those groups (note that we do not have to specify an aggregator for the outer index since we specify the aggregators over the subgroups, in this case 'Sex'):
```
from bokeh.sampledata.population import data as population_data
pop_df = population_data[population_data.Year == 2020].set_index(['Location', 'AgeGrp', 'Sex'])[['Value']]
pn.widgets.Tabulator(value=pop_df, hierarchical=True, aggregators={'Sex': 'sum', 'AgeGrp': 'sum'}, height=400)
```
## Pagination
When working with large tables we sometimes can't send all the data to the browser at once. In these scenarios we can enable pagination, which will fetch only the currently viewed data from the server backend. This may be enabled by setting `pagination='remote'` and the size of each page can be set using the `page_size` option:
```
large_df = pd._testing.makeCustomDataframe(100000, 5)
%%time
paginated_table = pn.widgets.Tabulator(large_df, pagination='remote', page_size=10)
paginated_table
```
Contrary to the `'remote'` option, `'local'` pagination entirely loads the data but still allows to display it on multiple pages.
```
%%time
medium_df = pd._testing.makeCustomDataframe(10000, 5)
paginated_table = pn.widgets.Tabulator(medium_df, pagination='local', page_size=10)
paginated_table
```
## Filtering
A very common scenario is that you want to attach a number of filters to a table in order to view just a subset of the data. You can achieve this through callbacks or other reactive approaches but the `.add_filter` method makes it much easier.
#### Constant and Widget filters
The simplest approach to filtering is to select along a column with a constant or dynamic value. The `.add_filter` method allows passing in constant values, widgets and parameters. If a widget or parameter is provided the table will watch the object for changes in the value and update the data in response. The filtering will depend on the type of the constant or dynamic value:
- scalar: Filters by checking for equality
- `tuple`: A tuple will be interpreted as a range.
- `list`/`set`: A list or set will be interpreted as a set of discrete scalars and the filter will check if the values in the column match any of the items in the list.
As an example we will create a DataFrame with some data of mixed types:
```
mixed_df = pd._testing.makeMixedDataFrame()
filter_table = pn.widgets.Tabulator(mixed_df)
filter_table
```
Now we will start adding filters one-by-one, e.g. to start with we add a filter for the `'A'` column, selecting a range from 0 to 3:
```
filter_table.add_filter((0, 3), 'A')
```
Next we add dynamic widget based filter, a `RangeSlider` which allows us to further narrow down the data along the `'A'` column:
```
slider = pn.widgets.RangeSlider(start=0, end=3, name='A Filter')
filter_table.add_filter(slider, 'A')
```
Lastly we will add a `MultiSelect` filter along the `'C'` column:
```
select = pn.widgets.MultiSelect(options=['foo1', 'foo2', 'foo3', 'foo4', 'foo5'], name='C Filter')
filter_table.add_filter(select, 'C')
```
Now let's display the table alongside the widget based filters:
```
pn.Row(
pn.Column(slider, select),
filter_table
)
```
After filtering you can inspect the current view with the `current_view` property:
```
filter_table.current_view
```
#### Function based filtering
For more complex filtering tasks you can supply a function that should accept the DataFrame to be filtered as the first argument and must return a filtered copy of the data. Let's start by loading some data.
```
import sqlite3
from bokeh.sampledata.movies_data import movie_path
con = sqlite3.Connection(movie_path)
movies_df = pd.read_sql('SELECT Title, Year, Genre, Director, Writer, imdbRating from omdb', con)
movies_df = movies_df[~movies_df.Director.isna()]
movies_table = pn.widgets.Tabulator(movies_df, pagination='remote', layout='fit_columns', width=800)
```
By using the `pn.bind` function, which binds widget and parameter values to a function, complex filtering can be achieved. E.g. here we will add a filter function that tests whether a string or regex is contained in the 'Director' column of a listing of thousands of movies:
```
director_filter = pn.widgets.TextInput(name='Director filter', value='Chaplin')
def contains_filter(df, pattern, column):
    if not pattern:
        return df
    return df[df[column].str.contains(pattern)]
movies_table.add_filter(pn.bind(contains_filter, pattern=director_filter, column='Director'))
pn.Row(director_filter, movies_table)
```
### Client-side filtering
In addition to the Python API the Tabulator widget also offers a client-side filtering API, which can be exposed through `header_filters` or by manually adding filters to the rendered Bokeh model. The API for declaring header filters is almost identical to the API for defining [Editors](#Editors). The `header_filters` can either be enabled by setting it to `True` or by manually supplying filter types for each column. The filter types support all the same options as the editors; in fact, if you do not declare explicit `header_filters`, the Tabulator will simply use the defined `editors` to determine the correct filter type:
```
bokeh_editors = {
'float': {'type': 'number', 'max': 10, 'step': 0.1},
'bool': {'type': 'tickCross', 'tristate': True, 'indeterminateValue': None},
'str': {'type': 'autocomplete', 'values': True}
}
header_filter_table = pn.widgets.Tabulator(
df[['float', 'bool', 'str']], height=140, width=400, layout='fit_columns',
editors=bokeh_editors, header_filters=True
)
header_filter_table
```
When a filter is applied client-side the `filters` parameter is synced with Python. The definition of `filters` looks something like this:
```
[{'field': 'Director', 'type': '=', 'value': 'Steven Spielberg'}]
```
Try applying a filter and then inspect the `filters` parameter:
```
header_filter_table.filters
```
For all supported filtering types see the [Tabulator Filtering documentation](http://tabulator.info/docs/4.9/filter).
If we want to change the filter type for the `header_filters` we can do so by supplying a dictionary indexed by the column names, where each entry is a dictionary that may define the `'type'`, a comparison `'func'`, a `'placeholder'` and any additional keywords supported by the particular filter type.
```
movie_filters = {
'Title': {'type': 'input', 'func': 'like', 'placeholder': 'Enter title'},
'Year': {'placeholder': 'Enter year'},
'Genre': {'type': 'input', 'func': 'like', 'placeholder': 'Enter genre'},
'Director': {'type': 'input', 'func': 'like', 'placeholder': 'Enter director'},
'Writer': {'type': 'input', 'func': 'like', 'placeholder': 'Enter writer'},
'imdbRating': {'type': 'number', 'func': '>=', 'placeholder': 'Enter minimum rating'}
}
filter_table = pn.widgets.Tabulator(
movies_df, pagination='remote', layout='fit_columns', page_size=10, sizing_mode='stretch_width',
header_filters=movie_filters
)
filter_table
```
## Downloading
The `Tabulator` also supports triggering a download of the data as a CSV or JSON file depending on the filename. The download can be triggered with the `.download()` method, which optionally accepts the filename as the first argument.
To trigger the download client-side (i.e. without involving the server) you can use the `.download_menu` method which creates a `TextInput` and `Button` widget, which allow setting the filename and triggering the download respectively:
```
download_df = pd.DataFrame(np.random.randn(10, 5), columns=list('ABCDE'))
download_table = pn.widgets.Tabulator(download_df)
filename, button = download_table.download_menu(
text_kwargs={'name': 'Enter filename', 'value': 'default.csv'},
button_kwargs={'name': 'Download table'}
)
pn.Row(
pn.Column(filename, button),
download_table
)
```
## Streaming
When we are monitoring some source of data that updates over time, we may want to update the table with the newly arriving data. However, we do not want to transmit the entire dataset each time. To handle efficient transfer of just the latest data, we can use the `.stream` method on the `Tabulator` object:
```
stream_df = pd.DataFrame(np.random.randn(10, 5), columns=list('ABCDE'))
stream_table = pn.widgets.Tabulator(stream_df, layout='fit_columns', width=450)
stream_table
```
As an example, we will schedule a periodic callback that streams new data every 1000 ms (i.e. 1 s), five times in a row:
```
def stream_data(follow=True):
stream_df = pd.DataFrame(np.random.randn(10, 5), columns=list('ABCDE'))
stream_table.stream(stream_df, follow=follow)
pn.state.add_periodic_callback(stream_data, period=1000, count=5)
```
If you are viewing this example with a live Python kernel you will be able to watch the table update and scroll along. If we want to disable the scrolling behavior, we can set `follow=False`:
```
stream_data(follow=False)
```
## Patching
In certain cases we don't want to update the table with new data but just patch existing data.
```
patch_table = pn.widgets.Tabulator(df[['int', 'float', 'str', 'bool']])
patch_table
```
The easiest way to patch the data is by supplying a dictionary as the patch value. The dictionary should have the following structure:
```python
{
column: [
(index: int or slice, value),
...
],
...
}
```
As an example, below we will patch the 'bool' and 'int' columns. On the `'bool'` column we will replace the 0th and 2nd row and on the `'int'` column we replace the first two rows:
```
patch_table.patch({
'bool': [
(0, False),
(2, False)
],
'int': [
(slice(0, 2), [3, 2])
]
})
```
## Static Configuration
Panel does not expose all options available from Tabulator; if a desired option is not natively supported, it can be set via the `configuration` argument.
This dictionary can be seen as a base dictionary which the Tabulator object fills in and passes to the Tabulator JavaScript library.
As an example, we can turn off sorting and resizing of columns by disabling the `headerSort` and `resizable` options.
```
df = pd.DataFrame({
'int': [1, 2, 3],
'float': [3.14, 6.28, 9.42],
'str': ['A', 'B', 'C'],
'bool': [True, False, True],
'date': [dt.date(2019, 1, 1), dt.date(2020, 1, 1), dt.date(2020, 1, 10)]
}, index=[1, 2, 3])
df_widget = pn.widgets.Tabulator(df, configuration={'columnDefaults': {
'resizable': False,
    'headerSort': False
}})
df_widget.servable()
```
These and other available Tabulator options are listed at http://tabulator.info/docs/4.9/options.
Not all options will work, however; in particular, settable callbacks and options that are managed by Panel's internal Tabulator module (for example the `columns` option) should not be overridden.
Additionally, note that the `configuration` parameter is not responsive, so it can only be set at instantiation time.
# VGGNet in Keras
In this notebook, we fit a model inspired by the "very deep" convolutional network [VGGNet](https://arxiv.org/pdf/1409.1556.pdf) to classify flowers into the 17 categories of the Oxford Flowers data set. Derived from [these](https://github.com/the-deep-learners/TensorFlow-LiveLessons/blob/master/notebooks/old/L3-3c__TFLearn_VGGNet.ipynb) [two](https://github.com/the-deep-learners/TensorFlow-LiveLessons/blob/master/notebooks/alexnet_in_keras.ipynb) earlier notebooks.
#### Set seed for reproducibility
```
import numpy as np
np.random.seed(42)
```
#### Load dependencies
```
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.callbacks import TensorBoard # for part 3.5 on TensorBoard
```
#### Load *and preprocess* data
```
import tflearn.datasets.oxflower17 as oxflower17
X, Y = oxflower17.load_data(one_hot=True)
```
#### Design neural network architecture
```
model = Sequential()
model.add(Conv2D(64, 3, activation='relu', input_shape=(224, 224, 3)))
model.add(Conv2D(64, 3, activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(BatchNormalization())
model.add(Conv2D(128, 3, activation='relu'))
model.add(Conv2D(128, 3, activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(BatchNormalization())
model.add(Conv2D(256, 3, activation='relu'))
model.add(Conv2D(256, 3, activation='relu'))
model.add(Conv2D(256, 3, activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(BatchNormalization())
model.add(Conv2D(512, 3, activation='relu'))
model.add(Conv2D(512, 3, activation='relu'))
model.add(Conv2D(512, 3, activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(BatchNormalization())
model.add(Conv2D(512, 3, activation='relu'))
model.add(Conv2D(512, 3, activation='relu'))
model.add(Conv2D(512, 3, activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(17, activation='softmax'))
model.summary()
```
#### Configure model
```
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
```
#### Configure TensorBoard (for part 5 of lesson 3)
```
tensorbrd = TensorBoard('logs/vggnet')
```
#### Train!
```
model.fit(X, Y, batch_size=64, epochs=16, verbose=1, validation_split=0.1, shuffle=True,
callbacks=[tensorbrd])
```
```
# Copyright 2021 Google LLC
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
# Notebook authors: Kevin P. Murphy ([email protected])
# and Mahmoud Soliman ([email protected])
# This notebook reproduces figures for chapter 15 from the book
# "Probabilistic Machine Learning: An Introduction"
# by Kevin Murphy (MIT Press, 2021).
# Book pdf is available from http://probml.ai
```
<a href="https://opensource.org/licenses/MIT" target="_parent"><img src="https://img.shields.io/github/license/probml/pyprobml"/></a>
<a href="https://colab.research.google.com/github/probml/pml-book/blob/main/pml1/figure_notebooks/chapter15_neural_networks_for_sequences_figures.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## Figure 15.1:<a name='15.1'></a> <a name='rnn'></a>
Recurrent neural network (RNN) for generating a variable-length output sequence $\boldsymbol{y}_{1:T}$ given an optional fixed-length input vector $\boldsymbol{x}$
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.1.png" width="256"/>
## Figure 15.2:<a name='15.2'></a> <a name='rnnTimeMachine'></a>
Example output of length 500 generated from a character-level RNN when given the prefix "the". We use greedy decoding, in which the most likely character at each step is computed and then fed back into the model. The model is trained on the book *The Time Machine* by H. G. Wells.
To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/master/notebooks-d2l/rnn_torch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
## Figure 15.3:<a name='15.3'></a> <a name='imageCaptioning'></a>
Illustration of a CNN-RNN model for image captioning. The pink boxes labeled "LSTM" refer to a specific kind of RNN discussed in the LSTM section. The pink boxes labeled $W_{\text{emb}}$ refer to embedding matrices for the (sampled) one-hot tokens, so that the input to the model is a real-valued vector. From https://bit.ly/2FKnqHm . Used with kind permission of Yunjey Choi
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.3.png" width="256"/>
## Figure 15.4:<a name='15.4'></a> <a name='rnnBiPool'></a>
(a) RNN for sequence classification. (b) Bi-directional RNN for sequence classification
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.4_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.4_B.png" width="256"/>
## Figure 15.5:<a name='15.5'></a> <a name='biRNN'></a>
(a) RNN for transforming a sequence to another, aligned sequence. (b) Bi-directional RNN for the same task
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.5_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.5_B.png" width="256"/>
## Figure 15.6:<a name='15.6'></a> <a name='deepRNN'></a>
Illustration of a deep RNN. Adapted from Figure 9.3.1 of <a href='#dive'>[Zha+20]</a>
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.6.png" width="256"/>
## Figure 15.7:<a name='15.7'></a> <a name='seq2seq'></a>
Encoder-decoder RNN architecture for mapping sequence $\boldsymbol{x}_{1:T}$ to sequence $\boldsymbol{y}_{1:T'}$
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.7.png" width="256"/>
## Figure 15.8:<a name='15.8'></a> <a name='NMT'></a>
(a) Illustration of a seq2seq model for translating English to French. The - character represents the end of a sentence. From Figure 2.4 of <a href='#Luong2016thesis'>[Luo16]</a> . Used with kind permission of Minh-Thang Luong. (b) Illustration of greedy decoding. The most likely French word at each step is highlighted in green, and then fed in as input to the next step of the decoder. From Figure 2.5 of <a href='#Luong2016thesis'>[Luo16]</a> . Used with kind permission of Minh-Thang Luong
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.8_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.8_B.png" width="256"/>
## Figure 15.9:<a name='15.9'></a> <a name='BPTT'></a>
An RNN unrolled (vertically) for 3 time steps, with the target output sequence and loss node shown explicitly. From Figure 8.7.2 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of Aston Zhang
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.9.png" width="256"/>
## Figure 15.10:<a name='15.10'></a> <a name='GRU'></a>
Illustration of a GRU. Adapted from Figure 9.1.3 of <a href='#dive'>[Zha+20]</a>
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.10.png" width="256"/>
## Figure 15.11:<a name='15.11'></a> <a name='LSTM'></a>
Illustration of an LSTM. Adapted from Figure 9.2.4 of <a href='#dive'>[Zha+20]</a>
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.11.png" width="256"/>
## Figure 15.12:<a name='15.12'></a> <a name='stsProb'></a>
Conditional probabilities of generating each token at each step for two different sequences. From Figures 9.8.1--9.8.2 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of Aston Zhang
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.12_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.12_B.png" width="256"/>
## Figure 15.13:<a name='15.13'></a> <a name='beamSearch'></a>
Illustration of beam search using a beam of size $K=2$. The vocabulary is $\mathcal{Y} = \{A,B,C,D,E\}$, with size $V=5$. We assume the top 2 symbols at step 1 are A,C. At step 2, we evaluate $p(y_1=A,y_2=y)$ and $p(y_1=C,y_2=y)$ for each $y \in \mathcal{Y}$. This takes $O(K V)$ time. We then pick the top 2 partial paths, which are $(y_1=A,y_2=B)$ and $(y_1=C,y_2=E)$, and continue in the obvious way. Adapted from Figure 9.8.3 of <a href='#dive'>[Zha+20]</a>
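As a rough illustration of the procedure described in this caption, a minimal beam-search sketch over a toy next-token distribution (the model and probabilities below are made up purely for illustration) might look like:
```
import numpy as np

def beam_search(log_prob_fn, vocab_size, K=2, T=3):
    # each beam entry is (token_sequence, cumulative_log_prob)
    beams = [([], 0.0)]
    for _ in range(T):
        candidates = []
        for seq, score in beams:
            logp = log_prob_fn(seq)            # length-V vector of log p(y_next | prefix)
            for v in range(vocab_size):        # O(K V) candidate expansions per step
                candidates.append((seq + [v], score + logp[v]))
        candidates.sort(key=lambda c: c[1], reverse=True)
        beams = candidates[:K]                 # keep only the top K partial paths
    return beams

# toy "model": a fixed conditional distribution that ignores the prefix
rng = np.random.default_rng(0)
probs = rng.dirichlet(np.ones(5))
print(beam_search(lambda seq: np.log(probs), vocab_size=5, K=2, T=3))
```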
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.13.png" width="256"/>
## Figure 15.14:<a name='15.14'></a> <a name='textCNN'></a>
Illustration of the TextCNN model for binary sentiment classification. Adapted from Figure 15.3.5 of <a href='#dive'>[Zha+20]</a>
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.14.png" width="256"/>
## Figure 15.15:<a name='15.15'></a> <a name='wavenet'></a>
Illustration of the wavenet model using dilated (atrous) convolutions, with dilation factors of 1, 2, 4 and 8. From Figure 3 of <a href='#wavenet'>[Aar+16]</a> . Used with kind permission of Aaron van den Oord
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.15.png" width="256"/>
## Figure 15.16:<a name='15.16'></a> <a name='attention'></a>
Attention computes a weighted average of a set of values, where the weights are derived by comparing the query vector to a set of keys. From Figure 10.3.1 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of Aston Zhang
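A minimal sketch of this idea (scaled dot-product attention over toy arrays; the shapes are chosen arbitrarily for illustration):
```
import numpy as np

def softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def attention(Q, K, V):
    # compare each query to all keys, then take a weighted average of the values
    scores = Q @ K.T / np.sqrt(Q.shape[-1])
    weights = softmax(scores, axis=-1)   # each row of weights sums to 1
    return weights @ V

rng = np.random.default_rng(0)
Q, K, V = rng.normal(size=(4, 8)), rng.normal(size=(6, 8)), rng.normal(size=(6, 3))
print(attention(Q, K, V).shape)  # (4, 3)
```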
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.16.pdf" width="256"/>
## Figure 15.17:<a name='15.17'></a> <a name='attenRegression'></a>
Kernel regression in 1d. (a) Kernel weight matrix. (b) Resulting predictions on a dense grid of test points.
To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/master/notebooks/kernel_regression_attention.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
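A minimal Nadaraya-Watson-style sketch of the same idea (toy 1d data and a Gaussian kernel; all values below are made up):
```
import numpy as np

rng = np.random.default_rng(0)
x_train = np.sort(rng.uniform(0, 5, 50))                 # keys
y_train = 2 * np.sin(x_train) + rng.normal(0, 0.5, 50)   # values
x_test = np.linspace(0, 5, 100)                          # queries

# attention weights from a Gaussian kernel on the query-key distance
logits = -0.5 * (x_test[:, None] - x_train[None, :]) ** 2
w = np.exp(logits - logits.max(axis=1, keepdims=True))
w /= w.sum(axis=1, keepdims=True)                        # each row sums to 1

y_pred = w @ y_train                                     # weighted average of the values
```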
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.17_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.17_B.png" width="256"/>
## Figure 15.18:<a name='15.18'></a> <a name='seq2seqAttn'></a>
Illustration of seq2seq with attention for English to French translation. Used with kind permission of Minh-Thang Luong
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.18.png" width="256"/>
## Figure 15.19:<a name='15.19'></a> <a name='translationHeatmap'></a>
Illustration of the attention heatmaps generated while translating two sentences from Spanish to English. (a) Input is ``hace mucho frio aqui.'', output is ``it is very cold here.''. (b) Input is ``¿todavia estan en casa?'', output is ``are you still at home?''. Note that when generating the output token ``home'', the model should attend to the input token ``casa'', but in fact it seems to attend to the input token ``?''. Adapted from https://www.tensorflow.org/tutorials/text/nmt_with_attention
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.19_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.19_B.png" width="256"/>
## Figure 15.20:<a name='15.20'></a> <a name='EHR'></a>
Example of an electronic health record. In this example, 24h after admission to the hospital, the RNN classifier predicts the risk of death as 19.9%; the patient ultimately died 10 days after admission. The "relevant" keywords from the input clinical notes are shown in red, as identified by an attention mechanism. From Figure 3 of <a href='#Rajkomar2018'>[Alv+18]</a> . Used with kind permission of Alvin Rajkomar
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.20.png" width="256"/>
## Figure 15.21:<a name='15.21'></a> <a name='SNLI'></a>
Illustration of sentence pair entailment classification using an MLP with attention to align the premise (``I do need sleep'') with the hypothesis (``I am tired''). White squares denote active attention weights, blue squares are inactive. (We are assuming hard 0/1 attention for simplicity.) From Figure 15.5.2 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of Aston Zhang
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.21.png" width="256"/>
## Figure 15.22:<a name='15.22'></a> <a name='showAttendTell'></a>
Image captioning using attention. (a) Soft attention. Generates ``a woman is throwing a frisbee in a park''. (b) Hard attention. Generates ``a man and a woman playing frisbee in a field''. From Figure 6 of <a href='#showAttendTell'>[Kel+15]</a> . Used with kind permission of Kelvin Xu
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.22_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.22_B.png" width="256"/>
## Figure 15.23:<a name='15.23'></a> <a name='transformerTranslation'></a>
Illustration of how encoder self-attention for the word ``it'' differs depending on the input context. From https://ai.googleblog.com/2017/08/transformer-novel-neural-network.html . Used with kind permission of Jakob Uszkoreit
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.23.png" width="256"/>
## Figure 15.24:<a name='15.24'></a> <a name='multiHeadAttn'></a>
Multi-head attention. Adapted from Figure 9.3.3 of <a href='#dive'>[Zha+20]</a>
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.24.png" width="256"/>
## Figure 15.25:<a name='15.25'></a> <a name='positionalEncodingSinusoids'></a>
(a) Positional encoding matrix for a sequence of length $n=60$ and an embedding dimension of size $d=32$. (b) Basis functions for columns 6 to 9.
To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/master/notebooks-d2l/positional_encoding.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
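A minimal sketch of the standard sinusoidal positional encoding matrix shown in such plots (assuming $n=60$ and $d=32$ as in the caption):
```
import numpy as np

n, d = 60, 32
pos = np.arange(n)[:, None]                 # positions 0..n-1
i = np.arange(0, d, 2)[None, :]             # even embedding dimensions
angles = pos / (10000 ** (i / d))

P = np.zeros((n, d))
P[:, 0::2] = np.sin(angles)                 # even columns: sine
P[:, 1::2] = np.cos(angles)                 # odd columns: cosine
print(P.shape)                              # (60, 32)
```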
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.25_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.25_B.png" width="256"/>
## Figure 15.26:<a name='15.26'></a> <a name='transformer'></a>
The transformer. From <a href='#Weng2018attention'>[Lil18]</a> . Used with kind permission of Lilian Weng. Adapted from Figures 1--2 of <a href='#Vaswani2017'>[Ash+17]</a>
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.26.png" width="256"/>
## Figure 15.27:<a name='15.27'></a> <a name='attentionBakeoff'></a>
Comparison of (1d) CNNs, RNNs and self-attention models. From Figure 10.6.1 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of Aston Zhang
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.27.png" width="256"/>
## Figure 15.28:<a name='15.28'></a> <a name='VIT'></a>
The Vision Transformer (ViT) model. This treats an image as a set of input patches. The input is prepended with the special CLASS embedding vector (denoted by *) in location 0. The class label for the image is derived by applying softmax to the final output encoding at location 0. From Figure 1 of <a href='#ViT'>[Ale+21]</a> . Used with kind permission of Alexey Dosovitskiy
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.28.png" width="256"/>
## Figure 15.29:<a name='15.29'></a> <a name='transformers_taxonomy'></a>
Venn diagram presenting the taxonomy of different efficient transformer architectures. From <a href='#Tay2020transformers'>[Yi+20]</a> . Used with kind permission of Yi Tay
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.29.pdf" width="256"/>
## Figure 15.30:<a name='15.30'></a> <a name='rand_for_fast_atten'></a>
Attention matrix $\mathbf{A}$ rewritten as a product of two lower-rank matrices $\mathbf{Q}'$ and $(\mathbf{K}')^{\mathsf{T}}$, with random feature maps $\boldsymbol{\phi}(\boldsymbol{q}_i) \in \mathbb{R}^M$ and $\boldsymbol{\phi}(\boldsymbol{v}_k) \in \mathbb{R}^M$ for the corresponding queries/keys stored in the rows/columns. Used with kind permission of Krzysztof Choromanski
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.30.png" width="256"/>
## Figure 15.31:<a name='15.31'></a> <a name='fatten'></a>
Decomposition of the attention matrix $\mathbf{A}$ can be leveraged to improve attention computations via the matrix associativity property. To compute $\mathbf{A}\mathbf{V}$, we first calculate $\mathbf{G} = (\mathbf{K}')^{\mathsf{T}}\mathbf{V}$ and then $\mathbf{Q}'\mathbf{G}$, resulting in space and time complexity that is linear in $N$. Used with kind permission of Krzysztof Choromanski
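A minimal numerical sketch of this associativity trick (the random feature maps are omitted; the point is only the order of the matrix products):
```
import numpy as np

rng = np.random.default_rng(0)
N, M, D = 1000, 16, 8
Qp = rng.random((N, M))          # Q' (rows hold non-negative feature maps)
Kp = rng.random((N, M))          # K'
V = rng.normal(size=(N, D))

# naive: build the N x N attention matrix explicitly, O(N^2) space/time
out_naive = (Qp @ Kp.T) @ V

# associativity: compute G = K'^T V first, O(N M D) time and O(M D) extra space
G = Kp.T @ V
out_fast = Qp @ G

print(np.allclose(out_naive, out_fast))   # True
```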
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.31.png" width="256"/>
## Figure 15.32:<a name='15.32'></a> <a name='elmo'></a>
Illustration of the ELMo bidirectional language model. Here $y_t = x_{t+1}$ when acting as the target for the forwards LSTM, and $y_t = x_{t-1}$ for the backwards LSTM. (We add *bos* and *eos* sentinels to handle the edge cases.) From <a href='#Weng2019LM'>[Lil19]</a> . Used with kind permission of Lilian Weng
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.32.png" width="256"/>
## Figure 15.33:<a name='15.33'></a> <a name='GPT'></a>
Illustration of (a) BERT and (b) GPT. $E_t$ is the embedding vector for the input token at location $t$, and $T_t$ is the output target to be predicted. From Figure 3 of <a href='#bert'>[Jac+19]</a> . Used with kind permission of Ming-Wei Chang
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.33_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.33_B.png" width="256"/>
## Figure 15.34:<a name='15.34'></a> <a name='bertEmbedding'></a>
Illustration of how a pair of input sequences, denoted A and B, are encoded before feeding to BERT. From Figure 14.8.2 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of Aston Zhang
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.34.png" width="256"/>
## Figure 15.35:<a name='15.35'></a> <a name='bert-tasks'></a>
Illustration of how BERT can be used for different kinds of supervised NLP tasks. (a) Single sentence classification (e.g., sentiment analysis); (b) Sentence-pair classification (e.g., textual entailment); (c) Single sentence tagging (e.g., shallow parsing); (d) Question answering. From Figure 4 of <a href='#bert'>[Jac+19]</a> . Used with kind permission of Ming-Wei Chang
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.35_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.35_B.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.35_C.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.35_D.png" width="256"/>
## Figure 15.36:<a name='15.36'></a> <a name='T5'></a>
Illustration of how the T5 model ("Text-to-text Transfer Transformer") can be used to perform multiple NLP tasks, such as translating English to German; determining if a sentence is linguistically valid or not (**CoLA** stands for "Corpus of Linguistic Acceptability"); determining the degree of semantic similarity (**STSB** stands for "Semantic Textual Similarity Benchmark"); and abstractive summarization. From Figure 1 of <a href='#T5'>[Col+19]</a> . Used with kind permission of Colin Raffel
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_15.36.png" width="256"/>
## References:
<a name='wavenet'>[Aar+16]</a> A. van den Oord, S. Dieleman, H. Zen, K. Simonyan, O. Vinyals, A. Graves, N. Kalchbrenner, A. Senior and K. Kavukcuoglu. "WaveNet: A Generative Model for Raw Audio". (2016). arXiv: 1609.03499
<a name='ViT'>[Ale+21]</a> A. Dosovitskiy, L. Beyer, A. Kolesnikov, et al. "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale". (2021).
<a name='Rajkomar2018'>[Alv+18]</a> A. Rajkomar, E. Oren, K. Chen, et al. "Scalable and accurate deep learning with electronic health records". In: NPJ Digital Medicine (2018).
<a name='Vaswani2017'>[Ash+17]</a> A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. Kaiser and I. Polosukhin. "Attention Is All You Need". (2017).
<a name='T5'>[Col+19]</a> C. Raffel, N. Shazeer, A. Roberts, K. Lee, S. Narang, M. Matena, Y. Zhou, W. Li and P. J. Liu. "Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer". (2019). arXiv: 1910.10683
<a name='bert'>[Jac+19]</a> J. Devlin, M.-W. Chang, K. Lee and K. Toutanova. "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding". (2019).
<a name='showAttendTell'>[Kel+15]</a> K. Xu, J. L. Ba, R. Kiros, K. Cho, A. Courville, R. Salakhutdinov, R. Zemel and Y. Bengio. "Show, Attend and Tell: Neural Image Caption Generation with Visual Attention". (2015).
<a name='Weng2018attention'>[Lil18]</a> L. Weng. "Attention? Attention!". In: lilianweng.github.io/lil-log (2018).
<a name='Weng2019LM'>[Lil19]</a> L. Weng. "Generalized Language Models". In: lilianweng.github.io/lil-log (2019).
<a name='Luong2016thesis'>[Luo16]</a> M.-T. Luong. "Neural Machine Translation". PhD thesis (2016).
<a name='Tay2020transformers'>[Yi+20]</a> Y. Tay, M. Dehghani, D. Bahri and D. Metzler. "Efficient Transformers: A Survey". (2020). arXiv: 2009.06732
<a name='dive'>[Zha+20]</a> A. Zhang, Z. C. Lipton, M. Li and A. J. Smola. "Dive into Deep Learning". (2020).
```
# default_exp visrectrans
```
# VisRecTrans
> A class for creating a custom [Vision Transformer (ViT)](https://arxiv.org/abs/2010.11929) model for visual recognition
```
#export
#hide
from nbdev.showdoc import *
from fastai.vision.all import *
import timm
import math
import warnings
#export
#hide
class EmbedBlock (Module) :
def __init__ (self, num_patches, embed_dim) :
self.cls_tokens = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embeds = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
def forward (self, x) :
B = x.shape[0]
cls_tokens = self.cls_tokens.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim = 1)
x = x + self.pos_embeds
return x
#export
#hide
class Header (Module) :
def __init__ (self, ni, num_classes) :
self.head = nn.Linear(ni, num_classes)
def forward (self, x) :
x = x[:, 0] # Extracting the class token, which is used for the classification task.
x = self.head(x)
return x
#export
#hide
def custom_ViT (timm_model_name, num_patches, embed_dim, ni, num_classes, pretrained = True) :
model = timm.create_model(timm_model_name, pretrained)
module_layers = list(model.children())
return nn.Sequential(
module_layers[0],
EmbedBlock(num_patches, embed_dim),
nn.Sequential(*module_layers[1:-1]),
Header(ni, num_classes)
)
#export
#hide
# The function below is heavily inspired by "https://github.com/rwightman/pytorch-image-models/blob/5f9aff395c224492e9e44248b15f44b5cc095d9c/timm/models/vision_transformer.py"
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(layer, param, mean=0., std=1., a=-2., b=2.):
# type : (Tensor, float, float, float, float) -> Tensor
"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
tensor = layer.get_parameter(param)
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
#export
class VisRecTrans :
"""Class for setting up a vision transformer for visual recognition.
By default, returns a pretrained custom ViT model for the given `model_name` and `num_classes`; if `pretrained` is set to False, the model's parameters are randomly initialized.
"""
models_list = ['vit_large_patch16_224', 'vit_large_patch16_224_in21k', 'vit_huge_patch14_224_in21k', 'vit_small_patch16_224', 'vit_small_patch16_224_in21k']
# TODO: generalize the assignment of num_patches, embed_dim and ni instead of hard-coding them per supported model name
def __init__(self, model_name, num_classes, pretrained = True) :
self.model_name = model_name
self.num_classes = num_classes
self.pretrained = pretrained
if self.model_name == 'vit_small_patch16_224' :
self.num_patches = 196
self.embed_dim = 384
self.ni = 384
elif self.model_name == 'vit_small_patch16_224_in21k' :
self.num_patches = 196
self.embed_dim = 384
self.ni = 384
elif self.model_name == 'vit_large_patch16_224' :
self.num_patches = 196
self.embed_dim = 1024
self.ni = 1024
elif self.model_name == 'vit_large_patch16_224_in21k' :
self.num_patches = 196
self.embed_dim = 1024
self.ni = 1024
elif self.model_name == 'vit_huge_patch14_224_in21k' :
self.num_patches = 256
self.embed_dim = 1280
self.ni = 1280
def create_model (self) :
"""Method for creating the model.
"""
return custom_ViT(self.model_name, self.num_patches, self.embed_dim, self.ni, self.num_classes, self.pretrained)
def initialize (self, model) :
"""Mthod for initializing the given `model`. This method uses truncated normal distribution for
initializing the position embedding as well as the class token, and, the head of the model is
initialized using He initialization.
"""
trunc_normal_(model[1], 'cls_tokens')
trunc_normal_(model[1], 'pos_embeds')
apply_init(model[3], nn.init.kaiming_normal_)
def get_callback (self) :
"""Method for getting the callback to train the embedding block of the `model`. It is highly recommended
to use the callback, returned by this method, while training a ViT model.
"""
class TrainEmbedCallback(Callback) :
def before_train(self) :
self.model[1].training = True
self.model[1].requires_grad_(True)
def before_validate(self) :  # fastai's validation-phase event is named 'before_validate'
self.model[1].training = False
self.model[1].requires_grad_(False)
return TrainEmbedCallback()
show_doc(VisRecTrans.create_model)
show_doc(VisRecTrans.initialize)
show_doc(VisRecTrans.get_callback)
```
Let's see if this class is working well :
```
vis_rec_ob = VisRecTrans('vit_small_patch16_224', 10, False)
model_test = vis_rec_ob.create_model()
vis_rec_ob.initialize(model_test)
assert isinstance(model_test, nn.Sequential)
```
As we see, the model is a sequential list of layers, and can be used with the `Learner` class of [fastai](https://docs.fast.ai), as we use any other model.
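For instance, training could be set up roughly as follows (a hypothetical sketch: the dataset, labeling function and hyperparameters are placeholders, not part of this library):
```
# Hypothetical usage sketch with a fastai Learner
path = untar_data(URLs.PETS) / 'images'
dls = ImageDataLoaders.from_name_func(
    path, get_image_files(path), valid_pct=0.2,
    label_func=lambda f: f.name[0].isupper(), item_tfms=Resize(224))

vis_rec = VisRecTrans('vit_small_patch16_224', num_classes=dls.c)
model = vis_rec.create_model()
vis_rec.initialize(model)

learn = Learner(dls, model, metrics=accuracy, cbs=[vis_rec.get_callback()])
learn.fit_one_cycle(1)
```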
#### The list of models supported by the `VisRecTrans` class :
```
VisRecTrans.models_list
```
## Some Math
Let's assume all objects are always centered at $x=0$ to simplify the FFT handling.
We need a few relations to understand the math.
1. The Fourier transform of a function like $x^2W(x)$ is $F[x^2W(x)] \propto \frac{d^2\hat{W}(k)}{dk^2}$.
2. The Fourier transform of a Gaussian is a Gaussian, which we can write generically as $\exp(-\alpha^2 k^2)$. Here $\alpha$ is related to the real-space FWHM of the profile via some constants we won't bother with.
3. A convolution in real-space is a product in Fourier space.
4. A weighted sum over a profile in real-space can be written as an integral in Fourier space.
This last relation is worth discussing in detail. Suppose we have an image $I(x)$ and a weight function $W(x)$, and we want to compute the integral $f = \int dx\, I(x) W(x)$. This integral is actually the value of the convolution of $I(x)$ with $W(x)$ at $x=0$,
$$
f = \int dx I(x) W(x) = \left. \int dx I(x) W(x - y)\right|_{y = 0}
$$
In Fourier space we can write this relation as
$$
f \propto \left.\int dk \hat{I}(k)\hat{W}(k) \exp(-iky)\right|_{y=0} = \int dk \hat{I}(k)\hat{W}(k)
$$
So this property combined with item 1 above means we can write the weighted moments of an object in real space as integrals in Fourier space over the weight function and its derivatives:
$$
f \propto \int dk \hat{I}(k)\hat{W}(k)
$$
$$
\langle x^2 \rangle \propto \int dk\, \hat{I}(k)\frac{d^{2}\hat{W}(k)}{dk_x^2}
$$
$$
\langle xy \rangle \propto \int dk\, \hat{I}(k)\frac{d^2\hat{W}(k)}{dk_x dk_y}
$$
$$
\langle y^2 \rangle \propto \int dk\, \hat{I}(k)\frac{d^2\hat{W}(k)}{dk_y^2}
$$
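As a quick numerical sanity check of relation 4, here is a toy 1d example (the profiles and grid below are arbitrary):
```
import numpy as np

n = 256
x = np.linspace(-10, 10, n, endpoint=False)
dx = x[1] - x[0]

I = np.exp(-0.5 * (x / 1.3) ** 2)     # toy "image" profile centered at x=0
W = np.exp(-0.5 * (x / 2.0) ** 2)     # Gaussian weight, broader than the image

f_real = np.sum(I * W) * dx           # real-space weighted sum

Ik = np.fft.fft(I)
Wk = np.fft.fft(W)
# discrete analog of the Fourier-space integral (Parseval/convolution theorem)
f_fourier = np.real(np.sum(Ik * np.conj(Wk))) / n * dx

print(f_real, f_fourier)              # these agree to numerical precision
```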
## What about the PSF?
So now let's assume we have an object, a PSF, and a weight function. Further, let's assume that the weight function is always bigger than the PSF and that the weight function is Gaussian.
In this case, we can immediately see that all of the derivatives of the weight function in Fourier space can be written as a product of some polynomial and the weight function itself. The constraint that the weight function be larger than the PSF means that $\alpha_{psf} < \alpha_{w}$. Finally, we have some object with $\alpha_g$.
In terms of the profile of $k$ we have the following situation illustrated in the plot below.
```
import proplot as plot
import numpy as np
def prof(k, a):
return np.log10(np.exp(-(a*k)**2))
k = np.logspace(-1, 1.5, 100)
apsf = 1
aw = 1.5
ag = 0.25
fig, axs = plot.subplots(figsize=(4, 4))
axs.semilogx(k, prof(k, np.sqrt(ag**2+apsf**2)), label='gal+psf')
axs.semilogx(k, prof(k, apsf), label='psf')
axs.semilogx(k, prof(k, ag), label='gal')
axs.semilogx(k, prof(k, aw), label='wgt')
axs.format(xlabel='log10[k]', ylabel='log10[f(k)]')
axs.legend()
```
From this plot you can see that even for real-space moments, as long as the moment kernels are broader than the PSF in real space (and hence narrower in Fourier space), the weight itself removes the modes suppressed by the PSF. Thus we can set these suppressed modes (where the PSF amplitude cannot be deconvolved) to zero in Fourier space without harm.
# Neural Networks
## 1. Neural Networks
In this section, we will implement the backpropagation algorithm to learn the parameters of the neural network.
### 1.1 Visualizing the data
The data is the same as in assignment 3: 5000 training examples, each a 20 pixel by 20 pixel grayscale image of a digit.
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
%matplotlib inline
from scipy.io import loadmat
data = loadmat('ex3data1.mat')
X = data["X"] # 5000x400 np array
y = data["y"] # 5000x1 np array (2d)
y = y.flatten() # change to (5000,) 1d array and
y[y==10] = 0 # in original data, 10 is used to represent 0
def displayData(X):
""" displays the 100 rows of digit image data stored in X in a nice grid.
It returns the figure handle fig, ax
"""
# form the big 10 x 10 matrix containing all 100 images data
# padding between 2 images
pad = 1
# initialize matrix with -1 (black)
wholeimage = -np.ones((20*10+9, 20*10+9))
# fill values
for i in range(10):
for j in range(10):
wholeimage[j*21:j*21+20, i*21:i*21+20] = X[10*i+j, :].reshape((20, 20))
fig, ax = plt.subplots(figsize=(6, 6))
ax.imshow(wholeimage.T, cmap=plt.cm.gray, vmin=-1, vmax=1)
ax.axis('off')
return fig, ax
# randomly select 100 data points to display
rand_indices = np.random.randint(0, 5000, size=100)
sel = X[rand_indices, :]
# display images
fig, ax = displayData(sel)
```
### 1.2 Model representation
Our neural network is shown in the following figure. It has 3 layers: an input layer, a hidden layer and an output layer. The neural network used contains 25 units in the 2nd layer and 10 output units (corresponding to 10 digit classes).

### 1.3 Feedforward and cost function
Recall that the cost function for the neural network (without regularization) is:
$$ J(\theta)=\frac{1}{m}\sum_{i=1}^{m} \sum_{k=1}^{K}[-y^{(i)}log((h_\theta(x^{(i)}))_k)-(1-y^{(i)})log(1-(h_\theta(x^{(i)}))_k)]$$
where $h_\theta(x^{(i)})$ is computed as shown in the above figure and K=10 is the total number of possible labels. Note that $h_\theta(x^{(i)})_k = a_k^{(3)}$ is the activation of the k-th output unit. Also, remember that whereas the original labels (in the variable y) were 0, 1, ..., 9, for the purpose of training a neural network, we need to recode the labels as vectors containing only values 0 or 1, so:
$$ y = \left[\matrix{1\\ 0\\ 0\\ \vdots\\ 0}\right], \left[\matrix{0\\ 1\\ 0\\ \vdots\\ 0}\right], ..., or \left[\matrix{0\\ 0\\ 0\\ \vdots\\ 1}\right] $$
#### Vectorization
Matrix dimensions:
$X_{wb}$: 5000 x 401
$\Theta^{(1)}$: 25 x 401
$\Theta^{(2)}$: 10 x 26
$a^{(2)}$: 5000 x 25 or 5000 x 26 after adding intercept terms
$a^{(3)}$ or $H_\theta(x)$: 5000 x 10
$Y$: 5000 x 10
$$a^{(2)} = g(X_{wb}\Theta^{(1)^T})$$
$$ H_\theta(x) = a^{(3)} = g(a^{(2)}_{wb}\Theta^{(2)^T})$$
$$ H_\theta(x) = \left[\matrix{-(h_\theta(x^{(1)}))^T-\\ -(h_\theta(x^{(2)}))^T-\\ \vdots\\ -(h_\theta(x^{(m)}))^T-}\right] $$
$$ Y = \left[\matrix{-(y^{(1)})^T-\\ -(y^{(2)})^T-\\ \vdots\\ -(y^{(m)})^T-}\right] $$
Therefore, cost is:
$$ J(\theta)=\frac{1}{m} \sum_{matrix-elements} (-Y .* log(H_\theta(x))-(1-Y) .* log(1-H_\theta(x))) $$
Note the element wise multiplication (.*) and sum of all matrix elements in the above equation.
### 1.4 Regularized cost function
The cost function for neural networks with regularization is given by:
$$ J(\theta)=\frac{1}{m}\sum_{i=1}^{m} \sum_{k=1}^{K}[-y^{(i)}log((h_\theta(x^{(i)}))_k)-(1-y^{(i)})log(1-(h_\theta(x^{(i)}))_k)] + \frac{\lambda}{2m}\left[\sum_{j=1}^{25}\sum_{k=1}^{400}(\Theta_{j, k}^{(1)})^2 + \sum_{j=1}^{10}\sum_{k=1}^{25}(\Theta_{j, k}^{(2)})^2\right]$$
Note that even though the additional regularization term seems complicated with all the cascaded Sigma symbols, it is actually just the sum of all elements (after squaring) of the two $\Theta$ matrices, one of which is 25 by 400 and the other 10 by 25 (recall that the bias terms are by convention not included in regularization). If your regularization parameter $\lambda$ is very large, then all your $\Theta$ values will be driven towards zero.
#### Vectorization
For the regularization term, there's actually nothing much to vectorize. Using elementwise self-multiplication then sum all elements in the result will do it:
$$ J(\theta)=\frac{1}{m} \sum_{matrix-elements} (-Y .* log(H_\theta(x))-(1-Y) .* log(1-H_\theta(x))) + \frac{\lambda}{2m} \left[\sum_{matrix-elements}(\Theta_{j, k}^{(1)} .* \Theta_{j, k}^{(1)})+\sum_{matrix-elements}(\Theta_{j, k}^{(2)} .* \Theta_{j, k}^{(2)})\right]$$
```
def sigmoid(z):
""" sigmoid(z) computes the sigmoid of z. z can be a number,
vector, or matrix.
"""
g = 1 / (1 + np.exp(-z))
return g
def nnCostFunction(nn_params, input_lsize, hidden_lsize, num_labels, X, y, lmd):
""" computes the cost and gradient of the neural network. The
parameters for the neural network are "unrolled" into the vector
nn_params and need to be converted back into the weight matrices.
The returned parameter grad should be a "unrolled" vector of the
partial derivatives of the neural network.
X should already include bias terms
Y is a 2d matrix
"""
# number of training samples
m, n = X.shape
# restore Theta1 and Theta2 from nn_params
Theta1 = nn_params[:hidden_lsize*(input_lsize+1)].reshape((hidden_lsize, input_lsize+1))
Theta2 = nn_params[hidden_lsize*(input_lsize+1):].reshape((num_labels, hidden_lsize+1))
# forward propagation
a2 = sigmoid(X @ Theta1.T)
a2_wb = np.concatenate((np.ones((m, 1)), a2), axis=1)
a3 = sigmoid(a2_wb @ Theta2.T) # i.e. H_theta
# Calculate cost
temp1 = -y * np.log(a3) - (1-y) * np.log(1-a3)
    temp2 = np.sum(Theta1[:, 1:]**2) + np.sum(Theta2[:, 1:]**2)  # exclude bias columns from regularization
J = np.sum(temp1.flatten()) / m + lmd * temp2 / (2*m)
return J
# define input_lsize, hidden_lsize and numb_labels
input_lsize = 400
hidden_lsize = 25
num_labels = 10
m = len(y) # number of samples
# add bias terms to X
X_wb = np.concatenate((np.ones((m, 1)), X), axis=1)
# convert y to 2d matrix Y, 5000 by 10
# each row represents a sample, containing 0 or 1
Y = np.zeros((m, num_labels))
for i, v in enumerate(y):
# # NOTE: v=0 maps to position 9
# if v != 0:
# Y[i, v-1] = 1
# else:
# Y[i, 9] = 1
#print(Y[:100, :])
# using Python's zero-indexing convention
Y[i, v] = 1
# Load pre-calculated nn_params Theta1 and Theta2
# In ex4weights are 2 parameters:
# Theta1: 25 by 401
# Theta2: 10 by 26
# from scipy.io import loadmat
data = loadmat('ex3weights.mat')
Theta1 = data["Theta1"]
Theta2 = data["Theta2"]
# unroll Theta1 and Theta2 into nn_params
# NOTE: ndarray.flatten() will unroll by row, which does not match the A(:) behavior in MATLAB (by column)
# However, since the flattened data will be reshaped by ndarray.reshape(), which by default
# reshape by row, so you will actually get the original Theta1 and Theta2 back
# In summary, your flatten() and reshape() function should use the same order
# either both by numpy default, or both by 'F' order
nn_params = np.concatenate((Theta1.flatten(), Theta2.flatten()))
print(nn_params.shape) # should be (10285,)
# Regularization factor
lmd = 0
# Test nnCostFunction()
J = nnCostFunction(nn_params, input_lsize, hidden_lsize, num_labels, X_wb, Y, lmd)
print(J)
print("Expected ~0.287629")
# test cost function with regularization
lmd = 1
J = nnCostFunction(nn_params, input_lsize, hidden_lsize, num_labels, X_wb, Y, lmd)
print(J)
print("Expected around 0.383770")
```
## 2. Backpropagation
In this part, we implement the backpropagation algorithm to compute the gradient for the neural network cost function. Once this is done, we will be able to train the neural network by minimizing the cost function using an optimizer.
### 2.1 Sigmoid gradient
The gradient for the sigmoid function can be computed as:
$$ g'(z)=\frac{d}{dz}g(z)=g(z)(1-g(z))$$
where
$$g(z)=\frac{1}{1+e^{-z}}$$
For large values (both positive and negative) of z, the gradient should be close to 0. When z = 0, the gradient should be exactly 0.25.
```
def sigmoidGradient(z):
""" computes the gradient of the sigmoid function
evaluated at z. This should work regardless if z is a matrix or a
vector. In particular, if z is a vector or matrix, you should return
the gradient for each element.
"""
return sigmoid(z) * (1 - sigmoid(z))
# test sigmoidGradient(z)
z = np.array([-10, 0, 10])
print(sigmoidGradient(z))
```
### 2.2 Random initialization
When training neural networks, it is important to randomly initialize the parameters for symmetry breaking. Otherwise, the units in hidden layers will be identical to each other.
One effective strategy for random initialization is to randomly select values for $\Theta^{(l)}$ uniformly in the range $[-\epsilon_{init}, \epsilon_{init}]$. You should use $\epsilon_{init}=0.12$. This range of values ensures that the parameters are kept small and makes the learning more efficient.
```
def randInitializeWeights(L_in, L_out):
""" randomly initializes the weights of a layer with
L_in incoming connections and L_out outgoing connections.
Note that return variable W should be set to a matrix of size(L_out, 1 + L_in) as
the first column of W handles the "bias" terms.
"""
epsilon_init = 0.12
W = np.random.rand(L_out, 1+L_in) * 2 * epsilon_init - epsilon_init
return W
```
### 2.3 Backpropagation

Recall that the intuition behind the backpropagation algorithm is as follows. Given a training example $(x^{(t)}, y^{(t)})$, we will first run a "forward pass" to compute all the activations throughout the network, including the output value of the hypothesis $h_\theta(x)$. Then, for each node $j$ in layer $l$, we would like to compute an "error term" $\delta_j^{(l)}$ that measures how much that node was "responsible" for any errors in the output.
For an output node, we can directly measure the difference between the network's activation and the true target value, and use that to define $\delta_j^{(3)}$(since layer 3 is the output layer). For the hidden units, you will compute
$\delta_j^{(l)}$ based on a weighted average of the error terms of the nodes in layer $(l + 1)$.
Detailed steps are as follows:
1) Perform a feedforward pass, computing the activations for Layers 2 and 3
2) For each output unit k in Layer 3 (the output layer), set
$$\delta_k^{(3)}=a_k^{(3)}-y_k$$
where $y_k\in\{0,1\}$ indicates whether the current training example belongs to class k or not.
3) For Layer 2, set
$$\delta^{(2)} = (\Theta^{(2)})^T\delta^{(3)}.*g'(z^{(2)})$$
4) Accumulate the gradient from this example using the following formula. Note that you should skip or remove $\delta_0^{(2)}$:
$$\Delta^{(l)}=\Delta^{(l)}+\delta^{(l+1)}(a^{(l)})^T$$
Do this for all training examples.
5) Obtain the gradient by dividing the accumulated gradients by m:
$$\frac{\partial}{\partial\Theta_{ij}^{(l)}}J(\Theta) = D_{ij}^{(l)} = \frac{1}{m}\Delta_{ij}^{(l)}$$
#### Vectorization
Here, we still use the full vectorization form that we used above, so we have:
$$\delta^{(3)}=a^{(3)}-y$$
$$\delta^{(2)} = \delta^{(3)}\Theta^{(2)}_{:,1:}.*g'(z^{(2)})$$
where $\Theta^{(2)}_{:,1:}$ is $\Theta^{(2)}$ with its first (bias) column removed, so that the dimensions match.
$$\Delta^{(l)}=(\delta^{(l+1)})^Ta^{(l)}$$
where the matrix dimensions are as follows:
$X_{wb}, a^{(1)}$: 5000 x 401 with intercept terms
$a^{(2)}, \delta^{(2)}, z^{(2)}$: 5000 x 25, without intercept terms
$a^{(3)}, y, \delta^{(3)}$: 5000 x 10
$\Theta^{(1)}$: 25 x 401 (but intercept terms will remain unchanged in gradient descent)
$\Theta^{(2)}$: 10 x 26 (but intercept terms will remain unchanged in gradient descent)
### 2.4 Regularized Neural Networks
To account for regularization, we can add an additional term after computing the gradient using backpropagation.
The formula are as follows:
$$\frac{\partial}{\partial\Theta_{ij}^{(l)}}J(\Theta) = D_{ij}^{(l)} = \frac{1}{m}\Delta_{ij}^{(l)}\qquad for\; j=0$$
$$\frac{\partial}{\partial\Theta_{ij}^{(l)}}J(\Theta) = D_{ij}^{(l)} = \frac{1}{m}\Delta_{ij}^{(l)}+\frac{\lambda}{m}\Theta_{ij}^{(l)}\qquad for\; j\geq 1$$
Note that you should not regularize the first column of $\Theta$.
```
def nnCostFunction2(nn_params, input_lsize, hidden_lsize, num_labels, X, y, lmd):
""" computes the cost and gradient of the neural network. The
parameters for the neural network are "unrolled" into the vector
nn_params and need to be converted back into the weight matrices.
The returned parameter grad should be a "unrolled" vector of the
partial derivatives of the neural network.
X should already include bias terms
Y is a 2d matrix
"""
# number of training samples
m, n = X.shape
# restore Theta1 and Theta2 from nn_params
Theta1 = nn_params[:hidden_lsize*(input_lsize+1)].reshape((hidden_lsize, input_lsize+1))
Theta2 = nn_params[hidden_lsize*(input_lsize+1):].reshape((num_labels, hidden_lsize+1))
# forward propagation
z2 = X @ Theta1.T
a2 = sigmoid(z2)
a2_wb = np.concatenate((np.ones((m, 1)), a2), axis=1)
a3 = sigmoid(a2_wb @ Theta2.T) # i.e. H_theta
# Calculate cost
temp1 = -y * np.log(a3) - (1-y) * np.log(1-a3)
    temp2 = np.sum(Theta1[:, 1:]**2) + np.sum(Theta2[:, 1:]**2)  # exclude bias columns from regularization
J = np.sum(temp1.flatten()) / m + lmd * temp2 / (2*m)
# Calculate gradient
delta3 = a3 - y # 5000x10
delta2 = delta3 @ Theta2[:, 1:] * sigmoidGradient(z2) # 5000x25
DT2 = delta3.T @ a2_wb # 10x26
DT1 = delta2.T @ X # 25x401, X is a1
Theta1_grad = DT1 / m
Theta2_grad = DT2 / m
# print("Theta1.shape is {}".format(Theta1.shape))
# print("Theta2.shape is {}".format(Theta2.shape))
# print("Theta1_grad.shape is {}".format(Theta1_grad.shape))
# print("Theta2_grad.shape is {}".format(Theta2_grad.shape))
# adding regularization
Theta1_grad[:, 1:] += lmd * Theta1[:, 1:] / m
Theta2_grad[:, 1:] += lmd * Theta2[:, 1:] / m
# unroll gradients (note in numpy, default order is by row first)
grad = np.concatenate((Theta1_grad.flatten(), Theta2_grad.flatten()))
return J, grad
# test gradient without regularization
lmd = 0
debug_J, debug_grad = nnCostFunction2(nn_params, input_lsize, hidden_lsize, num_labels, X_wb, Y, lmd)
print(debug_grad[:10])
print("Expected: [ 6.18712766e-05 0.00000000e+00 0.00000000e+00 4.15336892e-09 \n" +
"-5.29868773e-08 1.42184272e-07 1.59715308e-06 -8.89999550e-07 \n" +
"-1.45513067e-06 -4.08953470e-07]")
# test gradient with regularization
lmd = 3
debug_J, debug_grad = nnCostFunction2(nn_params, input_lsize, hidden_lsize, num_labels, X_wb, Y, lmd)
print(debug_grad[:10])
print("Expected: [ 6.18712766e-05 -6.33744979e-12 1.31648811e-12 2.87621717e-14 \n" +
"3.09854983e-10 -3.45710507e-09 -2.85907272e-08 -1.54564033e-08 \n" +
"2.10275154e-08 1.92242492e-08]")
```
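Before moving on to training, a quick numerical gradient check is a useful sanity test of the backpropagation implementation. The sketch below is an illustration only (the helper `numerical_gradient` is not part of the original assignment code); it compares the analytic gradient from `nnCostFunction2` against central finite differences on a few randomly chosen parameters.
```
def numerical_gradient(cost_func, params, eps=1e-4, n_checks=10):
    """ approximates the gradient of cost_func at params by central
    finite differences, for a few randomly chosen parameter entries.
    cost_func must return (J, grad); only J is used here.
    """
    idxs = np.random.choice(len(params), size=n_checks, replace=False)
    num_grad = np.zeros(n_checks)
    for k, i in enumerate(idxs):
        perturb = np.zeros_like(params)
        perturb[i] = eps
        J_plus, _ = cost_func(params + perturb)
        J_minus, _ = cost_func(params - perturb)
        num_grad[k] = (J_plus - J_minus) / (2 * eps)
    return idxs, num_grad

# compare analytic and numerical gradients on a handful of entries
cost = lambda p: nnCostFunction2(p, input_lsize, hidden_lsize, num_labels, X_wb, Y, 1)
idxs, num_grad = numerical_gradient(cost, nn_params)
_, analytic_grad = cost(nn_params)
print(np.max(np.abs(num_grad - analytic_grad[idxs])))  # should be very small (~1e-9)
```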
### 2.6 Learning parameters using 'minimize' function
```
from scipy.optimize import minimize
# initial condition, 1d array
init_Theta1 = randInitializeWeights(input_lsize, hidden_lsize)
init_Theta2 = randInitializeWeights(hidden_lsize, num_labels)
init_nn_params = np.concatenate((init_Theta1.flatten(), init_Theta2.flatten()))
# set the regularization strength for training explicitly
# (otherwise lmd = 3 from the gradient test above would be reused)
lmd = 1
# run optimization
result = minimize(nnCostFunction2, init_nn_params, args=(input_lsize, hidden_lsize, num_labels, X_wb, Y, lmd),
method='TNC', jac=True, options={'disp': True})
print(result.x)
# Obtain Theta1 and Theta2 from result.x
nn_params = result.x
Theta1 = nn_params[:hidden_lsize*(input_lsize+1)].reshape((hidden_lsize, input_lsize+1))
Theta2 = nn_params[hidden_lsize*(input_lsize+1):].reshape((num_labels, hidden_lsize+1))
def predict(X, Theta1, Theta2):
""" predicts output given network parameters Theta1 and Theta2 in Theta.
The prediction from the neural network will be the label that has the largest output.
"""
a2 = sigmoid(X @ Theta1.T)
# add intercept terms to a2
m, n = a2.shape
a2_wb = np.concatenate((np.ones((m, 1)), a2), axis=1)
a3 = sigmoid(a2_wb @ Theta2.T)
# print(a3[:10, :])
# apply np.argmax to the output matrix to find the predicted label
# for that training sample
p = np.argmax(a3, axis=1)
# p[p==10] = 0
return p # this is a 1d array
# prediction accuracy
pred = predict(X_wb, Theta1, Theta2)
print(pred.shape)
accuracy = np.sum((pred==y).astype(int))/m*100
print('Training accuracy is {:.2f}%'.format(accuracy))
# randomly show 10 images and corresponding results
# randomly select 10 data points to display
rand_indices = np.random.randint(0, 5000, size=10)
sel = X[rand_indices, :]
for i in range(10):
# Display predicted digit
print("Predicted {} for this image: ".format(pred[rand_indices[i]]))
# display image
fig, ax = plt.subplots(figsize=(2, 2))
ax.imshow(sel[i, :].reshape(20, 20).T, cmap=plt.cm.gray, vmin=-1, vmax=1)
ax.axis('off')
plt.show()
```
## Image Cleaner Widget
fastai offers several widgets to support the workflow of a deep learning practitioner. The purpose of the widgets are to help you organize, clean, and prepare your data for your model. Widgets are separated by data type.
```
from fastai.vision import *
from fastai.widgets import DatasetFormatter, ImageCleaner, ImageDownloader, download_google_images
from fastai.gen_doc.nbdoc import *
%reload_ext autoreload
%autoreload 2
path = untar_data(URLs.MNIST_SAMPLE)
data = ImageDataBunch.from_folder(path)
learn = cnn_learner(data, models.resnet18, metrics=error_rate)
learn.fit_one_cycle(2)
learn.save('stage-1')
```
We create a databunch with all the data in the training set and no validation set (DatasetFormatter uses only the training set)
```
db = (ImageList.from_folder(path)
.split_none()
.label_from_folder()
.databunch())
learn = cnn_learner(db, models.resnet18, metrics=[accuracy])
learn.load('stage-1');
show_doc(DatasetFormatter)
```
The [`DatasetFormatter`](/widgets.image_cleaner.html#DatasetFormatter) class prepares your image dataset for widgets by returning a formatted [`DatasetTfm`](/vision.data.html#DatasetTfm) based on the [`DatasetType`](/basic_data.html#DatasetType) specified. Use `from_toplosses` to grab the most problematic images directly from your learner. Optionally, you can restrict the formatted dataset returned to `n_imgs`.
```
show_doc(DatasetFormatter.from_similars)
from fastai.gen_doc.nbdoc import *
from fastai.widgets.image_cleaner import *
show_doc(DatasetFormatter.from_toplosses)
show_doc(ImageCleaner)
```
[`ImageCleaner`](/widgets.image_cleaner.html#ImageCleaner) is for cleaning up images that don't belong in your dataset. It renders images in a row and gives you the opportunity to delete the file from your file system. To use [`ImageCleaner`](/widgets.image_cleaner.html#ImageCleaner) we must first use `DatasetFormatter().from_toplosses` to get the suggested indices for misclassified images.
```
ds, idxs = DatasetFormatter().from_toplosses(learn)
ImageCleaner(ds, idxs, path)
```
[`ImageCleaner`](/widgets.image_cleaner.html#ImageCleaner) does not change anything on disk (neither labels nor existence of images). Instead, it creates a 'cleaned.csv' file in your data path from which you need to load your new databunch for the changes to be applied.
```
df = pd.read_csv(path/'cleaned.csv', header='infer')
# We create a databunch from our csv. We include the data in the training set and we don't use a validation set (DatasetFormatter uses only the training set)
np.random.seed(42)
db = (ImageList.from_df(df, path)
.split_none()
.label_from_df()
.databunch(bs=64))
learn = cnn_learner(db, models.resnet18, metrics=error_rate)
learn = learn.load('stage-1')
```
You can then use [`ImageCleaner`](/widgets.image_cleaner.html#ImageCleaner) again to find duplicates in the dataset. To do this, you can specify `duplicates=True` while calling ImageCleaner after getting the indices and dataset from `.from_similars`. Note that if you are using a layer's output which has dimensions <code>(n_batches, n_features, 1, 1)</code> then you don't need any pooling (this is the case with the last layer). The suggested use of `.from_similars()` with resnets is using the last layer and no pooling, like in the following cell.
```
ds, idxs = DatasetFormatter().from_similars(learn, layer_ls=[0,7,1], pool=None)
ImageCleaner(ds, idxs, path, duplicates=True)
show_doc(ImageDownloader)
```
[`ImageDownloader`](/widgets.image_downloader.html#ImageDownloader) widget gives you a way to quickly bootstrap your image dataset without leaving the notebook. It searches and downloads images that match the search criteria and resolution / quality requirements and stores them on your filesystem within the provided `path`.
Images for each search query (or label) are stored in a separate folder within `path`. For example, if you populate `tiger` with `path` set to `./data`, you'll get a folder `./data/tiger/` with the tiger images in it.
[`ImageDownloader`](/widgets.image_downloader.html#ImageDownloader) will automatically clean up and verify the downloaded images with [`verify_images()`](/vision.data.html#verify_images) after downloading them.
```
path = Config.data_path()/'image_downloader'
os.makedirs(path, exist_ok=True)
ImageDownloader(path)
```
#### Downloading images in python scripts outside Jupyter notebooks
```
path = Config.data_path()/'image_downloader'
files = download_google_images(path, 'aussie shepherd', size='>1024*768', n_images=30)
len(files)
show_doc(download_google_images)
```
After populating images with [`ImageDownloader`](/widgets.image_downloader.html#ImageDownloader), you can get an [`ImageDataBunch`](/vision.data.html#ImageDataBunch) by calling `ImageDataBunch.from_folder(path, size=size)`, or using the data block API.
```
# Setup path and labels to search for
path = Config.data_path()/'image_downloader'
labels = ['boston terrier', 'french bulldog']
# Download images
for label in labels:
download_google_images(path, label, size='>400*300', n_images=50)
# Build a databunch and train!
src = (ImageList.from_folder(path)
.split_by_rand_pct()
.label_from_folder()
.transform(get_transforms(), size=224))
db = src.databunch(bs=16, num_workers=0)
learn = cnn_learner(db, models.resnet34, metrics=[accuracy])
learn.fit_one_cycle(3)
```
#### Downloading more than a hundred images
To fetch more than a hundred images, [`ImageDownloader`](/widgets.image_downloader.html#ImageDownloader) uses `selenium` and `chromedriver` to scroll through the Google Images search results page and scrape image URLs. They're not required as dependencies by default. If you don't have them installed on your system, the widget will show you an error message.
To install `selenium`, just `pip install selenium` in your fastai environment.
**On a mac**, you can install `chromedriver` with `brew cask install chromedriver`.
**On Ubuntu**
Take a look at the latest chromedriver version available, then run something like:
```
wget https://chromedriver.storage.googleapis.com/2.45/chromedriver_linux64.zip
unzip chromedriver_linux64.zip
```
Note that downloading under 100 images doesn't require any dependencies other than fastai itself, however downloading more than a hundred images [uses `selenium` and `chromedriver`](/widgets.image_cleaner.html#Downloading-more-than-a-hundred-images).
`size` can be one of:
```
'>400*300'
'>640*480'
'>800*600'
'>1024*768'
'>2MP'
'>4MP'
'>6MP'
'>8MP'
'>10MP'
'>12MP'
'>15MP'
'>20MP'
'>40MP'
'>70MP'
```
## Methods
## Undocumented Methods - Methods moved below this line will intentionally be hidden
```
show_doc(ImageCleaner.make_dropdown_widget)
show_doc(ImageCleaner.next_batch)
show_doc(DatasetFormatter.sort_idxs)
show_doc(ImageCleaner.make_vertical_box)
show_doc(ImageCleaner.relabel)
show_doc(DatasetFormatter.largest_indices)
show_doc(ImageCleaner.delete_image)
show_doc(ImageCleaner.empty)
show_doc(ImageCleaner.empty_batch)
show_doc(DatasetFormatter.comb_similarity)
show_doc(ImageCleaner.get_widgets)
show_doc(ImageCleaner.write_csv)
show_doc(ImageCleaner.create_image_list)
show_doc(ImageCleaner.render)
show_doc(DatasetFormatter.get_similars_idxs)
show_doc(ImageCleaner.on_delete)
show_doc(ImageCleaner.make_button_widget)
show_doc(ImageCleaner.make_img_widget)
show_doc(DatasetFormatter.get_actns)
show_doc(ImageCleaner.batch_contains_deleted)
show_doc(ImageCleaner.make_horizontal_box)
show_doc(DatasetFormatter.get_toplosses_idxs)
show_doc(DatasetFormatter.padded_ds)
```
## New Methods - Please document or move to the undocumented section
# MaterialsCoord benchmarking – symmetry of bonding algorithms
Several near neighbor methods do not produce symmetrical bonding. For example, if site A is bonded to site B, it is not guaranteed that site B will be bonded to site A. In the MaterialsCoord benchmark we enforce symmetrical bonding for all algorithms. In this notebook, we assess how unsymmetrical the bonding is for each near neighbor method.
*Written using:*
- MaterialsCoord==0.2.0
*Authors: Alex Ganose (05/20/20)*
---
First, lets initialize the near neighbor methods we are interested in.
```
from pymatgen.analysis.local_env import BrunnerNN_reciprocal, EconNN, JmolNN, \
MinimumDistanceNN, MinimumOKeeffeNN, MinimumVIRENN, \
VoronoiNN, CrystalNN
nn_methods = [
MinimumDistanceNN(), MinimumOKeeffeNN(), MinimumVIRENN(), JmolNN(),
EconNN(tol=0.5), BrunnerNN_reciprocal(), VoronoiNN(tol=0.5), CrystalNN()
]
```
Next, import the benchmark and choose which structures we are interested in.
```
from materialscoord.core import Benchmark
structure_groups = ["common_binaries", "elemental", "A2BX4", "ABX3", "ABX4"]
bm = Benchmark.from_structure_group(structure_groups)
```
Enforcing symmetry always increases the number of assigned bonds. To assess the symmetry, we therefore calculate the number of additional bonds resulting from enforcing symmetrical bonding. Calculating the coordination number from a `StructureGraph` object (as returned by `NearNeighbors.get_bonded_structure()`) always enforces symmetry. In contrast, calculating the coordination number directly from the `NearNeighbors.get_cn()` method does not enforce symmetry.
```
import numpy as np
from tqdm.auto import tqdm
symmetry_results = []
no_symmetry_results = []
for nn_method in tqdm(nn_methods):
nn_symmetry_cns = []
nn_no_symmetry_cns = []
for structure in bm.structures.values():
bs = nn_method.get_bonded_structure(structure)
for site_idx in range(len(structure)):
nn_symmetry_cns.append(bs.get_coordination_of_site(site_idx))
nn_no_symmetry_cns.append(nn_method.get_cn(structure, site_idx))
symmetry_results.append(nn_symmetry_cns)
no_symmetry_results.append(nn_no_symmetry_cns)
symmetry_results = np.array(symmetry_results)
no_symmetry_results = np.array(no_symmetry_results)
import pandas as pd
symmetry_totals = symmetry_results.sum(axis=1)
no_symmetry_totals = no_symmetry_results.sum(axis=1)
no_symmetry_norm = no_symmetry_totals / symmetry_totals
symmetry_extra = 1 - no_symmetry_norm
symmetry_df = pd.DataFrame(
columns=[n.__class__.__name__ for n in nn_methods],
data=[no_symmetry_norm, symmetry_extra],
index=["without symmetry", "with symmetry"]
)
import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path
sns.set(font="Helvetica", font_scale=1.3, rc={"figure.figsize": (7, 7)})
sns.set_style("white", {"axes.edgecolor": "black", "axes.linewidth": 1.3})
plt.style.use({"mathtext.fontset": "custom", "mathtext.rm": "Arial", "axes.grid.axis": "x"})
symmetry_df = symmetry_df.rename(columns={"BrunnerNN_reciprocal": "BrunnerNN"})
ax = symmetry_df.T.plot(kind='bar', stacked=True)
ax.set_xticklabels(symmetry_df.columns, rotation=60)
ax.legend(frameon=False, loc="upper left", bbox_to_anchor=(1, 1))
ax.set(ylabel="Fraction of bonds assigned", xlabel="", ylim=(0, 1))
ax.tick_params(axis='y', which='major', size=10, width=1, color='k', left=True, direction="in")
plt.savefig(Path("plots/symmetry.pdf"), bbox_inches="tight")
plt.show()
! open .
```
# Effective Data Visualization
## PyCon 2020
## Husni Almoubayyed [https://husni.space]
## Intro on packages:
- **Matplotlib and Seaborn**: The main plotting package in Python is matplotlib. Matplotlib is also the base for another package built on top of it called Seaborn. We will use Seaborn when possible, as it makes most things a lot easier and lets us achieve plots with sensible defaults in significantly fewer lines of code. We will still use matplotlib for some things, and it is important to understand that every time Seaborn creates a plot it is calling Matplotlib in the background (it sometimes also calls other libraries, like statsmodels, to do statistical calculations).
Matplotlib and Seaborn syntax is usually used as follows: plt. or sns.*typeofgraph*(arguments)
arguments are usually X and Y coordinates (or names of X and Y columns in a dataframe), colors, sizes, etc.
- **Pandas** is a library that handles [Pan]el [Da]ta. Basically it allows us to manipulate data in tables a lot more easily.
- **Numpy** is a python library that contains all the standard numerical operations you might wanna do
- **Sci-Kit Learn (sklearn)** is a widely used library that you can use to do most common non-deep machine learning methods.
## Intro to datasets:
We will use a few standard datasets throughout this tutorial. These can be imported from seaborn as will be shown later:
- **diamonds**: data on diamonds with prices, carats, color, clarity, cut, etc.
- **flights**: number of passengers in each month for each year for a few years in the ~50s
- **iris**: famous biology dataset that quantifies the morphologic variation of Iris flowers of three related species
- **titanic**: data on all titanic passengers including survival, age, ticket price paid, etc.
- **anscombe**: this is compiled of 4 different datasets that have the same first and second moments but look dramatically different
- **digits**: handwritten data of digits, used widely in machine learning
Other datasets that are not directly imported from seaborn:
- **financial data**: this will be requested in real time from yahoo finance using pandas.
- **CoViD-19 data**: https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_confirmed_usafacts.csv, credits to usafacts.org
## Installation Instructions
Install pip https://pip.pypa.io/en/stable/installing/
In the command line: $pip install notebook
or
Install conda https://www.anaconda.com/distribution/ (for Python >3.7)
and then run:
```
!pip install --upgrade matplotlib numpy scipy sklearn pandas seaborn plotly plotly-geo pandas_datareader
```
You might need to restart the kernel at this point to use any newly installed packages
Alternatively, you can go to bit.ly/PyConViz2020 to use a Colab hosted version of this notebook.
```
# import numpy and matplotlib and setup inline plots by running:
%pylab inline
import seaborn as sns
import pandas as pd
sns.set_style('darkgrid')
sns.set_context('notebook', font_scale=1.5)
sns.set_palette('colorblind')
# set the matplotlib backend to a higher-resolution option, on macOS, this is:
%config InlineBackend.figure_format = 'retina'
# set larger figure size for the rest of this notebook
matplotlib.rcParams['figure.figsize'] = 12, 8
```
## Data Exploration
```
anscombe = sns.load_dataset('anscombe')
for dataset in ['I','II','III','IV']:
print(anscombe[anscombe['dataset']==dataset].describe())
sns.lmplot(x='x', y='y', data=anscombe[anscombe['dataset']=='I'], height=8)
iris = sns.load_dataset('iris')
iris.head()
sns.scatterplot('petal_length', 'petal_width', hue='species', data=iris)
```
## Exercise:
On the same plot, fit 3 linear models for the 3 different iris species with the same x and y axes
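One possible sketch for this exercise (not the only answer): `lmplot` with `hue` fits a separate linear model per species on shared axes.
```
sns.lmplot(x='petal_length', y='petal_width', hue='species', data=iris, height=8)
```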
```
sns.jointplot('petal_length', 'petal_width', data=iris, height=8, kind='kde')
sns.pairplot(iris, height=8, hue='species')
```
How about categorical data?
We can make boxplots and violin plots simply by running:
```
sns.catplot(x='species', y='petal_length', kind='box', data=iris)  # example completion of the live-coding blank; try kind='violin' too
```
**Exercise:** Load up the flights dataset, plot a linear model of the passengers number as a function of year, one for each month of the year.
**Exercise:** Load up the diamonds dataset from seaborn. Plot the price as a function of carat, with different color grades colored differently. Choose a small marker size and change the transparency (alpha argument) to a value smaller than 1. Add some jitter to the x values to make them clearer.
**Exercise:** Load up the Titanic dataset from seaborn. Make a boxplot of the fare of the ticket paid against whether a person survived or not.
## Polar coordinates
```
plt.quiver??
X = np.random.uniform(0, 10, 100)
Y = np.random.uniform(0, 1, 100)
U = np.ones_like(X)
V = np.ones_like(Y)
f = plt.figure()
ax = f.add_subplot(111)
ax.quiver(X, Y, U, V, headlength=0, headaxislength=0, color='steelblue')
theta = np.linspace(0,2*np.pi,100)
r = np.linspace(0, 1, 100)
dr = 1
dt = 0
U = dr * cos(theta) - dt * sin (theta)
V = dr * sin(theta) + dt * cos(theta)
f = plt.figure()
ax = f.add_subplot(111, polar=True)
ax.quiver(theta, r, U, V, headlength=0, headaxislength=0, color='steelblue')
theta = np.linspace(0,2*np.pi,100)
r = np.random.uniform(0, 1, 100)
U = dr * cos(theta)
V = dr * sin(theta)
f = plt.figure()
ax = f.add_subplot(111, polar=True)
ax.quiver(theta, r, U, V, headlength=0, headaxislength=0, color='steelblue')
```
**Exercise 1:** radial plot with all sticks starting at a radius of 1
**Exercise 2:** all sticks are horizontal
**Exercise 3:** Use a 'mollweide' projection using the projection argument of add_subplot(). Use horizontal sticks now but make sure your sticks span the entire space.
# 2. Density Estimation
Often when we are making plots, we are trying to estimate the underlying distribution from which it was randomly drawn, this is known as Density Estimation in statistics. The simplest density estimator that does not make particular assumptions on the distribution of the data (we call this nonparametric) is the histogram.
## Histograms
```
# import out first dataset, an example from biology
iris = sns.load_dataset('iris')
iris.head()
data = iris['sepal_length']
plt.hist(data, bins=20)  # example completion of the live-coding blank: a simple histogram
data = iris['sepal_length']
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.hist(data, bins=5)   # example completion: few, wide bins
ax2.hist(data, bins=50)  # example completion: many, narrow bins
```
Formally, The histogram estimator is $$ \hat{p}(x) = \frac{\hat{\theta_j}}{h} $$ where $$ \hat{\theta_j} = \frac{1}{n} \sum_{i=1}^n I(X_i \in B_j ) $$
We can calculate the mean squared error, which is a metric that tells us how well our estimator is, it turns out to be: $$MSE(x) = bias^2(x) + Var(x) = Ch^2 + \frac{C}{nh} $$
minimized by choosing $h = (\frac{C}{n})^{1/3}$, resulting in a risk (the expected value of the MSE) of:
$$ R = \mathcal{O}(\frac{1}{n})^{2/3}$$
This means that
- There is a bias-variance tradeoff when it comes to choosing the width of the bins, lower width ($h$), means more bias and less variance. There is no choice of $h$ that optimizes both.
- The risk goes down at a pretty slow rate as the number of datapoints increases, which begs the question, is there a better estimator that converges more quickly? The answer is yes, this is achieved by:
## Kernel Density Estimation
Kernels follow the conditions:
$$ K(x) \geq 0, \int K(x) dx = 1, \int x K(x) dx = 0$$
```
sns.kdeplot(data, shade=True)  # example completion of the live-coding blank: a kernel density estimate
```
So how is this better than the histogram?
We can again calculate the MSE, which turns out to be:
$$MSE(x) = bias^2(x) + Var(x) = C_1h^4 + \frac{C_2}{nh}$$
minimized by choosing $ h = (\frac{C_1}{4nC_2})^{1/5} $, giving a risk of:
$$ R_{KDE} = \mathcal{O}(\frac{1}{n})^{4/5} < R_{histogram}$$
This still has a bias-variance tradeoff, but the estimator converges faster than in the case of histograms. Can we do even better? The answer is no, due to something in statistics called the minimax theorem.
**Exercise**: Instead of using just petal length, consider a 2D distribution with the two axes being petal length and petal width. Plot the distribution, the Histogram of the distribution and the KDE of the distribution. Make sure you play around with bin numbers and bandwidth to get a reasonably satisfying plot
```
data=iris[['petal_length', 'petal_width']]
sns.scatterplot('petal_length', 'petal_width', data=iris)
sns.distplot(iris['petal_length'])
```
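A possible sketch for the exercise above (an illustration; tweak `bins` and the KDE bandwidth to see the bias-variance tradeoff):
```
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6))
ax1.hist2d(iris['petal_length'], iris['petal_width'], bins=20, cmap='viridis')
ax1.set(title='2D histogram', xlabel='petal_length', ylabel='petal_width')
sns.kdeplot(iris['petal_length'], iris['petal_width'], ax=ax2, shade=True, cmap='viridis')
ax2.set(title='2D KDE', xlabel='petal_length', ylabel='petal_width')
```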
# 3. Visualizing High Dimensional Datasets
```
from sklearn.decomposition import PCA
from sklearn.datasets import load_digits
from sklearn.datasets import make_swiss_roll
import mpl_toolkits.mplot3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
from sklearn.manifold import TSNE
digits = load_digits()
shape(digits['data'])
```
## Principal Component Analysis
PCA computes the linear projections of greatest variance from the top eigenvectors of the data covariance matrix
Check out some more cool visualization of PCA at https://setosa.io/ev/principal-component-analysis/ and read more about the math and applications at https://www.cs.cmu.edu/~bapoczos/other_presentations/PCA_24_10_2009.pdf
**Exercise:** Use PCA to reduce the dimensionality of the digits dataset. Plot them color-coded by the different classes of digits.
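A possible sketch for the PCA exercise (assuming the `digits` dataset loaded above):
```
pca = PCA(2)
projected = pca.fit_transform(digits['data'])
plt.scatter(projected[:, 0], projected[:, 1],
            c=digits.target, alpha=0.3,
            cmap=plt.cm.get_cmap('Paired', 10))
plt.colorbar()
```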
### Failures of PCA
```
X, t = make_swiss_roll(1000, 0.05)
ward = AgglomerativeClustering(n_clusters=5,
connectivity=kneighbors_graph(X, n_neighbors=5, include_self=False),
linkage='ward').fit(X)
labels = ward.labels_
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for label in np.unique(labels):
ax.scatter(X[labels == label, 0], X[labels == label, 1], X[labels == label, 2])
pca = PCA(2)
projected = pca.fit_transform(X)
for label in np.unique(labels):
sns.scatterplot(projected[labels == label, 0], projected[labels == label, 1],
color=plt.cm.jet(float(label) / np.max(labels + 1)), marker='.')
```
## t-Distributed Stochastic Neighbor Embedding
Converts similarities between data points to joint probabilities and tries to minimize the Kullback-Leibler divergence between the joint probabilities of the low-dimensional embedding and the high-dimensional data.
First, t-SNE constructs a probability distribution over pairs of high-dimensional objects in such a way that similar objects have a high probability of being picked while dissimilar points have an extremely small probability of being picked. Second, t-SNE defines a similar probability distribution over the points in the low-dimensional map, and it minimizes the Kullback–Leibler divergence (KL divergence ) between the two distributions with respect to the locations of the points in the map.
For more details on t-SNE, check out the original paper http://www.jmlr.org/papers/volume9/vandermaaten08a/vandermaaten08a.pdf
```
tSNE = TSNE(learning_rate=10,
perplexity=30)
projected = tSNE.fit_transform(X)
plt.scatter(projected[:, 0], projected[:, 1],
c=labels, alpha=0.3,
cmap=plt.cm.get_cmap('Paired', 5))
#plt.colorbar()
```
**Exercise:** Do this again for the digits dataset. Does this look better than PCA?
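A possible sketch for this exercise (t-SNE on all ~1800 digits takes a little while to run; naming the result `projected` also matches the variable reused by the color-palette example further below):
```
tSNE = TSNE(learning_rate=10, perplexity=30)
projected = tSNE.fit_transform(digits['data'])
plt.scatter(projected[:, 0], projected[:, 1],
            c=digits.target, alpha=0.3,
            cmap=plt.cm.get_cmap('Paired', 10))
plt.colorbar()
```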
# 4. Interactive Visualization
```
# import libraries we're gonna use
import pandas_datareader.data as web
import datetime
import plotly.figure_factory as ff
import plotly.graph_objs as go
start = datetime.datetime(2008, 1, 1)
end = datetime.datetime(2018, 1, 1)
# This fetches the stock prices for the S&P 500 (via the SPY ETF) for the dates we selected from Yahoo Finance.
spy_df = web.DataReader('SPY', 'yahoo', start, end).reset_index()  # example completion of the live-coding blank
data = go.Scatter(x=spy_df.Date, y=spy_df.Close)
go.Figure(data)
```
**Exercise:** A candlestick chart is a powerful chart in finance that shows the opening price, closing price, highest price and lowest price of a trading day. Create a candlestick chart of the first 90 days of the data. You can find Candlestick in the 'go' module.
**Exercise:** It's hard to compare AAPL to SPY when viewed as is. Can you plot this again in a way that makes the returns of AAPL more easily comparable to the returns of the benchmark SPY?
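One possible approach for the comparison exercise (a sketch; the AAPL fetch mirrors the SPY call above, and Yahoo access may vary): normalize each series to its first closing price so both start at 1 and the relative returns are directly comparable.
```
aapl_df = web.DataReader('AAPL', 'yahoo', start, end).reset_index()
fig = go.Figure()
fig.add_trace(go.Scatter(x=spy_df.Date, y=spy_df.Close / spy_df.Close.iloc[0], name='SPY'))
fig.add_trace(go.Scatter(x=aapl_df.Date, y=aapl_df.Close / aapl_df.Close.iloc[0], name='AAPL'))
fig
```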
```
covidf = pd.read_csv('~/Downloads/covid_confirmed_usafacts.csv',
dtype={"countyFIPS": str})
covidf.head()
values=covidf['4/5/20']
colorscale = ["#f7fbff","#deebf7","#c6dbef","#9ecae1",
"#6baed6","#4292c6","#2171b5","#08519c","#08306b"]
endpts = list(np.logspace(1, 5, len(colorscale) - 1))
fig = ff.create_choropleth(
fips=covidf['countyFIPS'], values=covidf['4/9/20'],# scope=['usa'],
binning_endpoints=endpts, colorscale=colorscale,
title_text = 'CoViD-19 Confirmed cases as of 4/9/20',
legend_title = '# of cases'
)
go.Figure(fig)
```
Many more types of plotly charts are available with examples here https://plotly.com/python/
# Effective Communication through Plotting
```
image = [[i for i in range(100)]]*10
sns.heatmap(image, cmap='jet', square=True)
```
## Color
```
# code snippet from Jake Vandeplas https://jakevdp.github.io/blog/2014/10/16/how-bad-is-your-colormap/
def grayify_cmap(cmap):
"""Return a grayscale version of the colormap"""
cmap = plt.cm.get_cmap(cmap)
colors = cmap(np.arange(cmap.N))
# convert RGBA to perceived greyscale luminance
# cf. http://alienryderflex.com/hsp.html
RGB_weight = [0.299, 0.587, 0.114]
luminance = np.sqrt(np.dot(colors[:, :3] ** 2, RGB_weight))
colors[:, :3] = luminance[:, np.newaxis]
return cmap.from_list(cmap.name + "_grayscale", colors, cmap.N)
flights = sns.load_dataset("flights").pivot("month", "year", "passengers")
sns.heatmap(flights, cmap='jet')
sns.heatmap(flights, cmap=grayify_cmap('jet'))
```
## 3 Types of Viable Color palettes/colormaps:
### 1. Perceptually uniform sequential
```
sns.heatmap(flights, cmap='viridis')
sns.heatmap(flights, cmap='Purples')
```
## 2. Diverging
```
import pandas as pd
pit_climate_df = pd.DataFrame(
dict(Month = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'],
High = [2, 3, 10, 16, 22, 27, 28, 28, 24, 17, 10, 5],
Low = [-7, -5, 0, 5, 11, 16, 18, 18, 14,7, 3, -2])
)
pit_climate_df.head()
sns.heatmap(pit_climate_df[['High', 'Low']].T,
cmap='coolwarm',
center=0,#np.mean(pit_climate_df[['High', 'Low']].mean().mean()),
square=True,
xticklabels=pit_climate_df['Month'])
```
## 3. Categorical
example from before:
```
plt.scatter(projected[:, 0], projected[:, 1],
c=digits.target, alpha=0.3,
cmap=plt.cm.get_cmap('Paired', 10))
plt.colorbar()
from IPython.display import Image
Image('Resources/51417489_2006270206137719_6713863014199590912_n.png')
Image('Resources/50283372_1999138550184218_5288878489854803968_o.png')
```
You can also specify a color palette to use for the rest of a notebook or script by running `sns.set_palette()`, as shown further below.
Other things to consider:
* Use salient marker types, full list at https://matplotlib.org/3.2.1/api/markers_api.html
```
d1 = np.random.uniform(-2.5, 2.5, (100, 100))
d2 = np.random.randn(5,5)
sns.scatterplot(d1[:,0], d1[:,1], marker='+', color='steelblue')
sns.scatterplot(d2[:,0], d2[:,1], color='steelblue')
sns.lmplot('petal_length', 'petal_width', iris,
height=10,
hue='species',
markers=['1','2','3'],
fit_reg=False)
sns.scatterplot(d1[:,0], d1[:,1], marker='+', color='steelblue')
```
There are more than 2 axes on a 2-dimensional screen. Can you think of ways to include more axes?
We can use each of the following to map to an axis:
- color
- size (for numerical data)
- shape (for categorical data)
- literally making a 3D plot (as in the swiss roll, useful in the case of 3 spatial dimensions)
```
sns.set_palette('colorblind')
```
Read more on choosing colors at:
* https://seaborn.pydata.org/tutorial/color_palettes.html
* https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html
One of my favorite resources on clarity in plotting:
* http://blogs.nature.com/methagora/2013/07/data-visualization-points-of-view.html
New interesting package that we don't have time for today but is definitely worth mentioning. Makes visualization more intuitive by making it declarative is Altair https://altair-viz.github.io
# Mask R-CNN - Train on Shapes Dataset
This notebook shows how to train Mask R-CNN on your own dataset. To keep things simple we use a synthetic dataset of shapes (squares, triangles, and circles) which enables fast training. You'd still need a GPU, though, because the network backbone is a Resnet101, which would be too slow to train on a CPU. On a GPU, you can start to get okay-ish results in a few minutes, and good results in less than an hour.
The code of the *Shapes* dataset is included below. It generates images on the fly, so it doesn't require downloading any data. And it can generate images of any size, so we pick a small image size to train faster.
```
import os
import sys
import random
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
from config import Config
import utils
import model as modellib
import visualize
from model import log
%matplotlib inline
# Root directory of the project
ROOT_DIR = os.getcwd()
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
```
## Configurations
```
class ShapesConfig(Config):
"""Configuration for training on the toy shapes dataset.
Derives from the base Config class and overrides values specific
to the toy shapes dataset.
"""
# Give the configuration a recognizable name
NAME = "shapes"
# Train on 1 GPU and 8 images per GPU. We can put multiple images on each
# GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
GPU_COUNT = 1
IMAGES_PER_GPU = 8
# Number of classes (including background)
NUM_CLASSES = 1 + 3 # background + 3 shapes
# Use small images for faster training. Set the limits of the small side
    # and the large side, and that determines the image shape.
IMAGE_MIN_DIM = 128
IMAGE_MAX_DIM = 128
# Use smaller anchors because our image and objects are small
RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) # anchor side in pixels
# Reduce training ROIs per image because the images are small and have
# few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
TRAIN_ROIS_PER_IMAGE = 32
# Use a small epoch since the data is simple
STEPS_PER_EPOCH = 100
# use small validation steps since the epoch is small
VALIDATION_STEPS = 5
config = ShapesConfig()
config.display()
```
## Notebook Preferences
```
def get_ax(rows=1, cols=1, size=8):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Change the default size attribute to control the size
of rendered images
"""
_, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
return ax
```
## Dataset
Create a synthetic dataset
Extend the Dataset class and add a method to load the shapes dataset, `load_shapes()`, and override the following methods:
* load_image()
* load_mask()
* image_reference()
```
class ShapesDataset(utils.Dataset):
"""Generates the shapes synthetic dataset. The dataset consists of simple
shapes (triangles, squares, circles) placed randomly on a blank surface.
The images are generated on the fly. No file access required.
"""
def load_shapes(self, count, height, width):
"""Generate the requested number of synthetic images.
count: number of images to generate.
height, width: the size of the generated images.
"""
# Add classes
self.add_class("shapes", 1, "square")
self.add_class("shapes", 2, "circle")
self.add_class("shapes", 3, "triangle")
# Add images
# Generate random specifications of images (i.e. color and
# list of shapes sizes and locations). This is more compact than
# actual images. Images are generated on the fly in load_image().
for i in range(count):
bg_color, shapes = self.random_image(height, width)
self.add_image("shapes", image_id=i, path=None,
width=width, height=height,
bg_color=bg_color, shapes=shapes)
def load_image(self, image_id):
"""Generate an image from the specs of the given image ID.
Typically this function loads the image from a file, but
in this case it generates the image on the fly from the
specs in image_info.
"""
info = self.image_info[image_id]
bg_color = np.array(info['bg_color']).reshape([1, 1, 3])
image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)
image = image * bg_color.astype(np.uint8)
for shape, color, dims in info['shapes']:
image = self.draw_shape(image, shape, dims, color)
return image
def image_reference(self, image_id):
"""Return the shapes data of the image."""
info = self.image_info[image_id]
if info["source"] == "shapes":
return info["shapes"]
else:
super(self.__class__).image_reference(self, image_id)
def load_mask(self, image_id):
"""Generate instance masks for shapes of the given image ID.
"""
info = self.image_info[image_id]
shapes = info['shapes']
count = len(shapes)
mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)
for i, (shape, _, dims) in enumerate(info['shapes']):
mask[:, :, i:i+1] = self.draw_shape(mask[:, :, i:i+1].copy(),
shape, dims, 1)
# Handle occlusions
occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)
for i in range(count-2, -1, -1):
mask[:, :, i] = mask[:, :, i] * occlusion
occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))
# Map class names to class IDs.
class_ids = np.array([self.class_names.index(s[0]) for s in shapes])
return mask, class_ids.astype(np.int32)
def draw_shape(self, image, shape, dims, color):
"""Draws a shape from the given specs."""
# Get the center x, y and the size s
x, y, s = dims
if shape == 'square':
cv2.rectangle(image, (x-s, y-s), (x+s, y+s), color, -1)
elif shape == "circle":
cv2.circle(image, (x, y), s, color, -1)
elif shape == "triangle":
points = np.array([[(x, y-s),
(x-s/math.sin(math.radians(60)), y+s),
(x+s/math.sin(math.radians(60)), y+s),
]], dtype=np.int32)
cv2.fillPoly(image, points, color)
return image
def random_shape(self, height, width):
"""Generates specifications of a random shape that lies within
the given height and width boundaries.
        Returns a tuple of three values:
* The shape name (square, circle, ...)
* Shape color: a tuple of 3 values, RGB.
* Shape dimensions: A tuple of values that define the shape size
and location. Differs per shape type.
"""
# Shape
shape = random.choice(["square", "circle", "triangle"])
# Color
color = tuple([random.randint(0, 255) for _ in range(3)])
# Center x, y
buffer = 20
y = random.randint(buffer, height - buffer - 1)
x = random.randint(buffer, width - buffer - 1)
# Size
s = random.randint(buffer, height//4)
return shape, color, (x, y, s)
def random_image(self, height, width):
"""Creates random specifications of an image with multiple shapes.
Returns the background color of the image and a list of shape
specifications that can be used to draw the image.
"""
# Pick random background color
bg_color = np.array([random.randint(0, 255) for _ in range(3)])
# Generate a few random shapes and record their
# bounding boxes
shapes = []
boxes = []
N = random.randint(1, 4)
for _ in range(N):
shape, color, dims = self.random_shape(height, width)
shapes.append((shape, color, dims))
x, y, s = dims
boxes.append([y-s, x-s, y+s, x+s])
        # Apply non-max suppression with a 0.3 threshold to avoid
# shapes covering each other
keep_ixs = utils.non_max_suppression(np.array(boxes), np.arange(N), 0.3)
shapes = [s for i, s in enumerate(shapes) if i in keep_ixs]
return bg_color, shapes
# Training dataset
dataset_train = ShapesDataset()
dataset_train.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_train.prepare()
# Validation dataset
dataset_val = ShapesDataset()
dataset_val.load_shapes(50, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_val.prepare()
# Load and display random samples
image_ids = np.random.choice(dataset_train.image_ids, 4)
for image_id in image_ids:
image = dataset_train.load_image(image_id)
mask, class_ids = dataset_train.load_mask(image_id)
visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)
```
## Create Model
```
# Create model in training mode
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=MODEL_DIR)
# Which weights to start with?
init_with = "coco" # imagenet, coco, or last
if init_with == "imagenet":
model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
# Load weights trained on MS COCO, but skip layers that
# are different due to the different number of classes
# See README for instructions to download the COCO weights
model.load_weights(COCO_MODEL_PATH, by_name=True,
exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
elif init_with == "last":
# Load the last model you trained and continue training
model.load_weights(model.find_last()[1], by_name=True)
```
## Training
Train in two stages:
1. Only the heads. Here we're freezing all the backbone layers and training only the randomly initialized layers (i.e. the ones that we didn't use pre-trained weights from MS COCO). To train only the head layers, pass `layers='heads'` to the `train()` function.
2. Fine-tune all layers. For this simple example it's not necessary, but we're including it to show the process. Simply pass `layers="all"` to train all layers.
```
# Train the head branches
# Passing layers="heads" freezes all layers except the head
# layers. You can also pass a regular expression to select
# which layers to train by name pattern.
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=1,
layers='heads')
# Fine tune all layers
# Passing layers="all" trains all layers. You can also
# pass a regular expression to select which layers to
# train by name pattern.
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE / 10,
epochs=2,
layers="all")
# Save weights
# Typically not needed because callbacks save after every epoch
# Uncomment to save manually
# model_path = os.path.join(MODEL_DIR, "mask_rcnn_shapes.h5")
# model.keras_model.save_weights(model_path)
```
## Detection
```
class InferenceConfig(ShapesConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
inference_config = InferenceConfig()
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
config=inference_config,
model_dir=MODEL_DIR)
# Get path to saved weights
# Either set a specific path or find last trained weights
# model_path = os.path.join(ROOT_DIR, ".h5 file name here")
model_path = model.find_last()[1]
# Load trained weights (fill in path to trained weights here)
assert model_path != "", "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
# Test on a random image
image_id = random.choice(dataset_val.image_ids)
original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\
modellib.load_image_gt(dataset_val, inference_config,
image_id, use_mini_mask=False)
log("original_image", original_image)
log("image_meta", image_meta)
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)
visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id,
dataset_train.class_names, figsize=(8, 8))
results = model.detect([original_image], verbose=1)
r = results[0]
visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'],
dataset_val.class_names, r['scores'], ax=get_ax())
```
## Evaluation
```
# Compute VOC-Style mAP @ IoU=0.5
# Running on 10 images. Increase for better accuracy.
image_ids = np.random.choice(dataset_val.image_ids, 10)
APs = []
for image_id in image_ids:
# Load image and ground truth data
image, image_meta, gt_class_id, gt_bbox, gt_mask =\
modellib.load_image_gt(dataset_val, inference_config,
image_id, use_mini_mask=False)
molded_images = np.expand_dims(modellib.mold_image(image, inference_config), 0)
# Run object detection
results = model.detect([image], verbose=0)
r = results[0]
# Compute AP
AP, precisions, recalls, overlaps =\
utils.compute_ap(gt_bbox, gt_class_id,
r["rois"], r["class_ids"], r["scores"])
APs.append(AP)
print("mAP: ", np.mean(APs))
```
```
! nvidia-smi
```
# Introduction
This notebook holds the code for the [Involution](https://arxiv.org/abs/2103.06255) layer in TensorFlow. The idea behind this layer is to invert the inherent properties of convolution. Where convolution is spatial-agnostic and channel-specific, involution is spatial-specific and channel-agnostic.
# Imports
```
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
```
# Convolution
To understand involution we need to first understand convolution. Let us consider $X\in\mathbb{R}^{H\times W\times C_{inp}}$ denote the input feature map where $H, W$ represent its height and width and $C_{inp}$ be its channel size. A collection of $C_{out}$ number of convolution filters with fixed kernel size of $K \times K$ is denoted as $\mathcal{F}\in\mathbb{R}^{C_{out}\times C_{inp}\times K\times K}$.
The filters perform a Multiply-Add operation on the input feature map in a sliding window manner to yield the output feature map $Y\in \mathbb{R}^{H\times W\times C_{out}}$.
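As a quick shape check (a sketch for illustration, not part of the involution code below, with the tensor sizes chosen arbitrarily): a standard convolution maps an $H\times W\times C_{inp}$ tensor to $H\times W\times C_{out}$ using a kernel of shape $K\times K\times C_{inp}\times C_{out}$ that is shared across all spatial positions.
```
# hypothetical shapes for illustration
x = tf.random.normal((1, 32, 32, 3))                       # B, H, W, C_inp
conv = tf.keras.layers.Conv2D(filters=16, kernel_size=3, padding="same")
y = conv(x)
print(conv.kernel.shape)  # (3, 3, 3, 16) -> K, K, C_inp, C_out (spatially shared)
print(y.shape)            # (1, 32, 32, 16) -> B, H, W, C_out
```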
# Involution
Involution kernels $\mathcal{H}\in \mathbb{R}^{H\times W\times K\times K\times G}$ are devised to operate in a symmetrically opposite manner to that of the convolution kernels. Observing the shape of the involution kernels, we notice the following things:
- Each pixel of the input feature map is entitled to get its own involution kernel.
- Each kernel is of the shape of $K\times K\times G$.
- The output $Y$ will be of the same shape as that of the input feature map $X$.
The problem with involution is that we cannot store a fixed-shape tensor of per-pixel kernels, as that would hurt resolution independence in the neural network. This thought led the researchers to conceptualize a generation function $\phi$ that generates the involution kernels conditioned on the original input tensor.
$$
\mathcal{H}_{ij}=\phi{(X_{ij})}\\
\mathcal{H}_{ij}=W_{1}\sigma{(W_{0}X_{ij})}\\
$$
```
class Involution(tf.keras.layers.Layer):
def __init__(self, channel, group_number, kernel_size, stride, reduction_ratio):
super().__init__()
# The assert makes sure that the user knows about the
# reduction size. We cannot have 0 filters in Conv2D.
        assert reduction_ratio <= channel, "Reduction ratio must be less than or equal to channel size"
self.channel = channel
self.group_number = group_number
self.kernel_size = kernel_size
self.stride = stride
self.reduction_ratio = reduction_ratio
self.o_weights = tf.keras.layers.AveragePooling2D(
pool_size=self.stride,
strides=self.stride,
padding="same") if self.stride > 1 else tf.identity
self.kernel_gen = tf.keras.Sequential([
tf.keras.layers.Conv2D(
filters=self.channel//self.reduction_ratio,
kernel_size=1),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.ReLU(),
tf.keras.layers.Conv2D(
filters=self.kernel_size*self.kernel_size*self.group_number,
kernel_size=1)
])
def call(self, x):
_, H, W, C = x.shape
H = H//self.stride
W = W//self.stride
# Extract input feature blocks
unfolded_x = tf.image.extract_patches(
images=x,
sizes=[1,self.kernel_size,self.kernel_size,1],
strides=[1,self.stride,self.stride,1],
rates=[1,1,1,1],
padding="SAME") # B, H, W, K*K*C
unfolded_x = tf.keras.layers.Reshape(
target_shape=(H,
W,
self.kernel_size*self.kernel_size,
C//self.group_number,
self.group_number)
)(unfolded_x) # B, H, W, K*K, C//G, G
# generate the kernel
kernel_inp = self.o_weights(x)
kernel = self.kernel_gen(kernel_inp) # B, H, W, K*K*G
kernel = tf.keras.layers.Reshape(
target_shape=(H,
W,
self.kernel_size*self.kernel_size,
1,
self.group_number)
)(kernel) # B, H, W, K*K, 1, G
# Multiply-Add op
out = tf.math.multiply(kernel, unfolded_x) # B, H, W, K*K, C//G, G
out = tf.math.reduce_sum(out, axis=3) # B, H, W, C//G, G
out = tf.keras.layers.Reshape(
target_shape=(H,
W,
C)
)(out) # B, H, W, C
return out
```
# Comparison
In this section we will try to emulate [TensorFlow's tutorial on CIFAR classification](https://www.tensorflow.org/tutorials/images/cnn). Here we build one model with convolutional layers while the other is based on involutional layers.
```
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
train_ds = tf.data.Dataset.from_tensor_slices((train_images, train_labels)).shuffle(256).batch(256)
test_ds = tf.data.Dataset.from_tensor_slices( (test_images, test_labels)).batch(256)
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck']
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i])
# The CIFAR labels happen to be arrays,
# which is why you need the extra index
plt.xlabel(class_names[train_labels[i][0]])
plt.show()
```
## Convolutional Neural Network
```
convolution_model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3), padding="same"),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding="same"),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding="same"),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(10),
])
convolution_model.summary()
convolution_model.compile(
optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy']
)
conv_history = convolution_model.fit(
train_ds,
epochs=10,
validation_data=test_ds
)
```
### Loss plot
```
plt.plot(conv_history.history["loss"], label="loss")
plt.plot(conv_history.history["val_loss"], label="val_loss")
plt.legend()
plt.show()
```
### Accuracy plot
```
plt.plot(conv_history.history["accuracy"], label="acc")
plt.plot(conv_history.history["val_accuracy"], label="val_acc")
plt.legend()
plt.show()
```
## Involutional Neural Network
```
involution_model = tf.keras.models.Sequential([
Involution(channel=3,group_number=1,kernel_size=3,stride=1,reduction_ratio=2),
tf.keras.layers.ReLU(name="relu1"),
tf.keras.layers.MaxPooling2D((2, 2)),
Involution(channel=3,group_number=1,kernel_size=3,stride=1,reduction_ratio=2),
tf.keras.layers.ReLU(name="relu2"),
tf.keras.layers.MaxPooling2D((2, 2)),
Involution(channel=3,group_number=1,kernel_size=3,stride=1,reduction_ratio=2),
tf.keras.layers.ReLU(name="relu3"),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(10),
])
involution_model.compile(
optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy']
)
inv_history = involution_model.fit(
train_ds,
epochs=10,
validation_data=test_ds
)
involution_model.summary()
```
### Loss Plot
```
plt.plot(inv_history.history["loss"], label="loss")
plt.plot(inv_history.history["val_loss"], label="val_loss")
plt.legend()
plt.show()
```
### Accuracy Plot
```
plt.plot(inv_history.history["accuracy"], label="acc")
plt.plot(inv_history.history["val_accuracy"], label="val_acc")
plt.legend()
plt.show()
```
### Observation
A fun little experiment is to see the activation maps of the involution kernel.
```
layer_names = ["relu1","relu2","relu3"]
outputs = [involution_model.get_layer(name).output for name in layer_names]
vis_model = tf.keras.Model(involution_model.input, outputs)
fig, axes = plt.subplots(nrows=10, ncols=4, figsize=(10, 20))
[ax.axis("off") for ax in np.ravel(axes)]
for (ax_orig, ax_relu1, ax_relu2, ax_relu3), test_image in zip(axes, test_images[:10]):
relu_images_list = vis_model.predict(tf.expand_dims(test_image,0))
ax_orig.imshow(tf.clip_by_value(test_image, 0, 1))
ax_orig.set_title("Input Image")
ax_relu1.imshow(tf.clip_by_value(relu_images_list[0].squeeze(), 0, 1))
ax_relu1.set_title("ReLU 1")
ax_relu2.imshow(tf.clip_by_value(relu_images_list[1].squeeze(), 0, 1))
ax_relu2.set_title("ReLU 2")
ax_relu3.imshow(tf.clip_by_value(relu_images_list[2].squeeze(), 0, 1))
ax_relu3.set_title("ReLU 3")
```
# Section 4.3 : CYCLICAL MOMENTUM
## Summary
- Learning rate and momentum are closely dependent, and both must be optimised
- Momentum should be set as high as possible without causing instabilities in training
- Momentum cannot be optimised in a similar way to LR, by using a momentum finder
- Optimum settings found to be use of cyclical LR (initially increasing) and cyclical momentum (initially decreasing)
- If constant LR is used, a large (but not too large), constant momentum should be used
- Too large a constant momentum results in instabilities, which are visible in early training
## Momentum in SGD
SGD parameter updates:
$\theta_{iter+1} = \theta_{iter} - \epsilon\,\delta L(F(x,\theta),\theta),$
where $\theta$ are the parameters, $\epsilon$ is the learning rate, and $\delta L(F(x,\theta),\theta)$ is the gradient of the loss.
Momentum modifies the update rule to:
$\nu_{iter+1} = \alpha\nu_{iter} - \epsilon\,\delta L(F(x,\theta),\theta)$
$\theta_{iter+1} = \theta_{iter}+\nu_{iter+1},$
where $\nu$ is the velocity and $\alpha$ is the momentum coefficient; i.e. $\alpha$ affects the size of the update on the same scale as $\epsilon$ does.
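As a minimal sketch of these update rules (a toy NumPy illustration on a quadratic loss, not the training code used below; the learning rate and coefficient values are arbitrary):
```python
import numpy as np

def sgd_momentum_step(theta, velocity, grad_fn, lr=0.1, alpha=0.9):
    """One update following the rules above: velocity first, then the parameters."""
    velocity = alpha * velocity - lr * grad_fn(theta)
    theta = theta + velocity
    return theta, velocity

# Toy problem: minimise L(theta) = theta**2, whose gradient is 2 * theta
theta, velocity = 5.0, 0.0
for _ in range(200):
    theta, velocity = sgd_momentum_step(theta, velocity, grad_fn=lambda t: 2 * t)
print(theta)  # close to 0
```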
## Cyclical momentum Example
Let's take the same model and train a few different configurations of learning rate and momentum:
```
%matplotlib inline
from __future__ import division
import sys
import os
sys.path.append('../')
from Modules.Basics import *
from Modules.Class_Basics import *
data, features = importData()
nFolds = 5
preprocParams = {'normIn':True, 'pca':False}
compileArgs = {'loss':'binary_crossentropy', 'optimizer':'sgd', 'depth':3, 'width':128, 'lr':5e2}
trainParams = {'epochs':20, 'batch_size':256, 'verbose':0}
```
### Constant LR, Constant Momentum
```
from pathlib import Path
import os
results_ConstLR_ConstMom85, history_ConstLR_ConstMom85 = cvTrainClassifier(data, features, nFolds, preprocParams,
{'version':'modelRelu', 'nIn':len(features),
'compileArgs':{**compileArgs, 'momentum':0.85}},
trainParams, useEarlyStop=False, plot=False)
results_ConstLR_ConstMom90, history_ConstLR_ConstMom90 = cvTrainClassifier(data, features, nFolds, preprocParams,
{'version':'modelRelu', 'nIn':len(features),
'compileArgs':{**compileArgs, 'momentum':0.90}},
trainParams, useEarlyStop=False, plot=False)
results_ConstLR_ConstMom95, history_ConstLR_ConstMom95 = cvTrainClassifier(data, features, nFolds, preprocParams,
{'version':'modelRelu', 'nIn':len(features),
'compileArgs':{**compileArgs, 'momentum':0.95}},
trainParams, useEarlyStop=False, plot=False)
results_ConstLR_ConstMom99, history_ConstLR_ConstMom99 = cvTrainClassifier(data, features, nFolds, preprocParams,
{'version':'modelRelu', 'nIn':len(features),
'compileArgs':{**compileArgs, 'momentum':0.99}},
trainParams, useEarlyStop=False, plot=False)
getModelHistoryComparisonPlot([history_ConstLR_ConstMom85, history_ConstLR_ConstMom90, history_ConstLR_ConstMom95, history_ConstLR_ConstMom99],
['LR=500, Mom=0.85', 'LR=500, Mom=0.90', 'LR=500, Mom=0.95', 'LR=500, Mom=0.99'], cv=True)
```
Similar to the paper, we see that using a constant learning rate requires high values of momentum to converge quickly: as the coefficient is increased, the networks reach their minima in fewer and fewer epochs. At very high momenta (<span style="color:red">red</span>), the network eventually overfits and starts diverging. However, it shows slight instability in its early stages of training, which (as the paper suggests) could be used to catch the eventual overfitting early and adjust the coefficient.
### Constant LR, Cyclical Momentum
```
stepScale = 4
results_ConstLR_CycMom95_85, history_ConstLR_CycMom95_85 = cvTrainClassifier(data, features, nFolds, preprocParams,
{'version':'modelRelu', 'nIn':len(features),
'compileArgs':compileArgs},
trainParams, useEarlyStop=False, plot=False,
useCallbacks={'LinearCMom':{'maxMom':0.95,'minMom':0.85,
'scale':stepScale, 'plotMom':False}})
results_ConstLR_CycMom99_90, history_ConstLR_CycMom99_90 = cvTrainClassifier(data, features, nFolds, preprocParams,
{'version':'modelRelu', 'nIn':len(features),
'compileArgs':compileArgs},
trainParams, useEarlyStop=False, plot=False,
useCallbacks={'LinearCMom':{'maxMom':0.99,'minMom':0.90,
'scale':stepScale, 'plotMom':False}})
results_ConstLR_CycMom99_95, history_ConstLR_CycMom99_95 = cvTrainClassifier(data, features, nFolds, preprocParams,
{'version':'modelRelu', 'nIn':len(features),
'compileArgs':compileArgs},
trainParams, useEarlyStop=False, plot=False,
useCallbacks={'LinearCMom':{'maxMom':0.99,'minMom':0.95,
'scale':stepScale, 'plotMom':False}})
getModelHistoryComparisonPlot([history_ConstLR_CycMom95_85, history_ConstLR_CycMom99_90, history_ConstLR_CycMom99_95, history_ConstLR_ConstMom99],
['LR=500, Cyclical mom [0.95-0.85]', 'LR=500, Cyclical mom [0.99-0.90]', 'LR=500, Cyclical mom [0.99-0.95]', 'LR=500, Mom=0.99'], cv=True)
```
Here we can see that using a cyclical momentum schedule can be quite unstable (the loss fluctuates, possibly an artifact of the step size), but it does provide some resistance to overfitting (the late test loss is slow to rise).
Comparing a constant momentum of 0.99 (<span style="color:red">red</span>) to a cyclical momentum between 0.99 and 0.95 (<span style="color:green">green</span>), we can see that the cycling suppresses the rise in test loss in late training and achieves a better loss minimum. Initial training is also better; however, the artifacts of the scheduling cause mild divergence around epochs 7 and 15, preventing the network from converging earlier than might otherwise be possible.
As the width of the cycle is increased (<span style="color:green">green</span> to <span style="color:orange">orange</span>), these artifacts become more apparent as the mild divergences become sharp spikes.
### Cyclical LR, Constant Momentum
```
stepScale = 4
results_CycLR_ConstMom85, history_CycLR_ConstMom85 = cvTrainClassifier(data, features, nFolds, preprocParams,
{'version':'modelRelu', 'nIn':len(features),
'compileArgs':{**compileArgs, 'momentum':0.85}},
trainParams, useEarlyStop=False, plot=False,
useCallbacks={'LinearCLR':{'maxLR':5e2,'minLR':5e1,
'scale':stepScale, 'plotLR':False}})
results_CycLR_ConstMom90, history_CycLR_ConstMom90 = cvTrainClassifier(data, features, nFolds, preprocParams,
{'version':'modelRelu', 'nIn':len(features),
'compileArgs':{**compileArgs, 'momentum':0.90}},
trainParams, useEarlyStop=False, plot=False,
useCallbacks={'LinearCLR':{'maxLR':5e2,'minLR':5e1,
'scale':stepScale, 'plotLR':False}})
results_CycLR_ConstMom95, history_CycLR_ConstMom95 = cvTrainClassifier(data, features, nFolds, preprocParams,
{'version':'modelRelu', 'nIn':len(features),
'compileArgs':{**compileArgs, 'momentum':0.95}},
trainParams, useEarlyStop=False, plot=False,
useCallbacks={'LinearCLR':{'maxLR':5e2,'minLR':5e1,
'scale':stepScale, 'plotLR':False}})
results_CycLR_ConstMom99, history_CycLR_ConstMom99 = cvTrainClassifier(data, features, nFolds, preprocParams,
{'version':'modelRelu', 'nIn':len(features),
'compileArgs':{**compileArgs, 'momentum':0.99}},
trainParams, useEarlyStop=False, plot=False,
useCallbacks={'LinearCLR':{'maxLR':5e2,'minLR':5e1,
'scale':stepScale, 'plotLR':False}})
getModelHistoryComparisonPlot([history_CycLR_ConstMom85, history_CycLR_ConstMom90, history_CycLR_ConstMom95, history_CycLR_ConstMom99, history_ConstLR_ConstMom99],
['Cyclical LR [50-500], mom=0.85', 'Cyclical LR [50-500], mom=0.90', 'Cyclical LR [50-500], mom=0.95', 'Cyclical LR [50-500], mom=0.99', 'LR=500, Mom=0.99'], cv=True)
```
Here we see that moving to a cyclical LR schedule might help reduce the instability of using very high momenta. Comparing <span style="color:red">red</span> to <span style="color:purple">purple</span>, we find that the cyclical LR gives a slightly smoother loss evolution, reaches a better loss, and suppresses the late-stage overfitting.
### Cyclical LR, cyclical Momentum
```
stepScale = 4
results_CycLR_CycMom95_85, history_CycLR_CycMom95_85 = cvTrainClassifier(data, features, nFolds, preprocParams,
{'version':'modelRelu', 'nIn':len(features),
'compileArgs':compileArgs},
trainParams, useEarlyStop=False, plot=False,
useCallbacks={'LinearCLR':{'maxLR':5e2,'minLR':5e1,
'scale':stepScale, 'plotLR':False},
'LinearCMom':{'maxMom':0.95,'minMom':0.85,
'scale':stepScale, 'plotMom':False}})
results_CycLR_CycMom99_90, history_CycLR_CycMom99_90 = cvTrainClassifier(data, features, nFolds, preprocParams,
{'version':'modelRelu', 'nIn':len(features),
'compileArgs':compileArgs},
trainParams, useEarlyStop=False, plot=False,
useCallbacks={'LinearCLR':{'maxLR':5e2,'minLR':5e1,
'scale':stepScale, 'plotLR':False},
'LinearCMom':{'maxMom':0.99,'minMom':0.90,
'scale':stepScale, 'plotMom':False}})
results_CycLR_CycMom99_95, history_CycLR_CycMom99_95 = cvTrainClassifier(data, features, nFolds, preprocParams,
{'version':'modelRelu', 'nIn':len(features),
'compileArgs':compileArgs},
trainParams, useEarlyStop=False, plot=False,
useCallbacks={'LinearCLR':{'maxLR':5e2,'minLR':5e1,
'scale':stepScale, 'plotLR':False},
'LinearCMom':{'maxMom':0.99,'minMom':0.95,
'scale':stepScale, 'plotMom':False}})
getModelHistoryComparisonPlot([history_CycLR_CycMom95_85, history_CycLR_CycMom99_90, history_CycLR_CycMom99_95, history_ConstLR_CycMom99_95],
['Cyclical LR [50-500], Cyclical Mom [0.95-0.85]', 'Cyclical LR [50-500], Cyclical Mom [0.99-0.90]', 'Cyclical LR [50-500], Cyclical Mom [0.99-0.95]', 'LR=500, Cyclical Mom [0.99-0.95]'], cv=True)
```
Comparing the best CLR+CM setup (<span style="color:green">green</span>) to the fixed LR+CM setup (<span style="color:red">red</span>), it seems that cycling the LR degrades the performance of the network (the best loss is higher); however, the network never overfits: unlike the <span style="color:red">red</span> line, it reaches its minimum after 7 epochs and then plateaus. It is possible that the stability is actually a consequence of underfitting, in which case the learning rate could perhaps be increased.
### Comparison
```
getModelHistoryComparisonPlot([history_ConstLR_ConstMom99, history_ConstLR_CycMom99_95, history_CycLR_ConstMom99, history_CycLR_CycMom99_95],
                    ['LR=500, Mom=0.99', 'LR=500, Cyclical Mom [0.99-0.95]', 'Cyclical LR [50-500], Mom=0.99', 'Cyclical LR [50-500], Cyclical Mom [0.99-0.95]'], cv=True)
```
Comparing the best performing setups from each schedule configuration, it seems that of the hyperparameters tested, for this dataset and architecture, a cycled LR with a constant momentum (<span style="color:green">green</span>) provides the lowest loss, but eventually overfits.
Cycling the momentum and keeping the LR constant (<span style="color:orange">orange</span>) reaches almost as good a loss, but after 40% more epochs, and although it later provides less overfitting, it does suffer from regular peaks and troughs due to the cycling.
Cycling both the LR and the momentum (<span style="color:red">red</span>) causes convergence in the same number of epochs as <span style="color:green">green</span>, but at a higher loss. Having reached its minimum, the test loss then remains flat, possibly indicating that with further adjustments of the hyperparameters it might provide superior performance to <span style="color:green">green</span>.
```
__author__ = 'Tilii: https://kaggle.com/tilii7'
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import matplotlib.cm as cm
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
```
Simply loading the files without any transformation. If you wish to manipulate the data in any way, it should be done here before doing dimensionality reduction in subsequent steps.
```
print('\nLoading files ...')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
X = train.drop(['id', 'target'], axis=1).values
y = train['target'].values.astype(np.int8)
target_names = np.unique(y)
print('\nThere are %d unique target values in this dataset:' % (len(target_names)), target_names)
```
Principal Component Analysis (**[PCA](https://en.wikipedia.org/wiki/Principal_component_analysis)**) identifies the combination of components (directions in the feature space) that account for the most variance in the data.
```
n_comp = 20
# PCA
print('\nRunning PCA ...')
pca = PCA(n_components=n_comp, svd_solver='full', random_state=1001)
X_pca = pca.fit_transform(X)
print('Explained variance: %.4f' % pca.explained_variance_ratio_.sum())
print('Individual variance contributions:')
for j in range(n_comp):
print(pca.explained_variance_ratio_[j])
```
More than 90% of the variance is explained by a single principal component. Just a shade under 99% of the variance is explained by 15 components, which means that this dataset can be safely reduced to ~15 features.
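One way to double-check that claim (a small convenience snippet, assuming the `pca` object from the cell above) is to look at the cumulative explained variance and find the first component count that crosses a threshold:
```python
import numpy as np

cumulative = np.cumsum(pca.explained_variance_ratio_)
if cumulative[-1] >= 0.99:
    n_keep = int(np.argmax(cumulative >= 0.99)) + 1
    print('Components needed to reach 99%% of the variance: %d' % n_keep)
else:
    print('All %d components explain only %.4f of the variance' % (len(cumulative), cumulative[-1]))
```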
Here we plot our 0/1 samples on the first two principal components.
```
colors = ['blue', 'red']
plt.figure(1, figsize=(10, 10))
for color, i, target_name in zip(colors, [0, 1], target_names):
plt.scatter(X_pca[y == i, 0], X_pca[y == i, 1], color=color, s=1,
alpha=.8, label=target_name, marker='.')
plt.legend(loc='best', shadow=False, scatterpoints=3)
plt.title(
"Scatter plot of the training data projected on the 1st "
"and 2nd principal components")
plt.xlabel("Principal axis 1 - Explains %.1f %% of the variance" % (
pca.explained_variance_ratio_[0] * 100.0))
plt.ylabel("Principal axis 2 - Explains %.1f %% of the variance" % (
pca.explained_variance_ratio_[1] * 100.0))
plt.savefig('pca-porto-01.png', dpi=150)
plt.show()
```
There is a nice separation between various groups of customers, but not so between the 0/1 categories within each group. This is somewhat exaggerated by the fact that "0" points (blue) are plotted first and "1" points (red) are plotted last. There seems to be more red than blue in that image, even though in reality there are >25x more "0" points. I'd be grateful if someone knows how to plot this in a way that would not create this misleading impression.
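One possible way to soften that impression (just a suggestion, not a guaranteed fix) is to draw both classes in a single scatter call with the points shuffled into a random order, so neither class is systematically painted on top of the other:
```python
import numpy as np

order = np.random.RandomState(1001).permutation(len(y))
point_colors = np.where(y == 1, 'red', 'blue')
plt.figure(figsize=(10, 10))
plt.scatter(X_pca[order, 0], X_pca[order, 1], c=point_colors[order],
            s=1, alpha=.8, marker='.')
plt.title('Both classes drawn in a random order')
plt.show()
```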
Regardless, 0/1 points are not separated well at all. That means that they will not be easy to classify, which we all know by now.
**[t-SNE](https://lvdmaaten.github.io/tsne/)** could potentially lead to better data separation/visualization, because unlike PCA it preserves the local structure of data points. The problem with the sklearn implementation of t-SNE is its lack of memory optimization. I am pretty sure that the t-SNE code at the very bottom will lead to memory errors on most personal computers, but I leave it commented out in case anyone wants to try.
Instead, I ran t-SNE using a much faster and more memory-friendly commandline version, which can be found at the link above.
Here is the output of that exercise:

Again, we can see clear separation between different groups of customers. Some groups even have a nice "coffee bean" structure where two subgroups can be identified (gender?). Alas, there is no clear separation between 0/1 categories.
In strictly technical terms, we are screwed :D
```
# tsne = TSNE(n_components=2, init='pca', random_state=1001, perplexity=30, method='barnes_hut', n_iter=1000, verbose=1)
# X_tsne = tsne.fit_transform(X) # this will either fail or take a while (most likely overnight)
# plt.figure(2, figsize=(10, 10))
# for color, i, target_name in zip(colors, [0, 1], target_names):
# plt.scatter(X_tsne[y == i, 0], X_tsne[y == i, 1], color=color, s=1,
# alpha=.8, label=target_name, marker='.')
# plt.legend(loc='best', shadow=False, scatterpoints=3)
# plt.title('Scatter plot of t-SNE embedding')
# plt.xlabel('X')
# plt.ylabel('Y')
# plt.savefig('t-SNE-porto-01.png', dpi=150)
# plt.show()
```
It was kindly brought up to me that a strange-looking PCA plot above is probably because of categorical variables in this dataset. I leave the original plot up there for posterity.
Let's encode the categorical variables and try again.
```
from sklearn.preprocessing import MinMaxScaler
def scale_data(X, scaler=None):
if not scaler:
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler.fit(X)
X = scaler.transform(X)
return X, scaler
X = train.drop(['id', 'target'], axis=1)
test.drop(['id'], axis=1, inplace=True)
n_train = X.shape[0]
train_test = pd.concat((X, test)).reset_index(drop=True)
col_to_drop = X.columns[X.columns.str.endswith('_cat')]
col_to_dummify = X.columns[X.columns.str.endswith('_cat')].astype(str).tolist()
for col in col_to_dummify:
dummy = pd.get_dummies(train_test[col].astype('category'))
columns = dummy.columns.astype(str).tolist()
columns = [col + '_' + w for w in columns]
dummy.columns = columns
train_test = pd.concat((train_test, dummy), axis=1)
train_test.drop(col_to_dummify, axis=1, inplace=True)
train_test_scaled, scaler = scale_data(train_test)
X = np.array(train_test_scaled[:n_train, :])
test = np.array(train_test_scaled[n_train:, :])
print('\n Shape of processed train data:', X.shape)
print(' Shape of processed test data:', test.shape)
```
Repeating PCA and making another plot of the first two principal components.
```
print('\nRunning PCA again ...')
pca = PCA(n_components=n_comp, svd_solver='full', random_state=1001)
X_pca = pca.fit_transform(X)
print('Explained variance: %.4f' % pca.explained_variance_ratio_.sum())
print('Individual variance contributions:')
for j in range(n_comp):
print(pca.explained_variance_ratio_[j])
plt.figure(1, figsize=(10, 10))
for color, i, target_name in zip(colors, [0, 1], target_names):
plt.scatter(X_pca[y == i, 0], X_pca[y == i, 1], color=color, s=1,
alpha=.8, label=target_name, marker='.')
plt.legend(loc='best', shadow=False, scatterpoints=3)
plt.title(
"Scatter plot of the training data projected on the 1st "
"and 2nd principal components")
plt.xlabel("Principal axis 1 - Explains %.1f %% of the variance" % (
pca.explained_variance_ratio_[0] * 100.0))
plt.ylabel("Principal axis 2 - Explains %.1f %% of the variance" % (
pca.explained_variance_ratio_[1] * 100.0))
plt.savefig('pca-porto-02.png', dpi=150)
plt.show()
```
I think that's a better plot visually and there is a good number of well-defined clusters, but still no clear separation between 0/1 points.
We can re-do the t-SNE plot as well using the modified dataset. **Don't try this at home** - it takes 24+ hours using a commandline version of bh_tsne.
Anyway, here is the new t-SNE plot:

Again, lots of interesting clusters, but blue and red dots overlap for the most part.
This just happens to be a difficult classification problem, so maybe it is not a big surprise that the raw data does not contain enough info for t-SNE to distinguish clearly between the classes.
Unfortunately, it is not much better even after training. Below is a t-SNE plot of activations from the last (3rd) hidden layer of a neural network that was trained on this dataset for 80 epochs. If you download the full version (it is roughly 10.5 x 10.5 inches), you may be able to see better that lots of red dots are concentrated in the lower left quadrant (6-9 on a clock dial), and that there are clearly fewer red dots in the upper right quadrant (0-3 on a clock dial). So the network has succeeded somewhat in sequestering the red dots, but they still overlap quite a bit with the blue ones.

Later I will have more t-SNE plots from neural network activations in [__this kernel__](https://www.kaggle.com/tilii7/keras-averaging-runs-gini-early-stopping).
**This notebook is an exercise in the [Pandas](https://www.kaggle.com/learn/pandas) course. You can reference the tutorial at [this link](https://www.kaggle.com/residentmario/summary-functions-and-maps).**
---
# Introduction
Now you are ready to get a deeper understanding of your data.
Run the following cell to load your data and some utility functions (including code to check your answers).
```
import pandas as pd
pd.set_option("display.max_rows", 5)
reviews = pd.read_csv("../input/wine-reviews/winemag-data-130k-v2.csv", index_col=0)
from learntools.core import binder; binder.bind(globals())
from learntools.pandas.summary_functions_and_maps import *
print("Setup complete.")
reviews.head()
```
# Exercises
## 1.
What is the median of the `points` column in the `reviews` DataFrame?
```
median_points = reviews.points.median()
# Check your answer
q1.check()
#q1.hint()
q1.solution()
```
## 2.
What countries are represented in the dataset? (Your answer should not include any duplicates.)
```
countries = reviews.country.unique()
# Check your answer
q2.check()
#q2.hint()
q2.solution()
```
## 3.
How often does each country appear in the dataset? Create a Series `reviews_per_country` mapping countries to the count of reviews of wines from that country.
```
reviews_per_country = reviews.country.value_counts()
# Check your answer
q3.check()
#q3.hint()
q3.solution()
```
## 4.
Create variable `centered_price` containing a version of the `price` column with the mean price subtracted.
(Note: this 'centering' transformation is a common preprocessing step before applying various machine learning algorithms.)
```
centered_price = reviews.price - reviews.price.mean()
# Check your answer
q4.check()
q4.hint()
q4.solution()
```
## 5.
I'm an economical wine buyer. Which wine is the "best bargain"? Create a variable `bargain_wine` with the title of the wine with the highest points-to-price ratio in the dataset.
```
bargain_idx = (reviews.points / reviews.price).idxmax()
bargain_wine = reviews.loc[bargain_idx, 'title']
# Check your answer
q5.check()
#q5.hint()
q5.solution()
```
## 6.
There are only so many words you can use when describing a bottle of wine. Is a wine more likely to be "tropical" or "fruity"? Create a Series `descriptor_counts` counting how many times each of these two words appears in the `description` column in the dataset.
```
n_trop = reviews.description.map(lambda desc: "tropical" in desc).sum()
n_fruity = reviews.description.map(lambda desc: "fruity" in desc).sum()
descriptor_counts = pd.Series([n_trop, n_fruity], index=['tropical', 'fruity'])
# Check your answer
q6.check()
#q6.hint()
q6.solution()
```
## 7.
We'd like to host these wine reviews on our website, but a rating system ranging from 80 to 100 points is too hard to understand - we'd like to translate them into simple star ratings. A score of 95 or higher counts as 3 stars, a score of at least 85 but less than 95 is 2 stars. Any other score is 1 star.
Also, the Canadian Vintners Association bought a lot of ads on the site, so any wines from Canada should automatically get 3 stars, regardless of points.
Create a series `star_ratings` with the number of stars corresponding to each review in the dataset.
```
def stars(row):
if row.country == 'Canada':
return 3
elif row.points >= 95:
return 3
elif row.points >= 85:
return 2
else:
return 1
star_ratings = reviews.apply(stars, axis='columns')
# Check your answer
q7.check()
q7.hint()
q7.solution()
```
# Keep going
Continue to **[grouping and sorting](https://www.kaggle.com/residentmario/grouping-and-sorting)**.
---
*Have questions or comments? Visit the [course discussion forum](https://www.kaggle.com/learn/pandas/discussion) to chat with other learners.*
This demo provides examples of `ImageReader` class from `niftynet.io.image_reader` module.
What is `ImageReader`?
The main functionality of `ImageReader` is to search a set of folders, return a list of image files, and load the images into memory in an iterative manner.
A `tf.data.Dataset` instance can be initialised from an `ImageReader`; this makes the module readily usable as an input op to many tensorflow-based applications.
Why `ImageReader`?
- designed for medical imaging formats and applications
- works well with multi-modal input volumes
- works well with `tf.data.Dataset`
## Before the demo...
First make sure the source code is available, and import the module.
For NiftyNet installation, please checkout:
http://niftynet.readthedocs.io/en/dev/installation.html
```
import sys
niftynet_path = '/Users/bar/Documents/Niftynet/'
sys.path.append(niftynet_path)
from niftynet.io.image_reader import ImageReader
```
For demonstration purpose we download some demo data to `~/niftynet/data/`:
```
from niftynet.utilities.download import download
download('anisotropic_nets_brats_challenge_model_zoo_data')
```
## Use case: loading 3D volumes
```
from niftynet.io.image_reader import ImageReader
data_param = {'MR': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG'}}
reader = ImageReader().initialise(data_param)
reader.shapes, reader.tf_dtypes
# read data using the initialised reader
idx, image_data, interp_order = reader(idx=0)
image_data['MR'].shape, image_data['MR'].dtype
# randomly sample the list of images
for _ in range(3):
idx, image_data, _ = reader()
print('{} image: {}'.format(idx, image_data['MR'].shape))
```
The images are always read into a 5D-array, representing:
`[height, width, depth, time, channels]`
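For example, assuming the `image_data` dictionary from the cell above is still around, the plain 3D spatial volume can be recovered by indexing away the time and channel axes:
```python
# image_data['MR'] is a 5D array: height, width, depth, time, channels
volume = image_data['MR'][:, :, :, 0, 0]
print(image_data['MR'].shape, '->', volume.shape)
```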
## Use case: loading pairs of image and label by matching filenames
(In this case the loaded arrays are not concatenated.)
```
from niftynet.io.image_reader import ImageReader
data_param = {'image': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
'filename_contains': 'T2'},
'label': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
'filename_contains': 'Label'}}
reader = ImageReader().initialise(data_param)
# image file information (without loading the volumes)
reader.get_subject(0)
idx, image_data, interp_order = reader(idx=0)
image_data['image'].shape, image_data['label'].shape
```
## Use case: loading multiple modalities of image and label by matching filenames
The following code initialises a reader with four modalities, and the `'image'` output is a concatenation of arrays loaded from these files. (The files are concatenated at the fifth dimension)
```
from niftynet.io.image_reader import ImageReader
data_param = {'T1': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
'filename_contains': 'T1', 'filename_not_contains': 'T1c'},
'T1c': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
'filename_contains': 'T1c'},
'T2': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
'filename_contains': 'T2'},
'Flair': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
'filename_contains': 'Flair'},
'label': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
'filename_contains': 'Label'}}
grouping_param = {'image': ('T1', 'T1c', 'T2', 'Flair'), 'label':('label',)}
reader = ImageReader().initialise(data_param, grouping_param)
_, image_data, _ = reader(idx=0)
image_data['image'].shape, image_data['label'].shape
```
## More properties
The input specification supports additional properties, including
```python
{'csv_file', 'path_to_search',
'filename_contains', 'filename_not_contains',
'interp_order', 'pixdim', 'axcodes', 'spatial_window_size',
'loader'}
```
see also: http://niftynet.readthedocs.io/en/dev/config_spec.html#input-data-source-section
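As a rough sketch (the values below are illustrative assumptions only; see the config spec linked above for the exact accepted formats), these properties go into the same per-source dictionaries used throughout this demo:
```python
data_param = {'MR': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
                     'filename_contains': 'T1',
                     'filename_not_contains': 'T1c',
                     'interp_order': 3,                     # interpolation order used when resampling
                     'pixdim': (1.0, 1.0, 1.0),             # resample to this voxel size
                     'spatial_window_size': (64, 64, 64)}}  # fixed window shape
```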
## Using ImageReader with image-level data augmentation layers
```
from niftynet.io.image_reader import ImageReader
from niftynet.layer.rand_rotation import RandomRotationLayer as Rotate
data_param = {'MR': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG'}}
reader = ImageReader().initialise(data_param)
rotation_layer = Rotate()
rotation_layer.init_uniform_angle([-10.0, 10.0])
reader.add_preprocessing_layers([rotation_layer])
_, image_data, _ = reader(idx=0)
image_data['MR'].shape
# import matplotlib.pyplot as plt
# plt.imshow(image_data['MR'][:, :, 50, 0, 0])
# plt.show()
```
## Using ImageReader with `tf.data.Dataset`
```
import tensorflow as tf
from niftynet.io.image_reader import ImageReader
# initialise multi-modal image and label reader
data_param = {'T1': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
'filename_contains': 'T1', 'filename_not_contains': 'T1c'},
'T1c': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
'filename_contains': 'T1c'},
'T2': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
'filename_contains': 'T2'},
'Flair': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
'filename_contains': 'Flair'},
'label': {'path_to_search': '~/niftynet/data/BRATS_examples/HGG',
'filename_contains': 'Label'}}
grouping_param = {'image': ('T1', 'T1c', 'T2', 'Flair'), 'label':('label',)}
reader = ImageReader().initialise(data_param, grouping_param)
# reader as a generator
def image_label_pair_generator():
"""
A generator wrapper of an initialised reader.
:yield: a dictionary of images (numpy arrays).
"""
while True:
_, image_data, _ = reader()
yield image_data
# tensorflow dataset
dataset = tf.data.Dataset.from_generator(
image_label_pair_generator,
output_types=reader.tf_dtypes)
#output_shapes=reader.shapes)
dataset = dataset.batch(1)
iterator = dataset.make_initializable_iterator()
# run the tensorflow graph
with tf.Session() as sess:
sess.run(iterator.initializer)
for _ in range(3):
data_dict = sess.run(iterator.get_next())
print(data_dict.keys())
print('image: {}, label: {}'.format(
data_dict['image'].shape,
data_dict['label'].shape))
```
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc" style="margin-top: 1em;"><ul class="toc-item"><li><span><a href="#MSVO-3,-70" data-toc-modified-id="MSVO-3,-70-1"><span class="toc-item-num">1 </span>MSVO 3, 70</a></span></li><li><span><a href="#Text-Fabric" data-toc-modified-id="Text-Fabric-2"><span class="toc-item-num">2 </span>Text-Fabric</a></span></li><li><span><a href="#Installing-Text-Fabric" data-toc-modified-id="Installing-Text-Fabric-3"><span class="toc-item-num">3 </span>Installing Text-Fabric</a></span><ul class="toc-item"><li><span><a href="#Prerequisites" data-toc-modified-id="Prerequisites-3.1"><span class="toc-item-num">3.1 </span>Prerequisites</a></span></li><li><span><a href="#TF-itself" data-toc-modified-id="TF-itself-3.2"><span class="toc-item-num">3.2 </span>TF itself</a></span></li></ul></li><li><span><a href="#Pulling-up-a-tablet-and-its-transliteration-using-a-p-number" data-toc-modified-id="Pulling-up-a-tablet-and-its-transliteration-using-a-p-number-4"><span class="toc-item-num">4 </span>Pulling up a tablet and its transliteration using a p-number</a></span></li><li><span><a href="#Non-numerical-quads" data-toc-modified-id="Non-numerical-quads-5"><span class="toc-item-num">5 </span>Non-numerical quads</a></span></li><li><span><a href="#Generating-a-list-of-sign-frequency-and-saving-it-as-a-separate-file" data-toc-modified-id="Generating-a-list-of-sign-frequency-and-saving-it-as-a-separate-file-6"><span class="toc-item-num">6 </span>Generating a list of sign frequency and saving it as a separate file</a></span></li></ul></div>
# Primer 1
This notebook is meant for those with little or no familiarity with
[Text-Fabric](https://github.com/annotation/text-fabric) and will focus on several basic tasks, including calling up an individual proto-cuneiform tablet using a p-number, the coding of complex proto-cuneiform signs using what we will call "quads" and the identification of one of the numeral systems, and a quick look at the frequency of a few sign clusters. Each primer, including this one, will focus on a single tablet and explore three or four analytical possibilities. In this primer we look at MSVO 3, 70, which has the p-number P005381 at CDLI.
## MSVO 3, 70
The proto-cuneiform tablet known as MSVO 3, 70, is held in the British Museum, where it has the museum number BM 140852. The tablet dates to the Uruk III period, ca. 3200-3000 BCE, and is slated for publication in the third volume of Materialien zu den frühen Schriftzeugnissen des Vorderen Orients (MSVO). Up to now it has only appeared as a photo in Frühe Schrift (Nissen, Damerow and Englund 1990), p. 38.
We'll show the lineart for this tablet and its ATF transcription in a moment, including a link to this tablet on CDLI.
## Text-Fabric
Text-Fabric (TF) is a model for textual data with annotations that is optimized for efficient data analysis. As we will begin to see at the end of this primer, when we check the totals on the reverse of our primer tablet, Text-Fabric also facilitates the creation of new, derived data, which can be added to the original data.
Working with TF is a bit like buying from IKEA. You get all the bits and pieces in a box, and then you assemble it yourself. TF decomposes any dataset into its components, nicely stacked, with every component uniquely labeled. And then we use short reusable bits of code to do specific things. TF is based on a model proposed by [Doedens](http://books.google.nl/books?id=9ggOBRz1dO4C) that focuses on the essential properties of texts such as sequence and embedding. For a description of how Text-Fabric has been used for work on the Hebrew Bible, see Dirk Roorda's article [The Hebrew Bible as Data: Laboratory - Sharing - Experiences](https://doi.org/10.5334/bbi.18).
Once data is transformed into Text-Fabric, it can also be used to build rich online interfaces for specific groups of ancient texts. For the Hebrew Bible, have a look at [SHEBANQ](https://shebanq.ancient-data.org/hebrew/text).
The best environment for using Text-Fabric is in a [Jupyter Notebook](http://jupyter.readthedocs.io/en/latest/). This primer is in a Jupyter Notebook: the snippets of code can only be executed if you have installed Python 3, Jupyter Notebook, and Text-Fabric on your own computer.
## Installing Text-Fabric
### Prerequisites
You need to have Python on your system. Most systems have it out of the box,
but alas, that is python2 and we need at least python 3.6.
Install it from [python.org](https://www.python.org) or from [Anaconda](https://www.anaconda.com).
If you got it from python.org, you also have to install [Jupyter](https://jupyter.org).
### TF itself
```
pip install text-fabric
```
if you have installed Python with the help of Anaconda, or
```
sudo -H pip3 install text-fabric
```
if you have installed Python from [python.org](https://www.python.org).
###### Execute: If all this is done, the following cells can be executed.
```
import os, sys, collections
from IPython.display import display
from tf.extra.cunei import Cunei
LOC = ("~/github", "Nino-cunei/uruk", "primer1")
A = Cunei(*LOC)
A.api.makeAvailableIn(globals())
```
## Pulling up a tablet and its transliteration using a p-number
Each cuneiform tablet has a unique "p-number" and we can use this p-number in Text-Fabric to bring up any images and the transliteration of a tablet, here P005381.
There is a "node" in Text-Fabric for this tablet. How do we find it and display the transliteration?
* We *search* for the tablet by means of a template;
* we use functions `A.lineart()` and `A.getSource()` to bring up the lineart and transliterations of tablets.
```
pNum = "P005381"
query = f"""
tablet catalogId={pNum}
"""
results = A.search(query)
```
The `results` is a list of "records".
Here we have only one result: `results[0]`.
Each result record is a tuple of nodes mentioned in the template.
Here we only mentioned a single thing: `tablet`.
So we find the node of the matched tablets as the first member of the result records.
Hence the result tablet node is `results[0][0]`.
```
tablet = results[0][0]
A.lineart(tablet, width=300)
A.getSource(tablet)
```
Now we want to view the numerals on the tablet.
```
query = f"""
tablet catalogId={pNum}
sign type=numeral
"""
results = A.search(query)
```
It is easy to show them at a glance:
```
A.show(results)
```
Or we can show them in a table.
```
A.table(results)
```
There are a few different types of numerals here, but we are just going to look at the numbers belonging to the "shin prime prime" system, abbreviated here as "shinPP," which regularly adds two narrow horizontal wedges to each number. N04, which is the basic unit in this system, is the fourth, fifth and ninth of the preceding numerals: in the fourth occurrence repeated twice, in the fifth, three times and, unsurprisingly, in the ninth, which is the total on the reverse, five times. (N19, which is the next bundling unit in the same system, also occurs in the text.)
```
shinPP = dict(
N41=0.2,
N04=1,
N19=6,
N46=60,
N36=180,
N49=1800,
)
```
First, let's see if we can locate one of the occurrences of shinPP numerals, namely the set of 3(N04) in the first case of the second column on the obverse, using Text-Fabric.
```
query = f"""
tablet catalogId={pNum}
face type=obverse
column number=2
line number=1
=: sign
"""
results = A.search(query)
A.table(results)
```
Note the `=:` in `=: sign`. This is a device to require that the sign starts at the same position
as the `line` above it. Effectively, we ask for the first sign of the line.
Now the result records are tuples `(tablet, face, column, line, sign)`, so if we want
the sign part of the first result, we have to say `results[0][4]` (Python counts from 0).
```
num = results[0][4]
A.pretty(num, withNodes=True)
```
This number is the "node" in Text-Fabric that corresponds to the first sign in the first case of column 2. It is like a bar-code for that position in the entire corpus. Now let's make sure that this node, viz. 106602, is actually a numeral. To do this we check the feature "numeral" of the node 106602. And then we can use A.atfFromSign to extract the transliteration.
```
print(F.type.v(num) == "numeral")
print(A.atfFromSign(num))
```
Let's get the name of the numeral, viz. N04, and the number of times that it occurs. This amounts to splitting apart "3" and "(N04)" but since we are calling features in Text-Fabric rather than trying to pull elements out of the transliteration, we do not need to tweak the information.
```
grapheme = F.grapheme.v(num)
print(grapheme)
iteration = F.repeat.v(num)
print(iteration)
```
Now we can replace "N04" with its value, using the shinPP dictionary above, and then multiple this value by the number of iterations to arrive at the value of the numeral as a whole. Since each occurrence of the numeral N04 has a value of 1, three occurrences of it should have a value of 3.
```
valueFromDict = shinPP.get(grapheme)
value = iteration * valueFromDict
print(value)
```
Just to make sure that we are calculating these values correctly, let's try it again with a numeral whose value is not 1. There is a nice example in case 1b in column 1 on the obverse, where we have 3 occurrences of N19, each of which has a value of 6, so we expect the total value of 3(N19) to be 18.
```
query = f"""
tablet catalogId={pNum}
face type=obverse
column number=1
case number=1b
=: sign
"""
results = A.search(query)
A.table(results)
sign = results[0][4]
grapheme = F.grapheme.v(sign)
iteration = F.repeat.v(sign)
valueFromDict = shinPP.get(grapheme, 0)
value = iteration * valueFromDict
print(value)
```
The next step is to walk through the nodes on the obverse, add up the total of the shinPP system on the obverse, and then do the same for the reverse and see if the obverse and the total on the reverse add up. We expect the 3(N19) and 5(N04) on the obverse to add up to 23, viz. 18 + 5 = 23.
```
shinPPpat = "|".join(shinPP)
query = f"""
tablet catalogId={pNum}
face
sign grapheme={shinPPpat}
"""
results = A.search(query)
A.show(results)
sums = collections.Counter()
for (tablet, face, num) in results:
grapheme = F.grapheme.v(num)
iteration = F.repeat.v(num)
valueFromDict = shinPP[grapheme]
value = iteration * valueFromDict
sums[F.type.v(face)] += value
for faceType in sums:
print(f"{faceType}: {sums[faceType]}")
```
It adds up!
## Non-numerical quads
Now that we have identified the numeral system in the first case of column 2 on the obverse, let's also see what we can find out about the non-numeral signs in the same case.
We use the term "quad" to refer to all orthographic elements that occupy the space of a single proto-cuneiform sign on the surface of the tablet. This includes both an individual proto-cuneiform sign operating on its own as well as combinations of signs that occupy the same space. One of the most elaborate quads in the proto-cuneiform corpus is the following:
```
|SZU2.((HI+1(N57))+(HI+1(N57)))|
```
This quad has two sub-quads `SZU2`, `(HI+1(N57))+(HI+1(N57))`, and the second sub-quad also consists of two sub-quads `HI+1(N57)` and `HI+1(N57)`; both of these sub-quads are, in turn, composed of two further sub-quads `HI` and `1(N57)`.
First we need to pick this super-quad out of the rest of the line: this is how we get the transliteration of the entire line:
```
query = f"""
tablet catalogId={pNum}
face type=obverse
column number=2
line number=1
"""
results = A.search(query)
line = results[0][3]
A.pretty(line, withNodes=True)
```
We can just read off the node of the biggest quad.
```
bigQuad = 143015
```
Now that we have identified the "bigQuad," we can also ask Text-Fabric to show us what it looks like.
```
A.lineart(bigQuad)
```
This extremely complex quad, viz. |SZU2.((HI+1(N57))+(HI+1(N57)))|, is a hapax legomenon, meaning that it only occurs here, but there are three other non-numeral quads in this line besides |SZU2.((HI+1(N57))+(HI+1(N57)))|, namely |GISZ.TE|, GAR and GI4~a, so let's see how frequent these four non-numeral signs are in the proto-cuneiform corpus. We can do this sign by sign using the function "F.grapheme.s()".
```
GISZTEs = F.grapheme.s("|GISZ.TE|")
print(f"|GISZ.TE| {len(GISZTEs)} times")
GARs = F.grapheme.s("GAR")
print(f"GAR = {len(GARs)} times")
GI4s = F.grapheme.s("GI4")
print(f"GI4 = {len(GI4s)} times")
```
There are two problems here that we need to resolve in order to get good numbers: we have to get Text-Fabric to count |GISZ.TE| as a single unit, even though it is composed of two distinct graphemes, and we have to ask it to recognize and count the "a" variant of "GI4". In order to count the number of quads that consist of GISZ and TE, namely |GISZ.TE|, it is convenient to make a frequency index for all quads.
We walk through all the quads, pick up its ATF, and count the frequencies of ATF representations.
```
quadFreqs = collections.Counter()
for q in F.otype.s("quad"):
quadFreqs[A.atfFromQuad(q)] += 1
```
With this in hand, we can quickly count how many quads there are that have both signs `GISZ` and `TE` in them.
Added bonus: we shall also see whether there are quads with both of these signs but composed with other operators and signs as well.
```
for qAtf in quadFreqs:
if "GISZ" in qAtf and "TE" in qAtf:
print(f"{qAtf} ={quadFreqs[qAtf]:>4} times")
```
And we can also look at the set of quads in which GISZ co-occurs with another sign, and likewise, the set of quads in which TE co-occurs with another sign.
```
for qAtf in quadFreqs:
if "GISZ" in qAtf:
print(f"{quadFreqs[qAtf]:>4} x {qAtf}")
for qAtf in quadFreqs:
if "TE" in qAtf:
print(f"{quadFreqs[qAtf]:>4} x {qAtf}")
```
Most of the time, however, when we are interested in particular sign frequencies, we want to cast a wide net and get the frequency of any possibly related sign or quad. The best way to do this is to check the ATF of any sign or quad that might be relevant and add up the number of its occurrences in the corpus. This following script checks both signs and quads and casts the net widely. It looks for the frequency of our same three signs/quads, namely GAR, GI4~a and |GISZ.TE|.
```
quadSignFreqs = collections.Counter()
quadSignTypes = {"quad", "sign"}
for n in N():
nType = F.otype.v(n)
if nType not in quadSignTypes:
continue
atf = A.atfFromQuad(n) if nType == "quad" else A.atfFromSign(n)
quadSignFreqs[atf] += 1
```
We now have a frequency index for all signs and quads in their ATF representation.
Note that if a sign is part of a bigger quad, its occurrence there will be counted as an occurrence of the sign.
```
selectedAtfs = []
for qsAtf in quadSignFreqs:
if "GAR" in qsAtf or "GI4~a" in qsAtf or "|GISZ.TE|" in qsAtf:
selectedAtfs.append(qsAtf)
print(f"{quadSignFreqs[qsAtf]:>4} x {qsAtf}")
```
Let's draw all these quads.
```
for sAtf in selectedAtfs:
A.lineart(sAtf, width="5em", height="5em", withCaption="right")
```
Besides our three targets, 34 occurrences of GI4~a, 401 of GAR and 26 of |GISZ.TE|:
34 x GI4~a
401 x GAR
26 x |GISZ.TE|
it has also pulled in a number of quads that include either GAR or GI4~a, among others:
20 x |ZATU651xGAR|
3 x |NINDA2xGAR|
6 x |4(N57).GAR|
1 x |GI4~a&GI4~a|
1 x |GI4~axA|
The search also pulls in signs that only resemble GAR in transliteration, such as LAGAR or GARA2, but as long as we know what we are looking for, this type of broader frequency count can be quite useful.
## Generating a list of sign frequency and saving it as a separate file
First, we are going to count the number of distinct signs in the corpus, look at the top hits in the list and finally save the full list to a separate file. Then we will do the same for the quads, and then lastly we are going to combine these two lists and save them as a single frequency list for both signs and quads.
```
fullGraphemes = collections.Counter()
for n in F.otype.s("sign"):
grapheme = F.grapheme.v(n)
if grapheme == "" or grapheme == "…":
continue
fullGrapheme = A.atfFromSign(n)
fullGraphemes[fullGrapheme] += 1
len(fullGraphemes)
```
So there are 1477 distinct proto-cuneiform signs in the corpus. The following snippet of code will show us the first 20 signs on that list.
```
for (value, frequency) in sorted(
fullGraphemes.items(),
key=lambda x: (-x[1], x[0]),
)[0:20]:
print(f"{frequency:>5} x {value}")
```
Now we are going to write the full set of sign frequency results to two files in your `_temp` directory, within this repo. The two files are called:
* `grapheme-alpha.txt`, an alphabetic list of graphemes, along with the frequency of each sign, and
* `grapheme-freq.txt`, which runs from the most frequent to the least.
```
def writeFreqs(fileName, data, dataName):
print(f"There are {len(data)} {dataName}s")
for (sortName, sortKey) in (
("alpha", lambda x: (x[0], -x[1])),
("freq", lambda x: (-x[1], x[0])),
):
with open(f"{A.tempDir}/{fileName}-{sortName}.txt", "w") as fh:
            for (item, freq) in sorted(data.items(), key=sortKey):
if item != "":
fh.write(f"{freq:>5} x {item}\n")
```
Now let's go through some of the same steps for quads rather than individual signs, and then export a single frequency list for both signs and quads.
```
quadFreqs = collections.Counter()
for q in F.otype.s("quad"):
quadFreqs[A.atfFromQuad(q)] += 1
print(len(quadFreqs))
```
So there are 740 quads in the corpus, and now we ask for the twenty most frequently attested quads.
```
for (value, frequency) in sorted(
quadFreqs.items(),
key=lambda x: (-x[1], x[0]),
)[0:20]:
print(f"{frequency:>5} x {value}")
```
And for the final task in this primer, we ask Text-Fabric to export a frequency list of both signs and quads in a separate file.
```
reportDir = "reports"
os.makedirs(reportDir, exist_ok=True)
def writeFreqs(fileName, data, dataName):
print(f"There are {len(data)} {dataName}s")
for (sortName, sortKey) in (
("alpha", lambda x: (x[0], -x[1])),
("freq", lambda x: (-x[1], x[0])),
):
with open(f"{reportDir}/{fileName}-{sortName}.txt", "w") as fh:
for (item, freq) in sorted(data.items(), key=sortKey):
if item != "":
fh.write(f"{freq:>5} x {item}\n")
```
This shows up as a pair of files named "quad-signs-alpha.txt" and "quad-signs-freq.txt" and if we copy a few pieces of the quad-signs-freq.txt file here, they look something like this:
29413 x ...
12983 x 1(N01)
6870 x X
3080 x 2(N01)
2584 x 1(N14)
1830 x EN~a
1598 x 3(N01)
1357 x 2(N14)
1294 x 5(N01)
1294 x SZE~a
1164 x GAL~a
Only much farther down the list do we see signs and quads interspersed; here are the signs/quads around 88 occurrences:
88 x NIMGIR
88 x NIM~a
88 x SUG5
86 x EN~b
86 x NAMESZDA
86 x |GI&GI|
85 x GU
85 x |GA~a.ZATU753|
84 x BAD~a
84 x NA2~a
84 x ZATU651
84 x |1(N58).BAD~a|
83 x ZATU759
# Object-Oriented Python
During this session, we will be exploring the Oriented-Object paradigm in Python using all what we did with Pandas in previous sessions. We will be working with the same data of aircraft supervising latest Tour de France.
```
import pandas as pd
df = pd.read_json("../data/tour_de_france.json.gz")
```
There are three main principles around OOP:
- **encapsulation**: objects embed properties (attributes, methods);
- **interface**: objects expose and document services, they hide all about their inner behaviour;
- **factorisation**: objects/classes with similar behaviour are grouped together.
A common way of working with Python is to implement **protocols**. Protocols are informal interfaces defined by a set of methods allowing an object to play a particular role in the system. For instance, for an object to behave as an iterable you don't need to subclass an abstract class Iterable or implement an Iterable interface explicitly: it is enough to implement the special `__iter__` method, or even just `__getitem__` (we will go through these concepts hereunder).
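As a toy illustration of the sequence protocol (unrelated to the flight data used below), an object that only defines `__getitem__` for integer indices starting at 0 can already be iterated over:
```python
class Squares:
    """Iterable through the sequence protocol: only __getitem__ is defined."""
    def __init__(self, n):
        self.n = n

    def __getitem__(self, index):
        # Python's iteration fallback calls this with 0, 1, 2, ... until IndexError
        if index >= self.n:
            raise IndexError(index)
        return index ** 2

list(Squares(5))  # [0, 1, 4, 9, 16]
```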
Let's have a look at the built-in function `sorted`: it expects an **iterable** structure of **comparable** objects and returns a sorted list of these objects:
```
sorted([-2, 4, 0])
```
However it fails when the objects are not comparable:
```
sorted([-1, 1+1j, 1-2j])
```
Then we can write our own ComparableComplex class and implement a comparison based on the modulus. The **comparable** protocol expects the `<` operator to be defined (special method: `__lt__`)
```
class ComparableComplex(complex):
def __lt__(a, b):
return abs(a) < abs(b)
# Now this works: note the input is not a list but a generator.
sorted(ComparableComplex(i) for i in [-1, 1 + 1j, 1 - 2j])
```
We will be working with different views of pandas DataFrame for trajectories and collection of trajectories. Before we start any further, let's remember two ways to factorise behaviours in Object-Oriented Programming: **inheritance** and **composition**.
The best way to do is not always obvious and it often takes experience to find the good and bad sides of both paradigms.
In our previous examples, our ComparableComplex *offered not much more* than complex numbers. As long as we don't need to compare them, we could have *put them in a list together* with regular complex numbers *without loss of generality*: after all a ComparableComplex **is** a complex. That's a good smell for **inheritance**.
If we think about our trajectories, we will build them around pandas DataFrames. Trajectories will probably have a single attribute: the dataframe. It could be tempting to inherit from `pd.DataFrame`; it will probably work fine in the beginning but problems will occur sooner than expected (most likely with inconsistent interfaces). We **model** trajectories and collections of trajectories with dataframes, but a trajectory **is not** a dataframe. Be reasonable and go for **composition**.
So now we can start.
- The `__init__` special method defines a constructor. `self` is necessary: it represents the current object.
Note that **the constructor does not return anything**.
```
class FlightCollection:
def __init__(self, data):
self.data = data
class Flight:
def __init__(self, data):
self.data = data
FlightCollection(df)
```
## Special methods
There is nothing much we did at this point: just two classes holding a dataframe as an attribute. Even the output representation is the default one based on the class name and the object's address in memory.
- we can **override** the special `__repr__` method (which **returns** a string—**do NOT** `print`!) in order to display a more relevant output. You may use the number of lines in the underlying dataframe for instance.
<div class='alert alert-warning'>
<b>Exercice:</b> Write a relevant <code>__repr__</code> method.
</div>
```
# %load ../solutions/pandas_oo/flight_repr.py
"{0!r}".format(FlightCollection(df))
```
Note that we passed the dataframe in the constructor. We want to keep it that way (we will see later why). However we may want to create a different type of constructor to read directly from the JSON file. There is a special kind of keyword for that.
- `@classmethod` is a decorator to put before a method. It makes it a **class method**, i.e. you call it on the class and not on an instance. The first parameter is no longer `self` (the instance) but, by convention, `cls` (the class).
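As a generic illustration (deliberately unrelated to the exercise below, so it does not give away the solution), an alternative constructor typically looks like this:
```python
class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    @classmethod
    def from_tuple(cls, pair):
        # cls is the class itself, so subclasses get an instance of the right type back
        return cls(pair[0], pair[1])

Point.from_tuple((3, 4))
```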
<div class='alert alert-warning'>
<b>Exercice:</b> Write a relevant <code>read_json</code> class method.
</div>
```
# %load ../solutions/pandas_oo/flight_json.py
collection = FlightCollection.read_json("../data/tour_de_france.json.gz")
```
Now we want to make this `FlightCollection` iterable.
- The special method to implement is `__iter__`. This method takes no argument and **yields** elements one after the other.
<div class='alert alert-warning'>
<b>Exercice:</b> Write a relevant <code>__iter__</code> method which yields Flight instances.
</div>
Of course, you should reuse the code of last session about iteration.
```
# %load ../solutions/pandas_oo/flight_iter.py
collection = FlightCollection.read_json("../data/tour_de_france.json.gz")
for flight in collection:
print(flight)
```
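One possible sketch: it assumes, as in the previous session, that individual flights can be separated by their `icao24`, `callsign` and day of flight, and that the `timestamp` column has a datetime dtype (these are assumptions, the solution file may differ):
```
class Flight:
    def __init__(self, data):
        self.data = data

class FlightCollection:
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        # group the underlying data per aircraft, callsign and day,
        # then yield one Flight per group
        day = self.data["timestamp"].dt.date
        for _, chunk in self.data.groupby(["icao24", "callsign", day]):
            yield Flight(chunk)
```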
<div class='alert alert-warning'>
<b>Exercise:</b> Write a relevant <code>__repr__</code> method for Flight, including the callsign, the aircraft icao24 code and the day of the flight.
</div>
```
# %load ../solutions/pandas_oo/flight_nice_repr.py
for flight in collection:
print(flight)
```
<div class='alert alert-success'>
<b>Note:</b> Since our FlightCollection is iterable, we can pass it to any method accepting iterable structures.
</div>
```
list(collection)
```
<div class='alert alert-warning'>
<b>Warning:</b> However, it won't work here, because Flight instances cannot be compared unless we specify the criterion to compare them on.
</div>
```
sorted(collection)
sorted(collection, key=lambda x: x.min("timestamp"))
```
<div class='alert alert-warning'>
<b>Exercise:</b> Implement the proper missing method so that a FlightCollection can be sorted.
</div>
```
# %load ../solutions/pandas_oo/flight_sort.py
sorted(collection)
```
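`sorted` only needs the `__lt__` special method on Flight. A possible sketch, comparing flights by their start time and reusing the `min("timestamp")` idea from the `key` argument above:
```
class Flight:
    def __init__(self, data):
        self.data = data

    def min(self, feature):
        # smallest value of a given column, e.g. the first timestamp
        return self.data[feature].min()

    def __lt__(self, other):
        # a flight is "smaller" than another one if it starts earlier
        return self.min("timestamp") < other.min("timestamp")
```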
## Data visualisation
See the following snippet of code for plotting trajectories on a map.
```
import matplotlib.pyplot as plt
from cartopy.crs import EuroPP, PlateCarree
fig, ax = plt.subplots(figsize=(10, 10), subplot_kw=dict(projection=EuroPP()))
ax.coastlines("50m")
for flight in collection:
flight.data.plot(
ax=ax,
x="longitude",
y="latitude",
legend=False,
transform=PlateCarree(),
color="steelblue",
)
ax.set_extent((-5, 10, 42, 52))
ax.set_yticks([])
```
<div class='alert alert-warning'>
<b>Exercise:</b> Implement a plot method to make the job even simpler.
</div>
```
# %load ../solutions/pandas_oo/flight_plot.py
fig, ax = plt.subplots(figsize=(10, 10), subplot_kw=dict(projection=EuroPP()))
ax.coastlines("50m")
for flight in collection:
flight.plot(ax, color="steelblue")
ax.set_extent((-5, 10, 42, 52))
ax.set_yticks([])
```
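A possible `plot` method is little more than a wrapper around the pandas call shown above; extra keyword arguments (colour, line width, ...) are forwarded to pandas:
```
from cartopy.crs import PlateCarree

class Flight:
    def __init__(self, data):
        self.data = data

    def plot(self, ax, **kwargs):
        # ax is expected to be a Cartopy GeoAxes, as in the snippet above
        self.data.plot(
            ax=ax,
            x="longitude",
            y="latitude",
            legend=False,
            transform=PlateCarree(),
            **kwargs,
        )
```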
## Indexation
So far, we have implemented everything necessary to iterate over our structures.
This means we have all we need to yield elements one after the other.
Note that:
- Python does not assume your structure has a length.
(There are some infinite iterators, like the one yielding natural integers one after the other.)
- Python cannot guess for you how you want to index your flights.
```
len(collection)
collection['ASR172B']
```
There are many ways to proceed with indexing. We may want to select flights with a specific callsign, or a specific icao24 code. Also, if only one Flight is returned, we want a Flight object. If two or more segments are contained in the underlying dataframe, we want to stick to a FlightCollection.
<div class="alert alert-warning">
<b>Exercise:</b> Implement a <code>__len__</code> special method, then a <code>__getitem__</code> special method that will return a Flight or a FlightCollection (depending on the selection) wrapping data corresponding to the given callsign or icao24 code.
</div>
```
# %load ../solutions/pandas_oo/flight_index.py
collection = FlightCollection.read_json("../data/tour_de_france.json.gz")
collection
collection["3924a0"]
collection["ASR172B"]
from collections import defaultdict
count = defaultdict(int)
for flight in collection["ASR172B"]:
count[flight.icao24] += 1
count
```
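A sketch of what these two methods could look like; the heuristic used to decide between a Flight and a FlightCollection is an assumption here, and the solution file may use a different criterion:
```
class Flight:
    def __init__(self, data):
        self.data = data

class FlightCollection:
    def __init__(self, data):
        self.data = data

    def __len__(self):
        return self.data.shape[0]

    def __getitem__(self, key):
        # keep the rows matching either the callsign or the icao24 code
        subset = self.data.query("callsign == @key or icao24 == @key")
        # heuristic: a single aircraft/callsign pair left => a single Flight
        if subset[["icao24", "callsign"]].drop_duplicates().shape[0] == 1:
            return Flight(subset)
        return FlightCollection(subset)
```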
As we can see here, this method of indexing is not convenient enough. We could select the only flight with `collection["ASR172B"]["3924a0"]`, but with the current implementation there is no way to separate the 18 other flights.
<div class='alert alert-warning'>
<b>Exercise:</b> Implement a different <code>__getitem__</code> method that checks the type of the index: filter on callsign/icao24 if the key is a <code>str</code>, filter on the day of the flight if the key is a <code>pd.Timestamp</code>.
</div>
```
# %load ../solutions/pandas_oo/flight_index_time.py
collection = FlightCollection.read_json("../data/tour_de_france.json.gz")
collection["ASR172B"][pd.Timestamp("2019-07-18")]
```
<div class='alert alert-warning'>
<b>Exercise:</b> Plot all trajectories flying on July 18th. How can they be sure not to collide with each other?
</div>
```
# %load ../solutions/pandas_oo/flight_plot_july18.py
```
# Import packages and functions
```
import sys
# force the notebook to look for files in the upper level directory
sys.path.insert(1, '../')
import pandas as pd
from glob import glob
import pymatgen as mg
from data.compound_featurizer import read_new_struct, \
get_struct, get_elem_info, get_elem_distances, \
calc_mm_dists, calc_mx_dists, calc_xx_dists, calc_elem_max_potential
```
# Read in the initial dataframe
```
# initialize an empty list of dataframes
df_lst = []
# iterate over all the cif files
for struct_file_path in glob("./user_defined_structures/featurizer_sub_function_demo/*.cif"):
# add the newly read in dataframe to the list
df_lst.append(read_new_struct(struct_file_path))
# concatenate all the dataframes in the list
df = pd.concat(df_lst, ignore_index=True)
# assign oxidation states to BaTiO3 and Mg2AlFeO5
df.at[df[df.Compound == "BaTiO3"].index[0], "structure"].add_oxidation_state_by_element({"Ba": 2, "Ti": 4, "O": -2})
df.at[df[df.Compound == "Mg2AlFeO5"].index[0], "structure"].add_oxidation_state_by_element({"Mg": 2, "Al": 3, "Fe": 3, "O": -2})
# here is a print out of the dataframe
df
```
# Demo usage of relevant sub-functions
## 1. get_struct("compound_formula", input_df) -> Pymatgen Structure
Since we've already read all the structures into the dataframe, we can access an individual Pymatgen structure using its compound formula.
_Tip_: when you have questions about a specific function, you can always go to the original .py file or you can press <kbd>⇧ Shift</kbd> + <kbd>⇥ Tab</kbd> for its docstring
```
test_struct = get_struct("BaTiO3", df)
test_struct
```
If you happen to type in a formula that doesn't have an exact match, the function will return an error message along with several possible suggestions
```
get_struct("BaTiO", df)
```
_BaTiO3_ will be used consistently as the demo test structure from now on.
## 2. get_elem_distances(Pymatgen_Structure, Pymatgen_Element_1, Pymatgen_Element_2) -> Array of distances (Å)
Now that we have the structure, we can use **get_elem_distances()** to calculate the distance between any two elements in the structure.
But before doing that, we first need to know which site(s) each element occupies, via the **get_elem_info()** function.
```
elem_indices, _, modified_struct = get_elem_info(test_struct)
print(elem_indices, "\n")
print(modified_struct)
```
If you compare this to the printout of the original structure, you will find that the modified structure has double the number of sites.
```
print(test_struct)
```
This is because, without making a supercell, _Ba_ and _Ti_ each occupy only one site
```
elem_indices_orig, *_ = get_elem_info(test_struct, makesupercell=False)
elem_indices_orig
```
The reason for returning a supercell of the original structure is related to the inner workings of the **get_elem_distances()** function. It works by getting the site indices of the two elements (which can be the same) and using the built-in method **pymatgen.Structure.get_distance(i, j)** to calculate the distance between site i and site j. There is one scenario where using only the original structure causes a problem:
1. If an element occupies only one site and we want the distance between atoms of that same element, e.g. _Ba_-_Ba_ or _Ti_-_Ti_ in _BaTiO3_, we would call **pymatgen.Structure.get_distance(i, j)** with i = j and would only get 0 for that distance.
By making a supercell (in this case a'=2a, b'=b, c'=c), we can get a non-zero distance between the original site and the newly translated site along the a-axis. That being said, if all elements in the original structure occupy more than one site, the structure will not be modified.
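For reference, building such a supercell by hand in Pymatgen is a one-liner. The sketch below is only an illustration of the idea and is not part of the featurizer module:
```
# illustrate the supercell trick described above on a copy of the test structure
supercell = test_struct.copy()
supercell.make_supercell([2, 1, 1])  # a' = 2a, b' = b, c' = c
print(len(test_struct), "sites ->", len(supercell), "sites")
```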
Let's first try to calculate the _Ba_-_Ba_ distance using the supercell structure
```
get_elem_distances(test_struct,
elem_1=mg.Element("Ba"),
elem_indices=elem_indices, only_unique=True)
```
**Note**: when the `only_unique` parameter is set to `True`, the function only returns the unique distance values, since in a structure the same distance can occur multiple times due to symmetry.
Let's see what happens when we use the original reduced structure
```
get_elem_distances(test_struct,
elem_1=mg.Element("Ba"),
elem_indices=elem_indices_orig, only_unique=True)
```
As expected, we get 0 Å. We can also calculate the distance between different elements. Let's see the distance between _Ti_ and _O_
```
get_elem_distances(test_struct,
elem_1=mg.Element("O"), elem_2=mg.Element("Ti"),
elem_indices=elem_indices_orig, only_unique=True)
```
This function can also handle structures where multiple elements can occupy the same site (La$_{2.8}$Mg$_{1.2}$Mn$_4$O$_{12}$ is a made-up structure generated for the purpose of this demo)
```
special_struct = get_struct("La2.8Mg1.2Mn4O12", df)
print(special_struct)
elem_indices, *_ = get_elem_info(special_struct)
distances = get_elem_distances(special_struct,
elem_1=mg.Element("La"), elem_2=mg.Element("Mn"),
elem_indices=elem_indices, only_unique=True)
distances
```
Some of the distances may seem equal to each other, but because the displayed values are rounded, there are actually slight differences among them.
```
distances[0] - distances[1]
```
## 3. Wrapper functions around get_elem_distances() to calculate distances between different types of elements
### 3.1 calc_mm_dists() to calculate distances between metal-metal elements
```
calc_mm_dists(test_struct, return_unique=True)
```
### 3.2 calc_mx_dists() to calculate distances between metal-non_metal elements
```
calc_mx_dists(test_struct, return_unique=True)
```
### 3.3 calc_xx_dists() to calculate distances between non_metal-non_metal elements
```
calc_xx_dists(test_struct, return_unique=True)
```
This functionality is again realized through the **get_elem_info()** function, where every element in the structure is classified as either a metal or a non_metal.
```
_, elem_groups, _ = get_elem_info(test_struct)
elem_groups
```
Once we know which elements are metals and which are non_metals, we can use elem_indices to find where they are (i.e. the site indices) and compute the distances using the generic element distance finder **get_elem_distances()**.
## 4. calc_elem_max_potential() to calculate Madelung Site Potentials
The **calc_elem_max_potential()** function utilizes the EwaldSummation() module from Pymatgen to calculate the site energy for every site in a structure and converts the site energy to a site potential using the following relation. ($U_{E_\text{tot}}$: the total potential energy of the structure, $U_{E_i}$: the site energy at site i, $N$: the total number of sites, $q_i$: the charge at site i, $\Phi(r_i)$: the site potential at site i)
$$
\begin{align*}
U_{E_\text{tot}}&=\sum_{i=1}^{N}U_{E_i}=\frac{1}{2}\sum_{i=1}^{N}q_i\Phi(r_i)\\
U_{E_i}&=\frac{1}{2}q_i\Phi(r_i)\\
\Phi(r_i)&=\frac{2U_{E_i}}{q_i}
\end{align*}
$$
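As a rough illustration of that last relation (not the actual implementation of **calc_elem_max_potential()**), the site potentials can be computed directly from Pymatgen's Ewald summation. This sketch assumes an ordered structure with oxidation states and that `get_site_energy` is available in your Pymatgen version; the site energies are in eV, so dividing by the charge in units of e gives volts:
```
from pymatgen.analysis.ewald import EwaldSummation

ews = EwaldSummation(test_struct)
site_potentials = [
    2 * ews.get_site_energy(i) / site.specie.oxi_state  # Phi(r_i) = 2 U_i / q_i
    for i, site in enumerate(test_struct)
]
print(site_potentials)
```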
The default output unit for the Madelung site potential is $V$.
```
calc_elem_max_potential(test_struct, full_list=True)
```
But the unit can be converted from $V$ to $e/Å$ for easier comparison with the results from VESTA
```
calc_elem_max_potential(test_struct, full_list=True, check_vesta=True)
```
If we don't specify the `full_list` parameter, it will be set to `False` and the function only returns the maximum site potential for each element.
```
calc_elem_max_potential(test_struct)
```
Just like before, this function can also work with structures where multiple elements occupy the same site. We can try a compound with non-integer stoichiometry this time. (again, Mg$_2$AlFeO$_5$ is a made-up structure)
```
non_stoich_struct = get_struct("Mg2AlFeO5", df)
print(non_stoich_struct)
calc_elem_max_potential(non_stoich_struct, check_vesta=True)
```
# Now it's your turn
If you want to test the functions with structures that are not in the loaded dataframe, you can upload your own .cif file to the user-defined structures folder located at this path:
_./user_defined_structures/_
```
USER_DEFINED_FOLDER_PATH = "./user_defined_structures/"
example_new_struct = mg.Structure.from_file(USER_DEFINED_FOLDER_PATH + "CuNiO2_mp-1178372_primitive.cif")
example_new_struct
```
## Define a wrapper function around get_elem_distances()
```
def get_elem_distances_wrapper(structure: mg.Structure, **kwargs):
"""A wrapper function around get_elem_distances() such that there is no need to get elem_indices manually"""
elem_indices, _, structure = get_elem_info(structure)
return get_elem_distances(structure, elem_indices=elem_indices, only_unique=True, **kwargs)
```
Check the _Cu_-_Ni_ distance
```
get_elem_distances_wrapper(example_new_struct, elem_1=mg.Element("Cu"), elem_2=mg.Element("Ni"))
```
Check the _Ni_-_O_ distance
```
get_elem_distances_wrapper(example_new_struct, elem_1=mg.Element("O"), elem_2=mg.Element("Ni"))
```
Check the _Cu_-_Cu_ distance
```
get_elem_distances_wrapper(example_new_struct, elem_1=mg.Element("Cu"))
```
## Get distances of all three types of element pairs
```
calc_mm_dists(example_new_struct)
calc_mx_dists(example_new_struct)
calc_xx_dists(example_new_struct)
```
## A note for site potential calculation
To use the EwaldSummation technique, the input structure has to have oxidation states (that's where the charge value comes from) associated with all the sites. A structure without oxidation states will raise an error in the function.
```
calc_elem_max_potential(example_new_struct)
```
To overcome this problem, we can add oxidation states to the structure using the add_oxidation_state_by_guess() method from Pymatgen
```
example_new_struct.add_oxidation_state_by_guess()
example_new_struct
```
Now we should be able to obtain proper results from the function.
```
calc_elem_max_potential(example_new_struct, check_vesta=True)
```
# MAPEM de Pierro algorithm for the Bowsher prior
One of the more popular methods for guiding a reconstruction based on a high quality image was suggested by Bowsher. This notebook explores this prior.
We highly recommend you look at the [PET/MAPEM](../PET/MAPEM.ipynb) notebook first. This example extends upon the quadratic prior used in that notebook to use an anatomical prior.
Authors: Kris Thielemans, Sam Ellis, Richard Brown, Casper da Costa-Luis
First version: 22nd of October 2019
Second version: 27th of October 2019
Third version: June 2021
CCP SyneRBI Synergistic Image Reconstruction Framework (SIRF)
Copyright 2019, 2021 University College London
Copyright 2019 King's College London
This is software developed for the Collaborative Computational
Project in Synergistic Reconstruction for Biomedical Imaging. (http://www.synerbi.ac.uk/).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
# Brief description of the Bowsher prior
The "usual" quadratic prior penalises differences between neighbouring voxels (using the square of the difference). This tends to oversmooth parts of the image where you know there should be an edge. To overcome this, it is natural to not penalise the difference between those "edge" voxels. This can be done after segmentation of the anatomical image for instance.
Bowsher suggested a segmentation-free approach to use an anatomical (or any "side" image) as follows:
- compute edge information on the anatomical image.
- for each voxel, consider only the $N_B$ neighbours which have the lowest difference in the anatomical image.
The paper is
Bowsher, J. E., Hong Yuan, L. W. Hedlund, T. G. Turkington, G. Akabani, A. Badea, W. C. Kurylo, et al. ‘Utilizing MRI Information to Estimate F18-FDG Distributions in Rat Flank Tumors’. In IEEE Symposium Conference Record Nuclear Science 2004., 4:2488-2492 Vol. 4, 2004. https://doi.org/10.1109/NSSMIC.2004.1462760.
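To make the neighbour selection concrete, here is a small NumPy sketch, independent of the SIRF/kcl implementation used below: for one voxel, given the anatomical differences to its neighbours, we keep only the $N_B$ most similar ones.
```
import numpy

def bowsher_weights_for_voxel(anatomical_diffs, num_neighbours):
    """Binary weights keeping only the num_neighbours smallest differences.

    anatomical_diffs: 1D array of differences between the anatomical value
    at this voxel and at each of its neighbours (a sketch, not the
    kcl.Prior implementation used later in this notebook).
    """
    weights = numpy.zeros_like(anatomical_diffs)
    keep = numpy.argsort(numpy.abs(anatomical_diffs))[:num_neighbours]
    weights[keep] = 1
    return weights

# example: 8 neighbours, keep the 3 most similar ones
print(bowsher_weights_for_voxel(
    numpy.array([0.1, 5.0, 0.2, 4.0, 0.05, 3.0, 2.0, 1.0]), 3))
```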
# All the normal imports and handy functions
```
%matplotlib notebook
# Setup the working directory for the notebook
import notebook_setup
from sirf_exercises import cd_to_working_dir
cd_to_working_dir('Synergistic', 'MAPEM_Bowsher')
#%% Initial imports etc
import numpy
import matplotlib.pyplot as plt
import os
import sys
import shutil
from tqdm.auto import tqdm, trange
import time
from scipy.ndimage.filters import gaussian_filter
import sirf.STIR as pet
from numba import jit
from sirf_exercises import exercises_data_path
brainweb_sim_data_path = exercises_data_path('working_folder', 'Synergistic', 'BrainWeb')
# set-up redirection of STIR messages to files
msg_red = pet.MessageRedirector('info.txt', 'warnings.txt', 'errors.txt')
# plotting settings
plt.ion() # interactive 'on' such that plots appear during loops
#%% some handy function definitions
def imshow(image, limits=None, title=''):
"""Usage: imshow(image, [min,max], title)"""
plt.title(title)
bitmap = plt.imshow(image)
if limits is None:
limits = [image.min(), image.max()]
plt.clim(limits[0], limits[1])
plt.colorbar(shrink=.6)
plt.axis('off')
return bitmap
def make_cylindrical_FOV(image):
"""truncate to cylindrical FOV"""
filt = pet.TruncateToCylinderProcessor()
filt.apply(image)
#%% define a function for plotting images and the updates
# This is the same function as in `ML_reconstruction`
def plot_progress(all_images, title, subiterations, cmax):
if len(subiterations)==0:
num_subiters = all_images[0].shape[0]-1
subiterations = range(1, num_subiters+1)
num_rows = len(all_images);
slice_show = 60
for it in subiterations:
plt.figure()
for r in range(num_rows):
plt.subplot(num_rows,2,2*r+1)
imshow(all_images[r][it,slice_show,:,:], [0,cmax], '%s at %d' % (title[r], it))
plt.subplot(num_rows,2,2*r+2)
imshow(all_images[r][it,slice_show,:,:]-all_images[r][it-1,slice_show,:,:],[-cmax*.1,cmax*.1], 'update')
plt.show();
def subplot_(idx,vol,title,clims=None,cmap="viridis"):
plt.subplot(*idx)
plt.imshow(vol,cmap=cmap)
if not clims is None:
plt.clim(clims)
plt.colorbar()
plt.title(title)
plt.axis("off")
```
# Load the data
To generate the data needed for this notebook, run the [BrainWeb](./BrainWeb.ipynb) notebook first.
```
full_acquired_data = pet.AcquisitionData(os.path.join(brainweb_sim_data_path, 'FDG_sino_noisy.hs'))
atten = pet.ImageData(os.path.join(brainweb_sim_data_path, 'uMap_small.hv'))
# Anatomical image
anatomical = pet.ImageData(os.path.join(brainweb_sim_data_path, 'T1_small.hv')) # could be T2_small.hv
anatomical_arr = anatomical.as_array()
# create initial image
init_image=atten.get_uniform_copy(atten.as_array().max()*.1)
make_cylindrical_FOV(init_image)
plt.figure()
imshow(anatomical.as_array()[64, :, :])
plt.show()
plt.figure()
imshow(full_acquired_data.as_array()[0, 64, :, :])
plt.show()
```
# Code from first MAPEM notebook
The following chunk of code is copied and pasted more-or-less directly from the other notebook as a starting point.
First, run the code chunk to get the objective functions etc
### construction of Likelihood objective functions and OSEM
```
def get_obj_fun(acquired_data, atten):
print('\n------------- Setting up objective function')
# #%% create objective function
#%% create acquisition model
am = pet.AcquisitionModelUsingRayTracingMatrix()
am.set_num_tangential_LORs(5)
# Set up sensitivity due to attenuation
asm_attn = pet.AcquisitionSensitivityModel(atten, am)
asm_attn.set_up(acquired_data)
bin_eff = pet.AcquisitionData(acquired_data)
bin_eff.fill(1.0)
asm_attn.unnormalise(bin_eff)
asm_attn = pet.AcquisitionSensitivityModel(bin_eff)
# Set sensitivity of the model and set up
am.set_acquisition_sensitivity(asm_attn)
am.set_up(acquired_data,atten);
#%% create objective function
obj_fun = pet.make_Poisson_loglikelihood(acquired_data)
obj_fun.set_acquisition_model(am)
print('\n------------- Finished setting up objective function')
return obj_fun
def get_reconstructor(num_subsets, num_subiters, obj_fun, init_image):
print('\n------------- Setting up reconstructor')
#%% create OSEM reconstructor
OSEM_reconstructor = pet.OSMAPOSLReconstructor()
OSEM_reconstructor.set_objective_function(obj_fun)
OSEM_reconstructor.set_num_subsets(num_subsets)
OSEM_reconstructor.set_num_subiterations(num_subiters)
#%% initialise
OSEM_reconstructor.set_up(init_image)
print('\n------------- Finished setting up reconstructor')
return OSEM_reconstructor
# Use rebin to create a smaller sinogram to speed up calculations
acquired_data = full_acquired_data.clone()
acquired_data = acquired_data.rebin(3)
# Get the objective function
obj_fun = get_obj_fun(acquired_data, atten)
```
# Implement de Pierro MAP-EM for a quadratic prior with arbitrary weights
The following code is almost a copy-paste of the implementation by A. Mehranian and S. Ellis [contributed during one of our hackathons](https://github.com/SyneRBI/SIRF-Contribs/tree/master/src/Python/sirf/contrib/kcl). It is copied here for you to have an easier look.
Note that the code avoids the `for` loops of the simplistic version in the MAPEM notebook and hence should be faster (however, the construction of the neighbourhood is still slow, but you should only have to do this once). Also, this is a Python reimplementation of MATLAB code (hence the use of "Fortran order" below).
```
def dePierroReg(image,weights,nhoodIndVec):
"""Get the de Pierro regularisation image (xreg)"""
imSize = image.shape
# vectorise image for indexing
imageVec = image.reshape(-1,order='F')
# retrieve voxel intensities for neighbourhoods
resultVec = imageVec[nhoodIndVec]
result = resultVec.reshape(weights.shape,order='F')
# compute xreg
imageReg = 0.5*numpy.sum(weights*(result + image.reshape(-1,1,order='F')),axis=1)/numpy.sum(weights,axis=1)
imageReg = imageReg.reshape(imSize,order='F')
return imageReg
def compute_nhoodIndVec(imageSize,weightsSize):
"""Get the neigbourhoods of each voxel"""
w = int(round(weightsSize[1]**(1.0/3))) # side length of neighbourhood
nhoodInd = neighbourExtract(imageSize,w)
return nhoodInd.reshape(-1,order='F')
def neighbourExtract(imageSize,w):
"""Adapted from kcl.Prior class"""
n = imageSize[0]
m = imageSize[1]
h = imageSize[2]
wlen = 2*numpy.floor(w/2)
widx = xidx = yidx = numpy.arange(-wlen/2,wlen/2+1)
if h==1:
zidx = [0]
nN = w*w
else:
zidx = widx
nN = w*w*w
Y,X,Z = numpy.meshgrid(numpy.arange(0,m), numpy.arange(0,n), numpy.arange(0,h))
N = numpy.zeros([n*m*h, nN],dtype='int32')
l = 0
for x in xidx:
Xnew = setBoundary(X + x,n)
for y in yidx:
Ynew = setBoundary(Y + y,m)
for z in zidx:
Znew = setBoundary(Z + z,h)
N[:,l] = ((Xnew + (Ynew)*n + (Znew)*n*m)).reshape(-1,1).flatten('F')
l += 1
return N
def setBoundary(X,n):
"""Boundary conditions for neighbourExtract.
Adapted from kcl.Prior class"""
idx = X<0
X[idx] = X[idx] + n
idx = X>n-1
X[idx] = X[idx] - n
return X.flatten('F')
@jit
def dePierroUpdate(xEM, imageReg, beta):
"""Update the image based on the de Pierro regularisation image"""
return (2*xEM)/(((1 - beta*imageReg)**2 + 4*beta*xEM)**0.5 + (1 - beta*imageReg) + 0.00001)
def MAPEM_iteration(OSEM_reconstructor,current_image,weights,nhoodIndVec,beta):
image_reg = dePierroReg(current_image.as_array(),weights,nhoodIndVec) # compute xreg
OSEM_reconstructor.update(current_image); # compute EM update
image_EM=current_image.as_array() # get xEM as a numpy array
    updated = dePierroUpdate(image_EM, image_reg, beta) # compute new update
current_image.fill(updated) # store for next iteration
return current_image
```
## Create uniform and Bowsher weights
We will use the `kcl.Prior` class here to construct the Bowsher weights given an anatomical image. The `kcl.Prior` class (and the above code) assumes that the `weights` are returned as an $N_v \times N_n$ array, with $N_v$ the number of voxels and $N_n$ the number of neighbours (here 27 as the implementation is in 3D).
```
import sirf.contrib.kcl.Prior as pr
def update_bowsher_weights(prior, side_image, num_bowsher_neighbours):
    return prior.BowshserWeights(side_image.as_array(), num_bowsher_neighbours)
```
For illustration, we will keep only a few neighbours in the Bowsher prior. This makes the contrast with "uniform" weights higher of course.
```
num_bowsher_neighbours = 3
myPrior = pr.Prior(anatomical_arr.shape)
BowsherWeights = update_bowsher_weights(myPrior,anatomical,num_bowsher_neighbours)
```
Ignore the warning about `divide by zero`, it is actually handled in the `kcl.Prior` class.
```
# compute indices of the neighbourhood for each voxel
nhoodIndVec=compute_nhoodIndVec(anatomical_arr.shape,BowsherWeights.shape)
# illustrate that only a few of the weights in the neighbourhood are kept
# (taking an arbitrary voxel)
print(BowsherWeights[500,:])
```
You could try to understand the neighbourhood structure using the following, but it is quite complicated due to the Fortran order and linear indices.
```
toLinearIndices=nhoodIndVec.reshape(BowsherWeights.shape,order='F')
print(toLinearIndices[500,:])
```
We will also use uniform weights where every neighbour is counted the same (often people will use 1/distance between voxels as weighting, but this isn't implemented here).
```
uniformWeights=BowsherWeights.copy()
uniformWeights[:,:]=1
# set "self-weight" of the voxel to zero
uniformWeights[:,27//2]=0
print(uniformWeights[500,:])
```
# Run some experiments
```
num_subsets = 21
num_subiters = 42
```
## Do a normal OSEM (for comparison and initialisation)
```
# Do initial OSEM recon
OSEM_reconstructor = get_reconstructor(num_subsets, num_subiters, obj_fun, init_image)
osem_image = init_image.clone()
OSEM_reconstructor.reconstruct(osem_image)
plt.figure()
imshow(osem_image.as_array()[60,:,:])
plt.show();
```
## Run MAP-EM with the 2 different sets of weights
To save some time, we will initialise the algorithms with the OSEM image. This makes sense of course as in the initial iterations, the penalty will just slow everything down (as it smooths an already too smooth image even more!).
```
# arbitrary value for the weight of the penalty. You might have to tune it
beta=1
```
Compute with Bowsher penalty
```
current_image=osem_image.clone()
for it in trange(1, num_subiters+1):
current_image = MAPEM_iteration(OSEM_reconstructor,current_image,BowsherWeights,nhoodIndVec,beta)
Bowsher=current_image.clone()
```
Compute with uniform weights (we'll call the result UQP for "uniform quadratic penalty")
```
current_image=osem_image.clone()
for it in trange(1, num_subiters+1):
current_image = MAPEM_iteration(OSEM_reconstructor,current_image,uniformWeights,nhoodIndVec,beta)
UQP=current_image.clone()
# Plot the anatomical, OSEM, and two MAPEM images
plt.figure()
cmax=osem_image.max()*.6
clim=[0,cmax]
subplot_([1,2,1],anatomical.as_array()[60,:,:],"anatomical")
subplot_([1,2,2],osem_image.as_array()[60,:,:],"OSEM",clim)
plt.figure()
subplot_([1,2,1],UQP.as_array()[60,:,:],"Uniform Quadratic prior",clim)
subplot_([1,2,2],Bowsher.as_array()[60,:,:],"Bowsher Quadratic prior",clim)
plt.figure()
y_idx=osem_image.dimensions()[1]//2
plt.plot(osem_image.as_array()[60,y_idx,:],label="OSEM")
plt.plot(UQP.as_array()[60,y_idx,:],label="Uniform Quadratic prior")
plt.plot(Bowsher.as_array()[60,y_idx,:],label="Bowsher Quadratic prior")
plt.legend()
```
You will probably see that the MAP-EM images are quite smooth, and that there is very little difference between the "uniform" and "Bowsher" weights after this number of updates. The difference will get larger with a higher number of updates (try it!).
Also, with the Bowsher weights you should be able to increase `beta` more than for the uniform weights without oversmoothing the image too much.
# Misalignment between anatomical and emission images
What happens if you want to use an anatomical prior but the image isn't aligned with the image you're trying to reconstruct?
You'll have to register them of course! Have a look at the [registration notebook](../Reg/sirf_registration.ipynb) if you haven't already.
The idea here would be to run an initial reconstruction (say, OSEM), and then register the anatomical image to the resulting reconstruction...
Once we've got the anatomical image in the correct space, we can calculate the Bowsher weights.
```
import sirf.Reg as Reg
registration = Reg.NiftyAladinSym()
registration.set_reference_image(osem_image)
registration.set_floating_image(anatomical)
registration.set_parameter('SetPerformRigid','1')
registration.set_parameter('SetPerformAffine','0')
registration.process()
anatomical_in_emission_space = registration.get_output()
Bweights = update_bowsher_weights(myPrior,anatomical_in_emission_space,num_bowsher_neighbours)
```
### Import Package
```
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import traceback
import contextlib
import pathlib
```
### Load Dataset
```
mnist = tf.keras.datasets.fashion_mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print("Train Image shape:", X_train.shape, "Test Image shape:", X_test.shape)
# Normalize the images
X_train = X_train / 255.0
X_test = X_test / 255.0
```
### Conv2D Model - Base Model
```
model = keras.Sequential([
keras.layers.InputLayer(input_shape=(28, 28)),
keras.layers.Reshape(target_shape=(28, 28, 1)),
keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation='relu'),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Flatten(),
keras.layers.Dense(10)
])
# Model summary
model.summary()
```
### Train Conv2D
```
model.compile(optimizer='adam',
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(X_train,
y_train,
batch_size=64,
epochs=10,
validation_data=(X_test, y_test))
# Saving Model
model.save('1_fashion_mnist_model.h5')
# Evaluate the model on test set
score = model.evaluate(X_test, y_test, verbose=0)
# Print test accuracy
print('\n', 'Test accuracy:', score[1])
```
### Train model with pruning
```
! pip install -q tensorflow-model-optimization
import tensorflow_model_optimization as tfmot
prune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude
# Compute end step to finish pruning after the chosen number of epochs.
batch_size = 128
epochs = 40
validation_split = 0.1 # 10% of training set will be used for validation set.
num_images = X_train.shape[0] * (1 - validation_split)
end_step = np.ceil(num_images / batch_size).astype(np.int32) * epochs
# Define model for pruning.
pruning_params = {
'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(initial_sparsity=0.50,
final_sparsity=0.80,
begin_step=0,
end_step=end_step)
}
model_for_pruning = prune_low_magnitude(model, **pruning_params)
# `prune_low_magnitude` requires a recompile.
model_for_pruning.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model_for_pruning.summary()
callbacks = [
tfmot.sparsity.keras.UpdatePruningStep(),
tfmot.sparsity.keras.PruningSummaries(log_dir='log'),
]
model_for_pruning.fit(X_train, y_train,
batch_size=batch_size, epochs=epochs, validation_split=validation_split,
callbacks=callbacks)
_, model_for_pruning_accuracy = model_for_pruning.evaluate(
    X_test, y_test, verbose=0)
print('Pruned test accuracy:', model_for_pruning_accuracy)
model_for_export = tfmot.sparsity.keras.strip_pruning(model_for_pruning)
tf.keras.models.save_model(model_for_export, '2_fashion_mnist_model_pruning.h5', include_optimizer=False)
```
### Q-aware Training
```
import tensorflow_model_optimization as tfmot
quantize_model = tfmot.quantization.keras.quantize_model
# q_aware stands for for quantization aware.
q_aware_model = quantize_model(model)
# `quantize_model` requires a recompile.
q_aware_model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
q_aware_model.summary()
# Train and evaluate the model against baseline
train_images_subset = X_train[0:1000] # out of 60000
train_labels_subset = y_train[0:1000]
q_aware_model.fit(train_images_subset, train_labels_subset,
batch_size=10, epochs=50, validation_split=0.1)
# Evaluate the model on test set
import time
start_time_infer = time.time()
score = q_aware_model.evaluate(X_test, y_test, verbose=0)
# Print test accuracy
result = {'Time to full set infer': (time.time() - start_time_infer),
'Score' : score[1]}
print(result)
start_time_infer = time.time()
#model = tf.keras.models.load_model('fashion_mnist_model_qaware.h5', compile = True)
data = X_test[0]
data = data.reshape((1, 28, 28))
data_y = y_test[0:1]
score = q_aware_model.evaluate(data, data_y, verbose=0)
result = {'Time to single unit infer': (time.time() - start_time_infer),
'Score' : score[1]}
print(result)
q_aware_model.save('3_fashion_mnist_model_qaware.h5')
```
### Convert Model to TFLite
```
def ConvertTFLite(model_path, filename):
try:
# Loading Model
model = tf.keras.models.load_model(model_path)
# Converter
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
#Specify path
tflite_models_dir = pathlib.Path("tflite_models/")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
filename = filename+".tflite"
tflite_model_file = tflite_models_dir/filename
# Save Model
tflite_model_file.write_bytes(tflite_model)
return f'Converted to TFLite, path {tflite_model_file}'
except Exception as e:
return str(e)
ConvertTFLite('./1_fashion_mnist_model.h5','4_fashion_mnist_model')
ConvertTFLite('./2_fashion_mnist_model_pruning.h5','5_fashion_mnist_pruning_model')
converter = tf.lite.TFLiteConverter.from_keras_model(q_aware_model)
quantized_tflite_model = converter.convert()
quantized_aware_tflite_file = '6_fashion_mnist_model_qaware.tflite'
with open(quantized_aware_tflite_file, 'wb') as f:
f.write(quantized_tflite_model)
print('Saved quantization-aware TFLite model to:', quantized_aware_tflite_file)
```
### Integer with float fallback quantization
```
def Quant_int_with_float(model_name, filename):
try:
model = tf.keras.models.load_model(model_name)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model_quant = converter.convert()
filename = filename+'.tflite'
tflite_models_dir = pathlib.Path("tflite_models/")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
tflite_model_quant_file = tflite_models_dir/filename
tflite_model_quant_file.write_bytes(tflite_model_quant)
return f'Converted - path {tflite_model_quant_file}'
except Exception as e:
return str(e)
Quant_int_with_float('./1_fashion_mnist_model.h5', '7_fashion_mnist_Integer_float_model')
Quant_int_with_float('./2_fashion_mnist_model_pruning.h5','8_fashion_mnist_pruning_Integer_float_model')
converter = tf.lite.TFLiteConverter.from_keras_model(q_aware_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
mnist_train, _ = tf.keras.datasets.fashion_mnist.load_data()
images = tf.cast(mnist_train[0], tf.float32) / 255.0
mnist_ds = tf.data.Dataset.from_tensor_slices((images)).batch(1)
def representative_data_gen():
for input_value in mnist_ds.take(100):
yield [input_value]
converter.representative_dataset = representative_data_gen
quantized_tflite_model = converter.convert()
tflite_models_dir = pathlib.Path("tflite_models/")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
tflite_model_quant_file = tflite_models_dir/"9_fashion_mnist_Qaware_Integer_float_model.tflite"
tflite_model_quant_file.write_bytes(quantized_tflite_model)
```
### Float 16 Quantization
```
def Quant_float(model_name, filename):
try:
model = tf.keras.models.load_model(model_name)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_fp16_model = converter.convert()
filename = filename+'.tflite'
tflite_models_fp16_dir = pathlib.Path("tflite_models/")
tflite_models_fp16_dir.mkdir(exist_ok=True, parents=True)
tflite_model_fp16_file = tflite_models_fp16_dir/filename
tflite_model_fp16_file.write_bytes(tflite_fp16_model)
return f'Converted - path {tflite_model_fp16_file}'
except Exception as e:
return str(e)
Quant_float('./1_fashion_mnist_model.h5', '10_fashion_mnist_float16_model')
Quant_float('./2_fashion_mnist_model_pruning.h5', '11_fashion_mnist_float_pruning_model')
converter = tf.lite.TFLiteConverter.from_keras_model(q_aware_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_fp16_model = converter.convert()
tflite_model_fp16_file = tflite_models_dir/"12_fashion_mnist_Qaware_float16_model.tflite"
tflite_model_fp16_file.write_bytes(tflite_fp16_model)
```
### Integer Only
```
def Quant_integer(model_name, filename):
try:
model = tf.keras.models.load_model(model_name)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
mnist_train, _ = tf.keras.datasets.fashion_mnist.load_data()
images = tf.cast(mnist_train[0], tf.float32) / 255.0
mnist_ds = tf.data.Dataset.from_tensor_slices((images)).batch(1)
def representative_data_gen():
for input_value in mnist_ds.take(100):
yield [input_value]
converter.representative_dataset = representative_data_gen
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8 # or tf.uint8
converter.inference_output_type = tf.int8 # or tf.uint8
tflite_int_quant_model = converter.convert()
filename = filename+'.tflite'
tflite_models_dir = pathlib.Path("tflite_models/")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
tflite_model_integeronly_file = tflite_models_dir/filename
tflite_model_integeronly_file.write_bytes(tflite_int_quant_model)
return f'Converted - path {tflite_model_integeronly_file}'
except Exception as e:
return str(e)
Quant_integer('./1_fashion_mnist_model.h5', '13_fashion_mnist_integeronly_model')
Quant_integer('./2_fashion_mnist_model_pruning.h5', '14_fashion_mnist_Integeronly_pruning_model')
# Quant_integer('3_fashion_mnist_model_qaware.h5','15_fashion_mnist_qaware_integer_model')
```
### Evaluate Model
```
import time
```
### Keras model Evaluation
```
def evaluate_keras_model_single_unit(model_path):
start_time_infer = time.time()
model = tf.keras.models.load_model(model_path, compile = True)
model.compile(optimizer='adam',
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
data = X_test[0]
data = data.reshape((1, 28, 28))
data_y = y_test[0:1]
score = model.evaluate(data, data_y, verbose=0)
result = {'Time to single unit infer': (time.time() - start_time_infer),
'Score' : score[1]}
return result
evaluate_keras_model_single_unit('./1_fashion_mnist_model.h5')
evaluate_keras_model_single_unit('./2_fashion_mnist_model_pruning.h5')
def evaluate_keras_model_test_set(model_path):
start_time_infer = time.time()
model = tf.keras.models.load_model(model_path, compile = True)
model.compile(optimizer='adam',
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
    score = model.evaluate(X_test, y_test, verbose=0)
    result = {'Time to full set infer': (time.time() - start_time_infer),
              'Score' : score[1]}
return result
evaluate_keras_model_test_set('./1_fashion_mnist_model.h5')
evaluate_keras_model_test_set('./2_fashion_mnist_model_pruning.h5')
```
### TF Lite Model Evaluation
```
# Evaluate the TFLite model on the full test set
def evaluate_tflite_model_test_set(interpreter):
start_time = time.time()
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
# Run predictions on every image in the "test" dataset.
prediction_digits = []
for test_image in X_test:
# Pre-processing: add batch dimension and convert to float32 to match with
# the model's input data format.
test_image = np.expand_dims(test_image, axis=0).astype(np.float32)
interpreter.set_tensor(input_index, test_image)
# Run inference.
interpreter.invoke()
# Post-processing: remove batch dimension and find the digit with highest
# probability.
output = interpreter.tensor(output_index)
digit = np.argmax(output()[0])
prediction_digits.append(digit)
# Compare prediction results with ground truth labels to calculate accuracy.
accurate_count = 0
for index in range(len(prediction_digits)):
if prediction_digits[index] == y_test[index]:
accurate_count += 1
accuracy = accurate_count * 1.0 / len(prediction_digits)
results = {'time': (time.time() - start_time),
'accuracy': accuracy}
return results
```
### TF Lite Models
```
# TF Lite
tflite_model_file = 'tflite_models/4_fashion_mnist_model.tflite'
interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter.allocate_tensors()
evaluate_tflite_model_test_set(interpreter)
# Pruning TF Lite
tflite_pruning_model_file = 'tflite_models/5_fashion_mnist_pruning_model.tflite'
interpreter_pruning = tf.lite.Interpreter(model_path=str(tflite_pruning_model_file))
interpreter_pruning.allocate_tensors()
evaluate_tflite_model_test_set(interpreter_pruning)
# Qaware Model
tflite_model_file = '6_fashion_mnist_model_qaware.tflite'
interpreter_qaware = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter_qaware.allocate_tensors()
evaluate_tflite_model_test_set(interpreter_qaware)
```
### Integer Float TF Lite models
```
# TF Lite
tflite_model_file = 'tflite_models/7_fashion_mnist_Integer_float_model.tflite'
interpreter_int_float = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter_int_float.allocate_tensors()
evaluate_tflite_model_test_set(interpreter_int_float)
# Pruning TF Lite
tflite_pruning_model_file = 'tflite_models/8_fashion_mnist_pruning_Integer_float_model.tflite'
interpreter_int_float_pruning = tf.lite.Interpreter(model_path=str(tflite_pruning_model_file))
interpreter_int_float_pruning.allocate_tensors()
evaluate_tflite_model_test_set(interpreter_int_float_pruning)
# Q-aware TF Lite
tflite_qaware_model_file = 'tflite_models/9_fashion_mnist_Qaware_Integer_float_model.tflite'
interpreter_tflite_qaware = tf.lite.Interpreter(model_path=str(tflite_qaware_model_file))
interpreter_tflite_qaware.allocate_tensors()
evaluate_tflite_model_test_set(interpreter_tflite_qaware)
```
### Float16 TFLite
```
# TF Lite
tflite_model_file = 'tflite_models/10_fashion_mnist_float16_model.tflite'
interpreter_float = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter_float.allocate_tensors()
evaluate_tflite_model_test_set(interpreter_float)
# Pruning TF Lite
tflite_pruning_model_file = 'tflite_models/11_fashion_mnist_float_pruning_model.tflite'
interpreter_float_pruning = tf.lite.Interpreter(model_path=str(tflite_pruning_model_file))
interpreter_float_pruning.allocate_tensors()
evaluate_tflite_model_test_set(interpreter_float_pruning)
tflite_qaware_model_file = 'tflite_models/12_fashion_mnist_Qaware_float16_model.tflite'
interpreter_tflite_qaware = tf.lite.Interpreter(model_path=str(tflite_qaware_model_file))
interpreter_tflite_qaware.allocate_tensors()
evaluate_tflite_model_test_set(interpreter_tflite_qaware)
```
### Integer Only TFlite
```
# TF Lite
tflite_model_file = 'tflite_models/13_fashion_mnist_integeronly_model.tflite'
interpreter_int = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter_int.allocate_tensors()
evaluate_tflite_model_test_set(interpreter_int)
# Pruning TF Lite
tflite_pruning_model_file = 'tflite_models/14_fashion_mnist_Integeronly_pruning_model.tflite'
interpreter_int_pruning = tf.lite.Interpreter(model_path=str(tflite_pruning_model_file))
interpreter_int_pruning.allocate_tensors()
evaluate_tflite_model_test_set(interpreter_int_pruning)
# tflite_qaware_model_file = 'tflite_models/15_fashion_mnist_Qaware_integer_model.tflite'
# interpreter_tflite_qaware = tf.lite.Interpreter(model_path=str(tflite_qaware_model_file))
# interpreter_tflite_qaware.allocate_tensors()
# evaluate_tflite_model_test_set(interpreter_tflite_qaware)
```
### Find unit inference time
```
# Time inference on a single test image
def evaluate_tflite_model_single_unit(interpreter):
start_time = time.time()
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
test_image = np.expand_dims(X_test[0], axis=0).astype(np.float32)
interpreter.set_tensor(input_index, test_image)
# Run inference.
interpreter.invoke()
    # Fetch the output tensor (only the inference time is measured here)
    output = interpreter.tensor(output_index)
results = {'time': (time.time() - start_time)}
return results
# TF Lite
evaluate_tflite_model_single_unit(interpreter)
evaluate_tflite_model_single_unit(interpreter_pruning)
evaluate_tflite_model_single_unit(interpreter_int_float)
evaluate_tflite_model_single_unit(interpreter_qaware)
evaluate_tflite_model_single_unit(interpreter_int_float_pruning)
evaluate_tflite_model_single_unit(interpreter_float)
evaluate_tflite_model_single_unit(interpreter_float_pruning)
evaluate_tflite_model_single_unit(interpreter_int)
evaluate_tflite_model_single_unit(interpreter_int_pruning)
```
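To complete the comparison, it can also be useful to look at the on-disk size of the saved models. The helper below is an extra illustration and assumes the file paths used above:
```
import os

def model_size_kb(path):
    # size of a saved .h5 or .tflite file in kilobytes
    return os.path.getsize(path) / 1024

for path in ['1_fashion_mnist_model.h5',
             'tflite_models/4_fashion_mnist_model.tflite',
             'tflite_models/7_fashion_mnist_Integer_float_model.tflite',
             'tflite_models/10_fashion_mnist_float16_model.tflite',
             'tflite_models/13_fashion_mnist_integeronly_model.tflite']:
    print(path, round(model_size_kb(path), 1), 'KB')
```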