You now know how to do the following:
1. Generate open-loop control commands from a given route
2. Simulate vehicular robot motion using the unicycle/bicycle model
Imagine you want to build a utility that lets your co-workers explore and understand vehicle models.
Dashboards are a common way to do this, and there are several options out there: Streamlit, Voilà, Observable, etc.
Follow this <a href="https://medium.com/plotly/introducing-jupyterdash-811f1f57c02e">Medium post</a> on JupyterDash to see how to package what you learnt today in an interactive manner.
Here is a <a href="https://stackoverflow.com/questions/53622518/launch-a-dash-app-in-a-google-colab-notebook">Stack Overflow question</a> on how to run Dash applications on Colab.
What can you assume?
+ Fixed $v,\omega$ or $v,\delta$ depending on the model (users can still pick the actual value)
+ A fixed wheelbase for the bicycle model
Users can choose:
+ Unicycle or bicycle model
+ A pre-configured route ("S", "inverted-S", "figure-of-eight", etc.)
+ One of three values for $v$ and $\omega$ (or $\delta$)
```
!pip install jupyter-dash
import plotly.express as px
from jupyter_dash import JupyterDash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas as pd
import numpy as np
# Load Data
velocities = ['1','2','3']
omegas = ['15','30','45']
shapes = ["S", "Inverted-S", "Figure of 8"]
models = ["Unicycle", "Bicycle"]
def unicycle_model(curr_pose, v, w, dt=1.0):
    '''
    >>> unicycle_model((0.0,0.0,0.0), 1.0, 0.0)
    (1.0, 0.0, 0.0)
    >>> unicycle_model((0.0,0.0,0.0), 0.0, 1.0)
    (0.0, 0.0, 1.0)
    >>> unicycle_model((0.0, 0.0, 0.0), 1.0, 1.0)
    (1.0, 0.0, 1.0)
    '''
    # kinematic equations of the unicycle model
    x, y, theta = curr_pose
    x += v*np.cos(theta)*dt
    y += v*np.sin(theta)*dt
    theta += w*dt
    # keep theta bounded in [-pi, pi]
    theta = np.arctan2(np.sin(theta), np.cos(theta))
    return x, y, theta
def bicycle_model(curr_pose, v, delta, dt=1.0):
    '''
    >>> bicycle_model((0.0,0.0,0.0), 1.0, 0.0)
    (1.0, 0.0, 0.0)
    >>> bicycle_model((0.0,0.0,0.0), 0.0, np.pi/4)
    (0.0, 0.0, 0.0)
    >>> bicycle_model((0.0, 0.0, 0.0), 1.0, np.pi/4)
    (1.0, 0.0, 1.11)
    '''
    # kinematic equations of the bicycle model
    L = 0.9  # fixed wheelbase
    x, y, theta = curr_pose
    x += v*np.cos(theta)*dt
    y += v*np.sin(theta)*dt
    theta += (v/L)*np.tan(delta)*dt
    # keep theta bounded in [-pi, pi]
    theta = np.arctan2(np.sin(theta), np.cos(theta))
    return x, y, theta
def get_open_loop_commands(route, vc_fast=1, wc=np.pi/12, dt=1.0):
    all_w = []
    omegas = {'straight': 0, 'left': wc, 'right': -wc}
    for manoeuvre, command in route:
        # time steps needed to drive `command` units straight, or to turn through `command` degrees
        t_straight = np.ceil(command/vc_fast).astype('int')
        t_turn = np.ceil(np.deg2rad(command)/wc).astype('int')
        t_cmd = t_straight if manoeuvre == 'straight' else t_turn
        all_w += [omegas[manoeuvre]]*t_cmd
    all_v = vc_fast * np.ones_like(all_w)
    return all_v, all_w

def get_commands(shape):
    if shape == shapes[0]:
        return [("right", 180), ("left", 180)]
    elif shape == shapes[1]:
        return [("left", 180), ("right", 180)]
    return [("right", 180), ("left", 180), ("left", 180), ("right", 180)]

def get_angle(omega):
    if omega == omegas[0]:
        return np.pi/12
    elif omega == omegas[1]:
        return np.pi/6
    return np.pi/4
# Build App
app = JupyterDash(__name__)
app.layout = html.Div([
html.H1("Unicycle/Bicycle"),
html.Label([
"velocity",
dcc.Dropdown(
id='velocity', clearable=False,
value='1', options=[
{'label': c, 'value': c}
for c in velocities
])
]),
html.Label([
"omega/delta",
dcc.Dropdown(
id='omega', clearable=False,
value='15', options=[
{'label': c, 'value': c}
for c in omegas
])
]),
html.Label([
"shape",
dcc.Dropdown(
id='shape', clearable=False,
value='S', options=[
{'label': c, 'value': c}
for c in shapes
])
]),
html.Label([
"model",
dcc.Dropdown(
id='model', clearable=False,
value='Unicycle', options=[
{'label': c, 'value': c}
for c in models
])
]),
dcc.Graph(id='graph'),
])
# Define callback to update graph
@app.callback(
Output('graph', 'figure'),
[Input("velocity", "value"), Input("omega", "value"), Input("shape", "value"), Input("model", "value")]
)
def update_figure(velocity, omega, shape, model):
    robot_trajectory = []
    all_v, all_w = get_open_loop_commands(get_commands(shape), int(velocity), get_angle(omega))
    pose = (0, 0, np.pi/2)
    for v, w in zip(all_v, all_w):
        robot_trajectory.append(pose)
        if model == models[0]:
            pose = unicycle_model(pose, v, w)
        else:
            pose = bicycle_model(pose, v, w)
    robot_trajectory = np.array(robot_trajectory)
    df = pd.DataFrame({'x-axis': robot_trajectory[:, 0], 'y-axis': robot_trajectory[:, 1]})
    return px.line(df, x="x-axis", y="y-axis", title='Simulate vehicular robot motion using unicycle/bicycle model')
# Run app and display result inline in the notebook
app.run_server(mode='inline')
```
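If the inline frame is too small, JupyterDash can also serve the same app at its own URL outside the notebook. A minimal variant of the last line is shown below; the mode name follows the Medium post linked above, and the port number is only an example.
```
# serve the dashboard at a separate URL instead of rendering it inline in the notebook
app.run_server(mode='external', port=8050)
```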
## Regular Expressions
A regular expression is a formal way of specifying a text pattern.
More precisely, it is a composition of symbols and characters with special functions that, grouped together with literal characters, form a sequence, an expression. This expression is interpreted as a rule that reports success if a given input matches it, that is, if the input satisfies all of its conditions exactly.
```
# import the re (regular expressions) module
# this module provides operations with regular expressions (REs)
import re
# list of terms to search for
lista_pesquisa = ['informações', 'Negócios']
# text to parse
texto = 'Existem muitos desafios para o Big Data. O primerio deles é a coleta dos dados, pois fala-se aquie de'\
'enormes quantidades sendo geradas em uma taxa maior do que um servidor comum seria capaz de processar e armazenar.'\
'O segundo desafio é justamente o de processar essas informações. Com elas então distribuídas, a aplicação deve ser'\
'capaz de consumir partes das informações e gerar pequenas quantidades de dados processados, que serão calculados em'\
'conjunto depois para criar o resultado final. Outro desafio é a exibição dos resultados, de forma que as informações'\
'estejam disponíveis de forma clara para os tomadores de decisão.'
# basic data-mining example
for item in lista_pesquisa:
    print('Searching for "%s" in:\n\n"%s"' % (item, texto))
    # check whether the search term exists in the text
    if re.search(item, texto):
        print('\n')
        print('Word found.\n')
        print('\n')
    else:
        print('\n')
        print('Word not found.\n')
        print('\n')
# term used to split a string
split_term = '@'
frase = 'Qual o domínio de alguém com o e-mail: [email protected]'
# splitting the sentence
re.split(split_term, frase)
def encontrar_padrao(lista, frase):
    for item in lista:
        print('Searching the phrase for: %r' % item)
        print(re.findall(item, frase))
        print('\n')
frase_padrao = 'zLzL..zzzLLL...zLLLzLLL...LzLz..dzzzzz...zLLLLL'
lista_padroes = ['zL*',     # z followed by zero or more L
                 'zL+',     # z followed by one or more L
                 'zL?',     # z followed by zero or one L
                 'zL{3}',   # z followed by three L
                 'zL{2,3}', # z followed by two to three L (no space inside the braces, or re treats it literally)
                ]
encontrar_padrao(lista_padroes, frase_padrao)
frase = 'Esta é uma string com pontuação. Isso pode ser um problema quando fazemos mineração de dados em busca'\
'de padrões! Não seria melhor retirar os sinais ao fim de cada frase?'
# The expression [^!.? ] matches characters that are not punctuation
# (!, ., ?) or a space, and the plus sign (+) requires the item to appear
# at least once. In other words, this expression says: return only the
# words in the sentence.
re.findall('[^!.? ]+', frase)
frase = 'Está é uma frase do exemplo. Vamos verificar quais padrões serâo encontradas.'
lista_padroes = ['[a-z]+',       # sequence of lowercase letters
                 '[A-Z]+',       # sequence of uppercase letters
                 '[a-zA-Z]+',    # sequence of lowercase and uppercase letters
                 '[A-Z][a-z]+']  # an uppercase letter followed by lowercase letters
encontrar_padrao(lista_padroes, frase)
```
## Escape Codes
Specific escape codes can be used to find patterns in the data, such as digits, non-digits, whitespace, etc.

| Code | Meaning |
|------|---------|
| `\d` | a digit |
| `\D` | a non-digit |
| `\s` | whitespace (tab, space, newline, etc.) |
| `\S` | non-whitespace |
| `\w` | alphanumeric |
| `\W` | non-alphanumeric |
```
# The r prefix before a regular expression prevents the language from
# pre-processing the pattern (it marks a 'raw' string). We place the
# r modifier immediately before the quotes.
r'\b'
'\b'
frase = 'Está é uma string com alguns números, como 1287 e um símbolo #hashtag'
lista_padroes = [r'\d+',  # sequence of digits
                 r'\D+',  # sequence of non-digits
                 r'\s+',  # sequence of whitespace
                 r'\S+',  # sequence of non-whitespace
                 r'\w+',  # alphanumeric characters
                 r'\W+',  # non-alphanumeric characters
]
encontrar_padrao(lista_padroes, frase)
```
# Optical Data Reduction using Python
by Steve Crawford (South African Astronomical Observatory)
```
%matplotlib inline
%load_ext autoreload
%autoreload 2
```
In addition to instrument-specific python pipelines, there now exists a suite of tools available for general reduction of optical observations. This includes *ccdproc*, an astropy affiliated package for basic CCD reductions. The package is useful for everything from student tutorials on CCD reductions to building science-quality reduction pipelines for observatories. We also present *specreduce*, a python package for reducing optical spectroscopy. The package includes an interactive graphical user interface for line identification as well as tools for extracting spectra. With this set of tools, pipelines can be built for instruments in a relatively short time. While nearly complete, further improvements and enhancements are still needed and contributions are welcome.
# Introduction
For our purposes, we will use the [same data](http://iraf.noao.edu/iraf/ftp/iraf/misc/) as in the [IRAF tutorials](http://iraf.noao.edu/tutorials/). However, we will show how to reduce the data in the same way using only current general-purpose python tools.
```
import numpy as np
from matplotlib import pyplot as plt
from astropy import units as u
from astropy.io import fits
from astropy import modeling as mod
```
## CCD Reduction
```
import ccdproc
from ccdproc import CCDData, ImageFileCollection
```
[*ccdproc*](https://github.com/astropy/ccdproc) is an astropy affiliated package for handling CCD data reductions. The code contains all the necessary content to produce pipelines for basic CCD reductions. Here we use *ccdproc* to reduce a spectroscopic data set.
The *ImageFileCollection* class in *ccdproc* is useful for reading and sorting of the FITS files in a directory.
```
from ccdproc import ImageFileCollection
image_dir = 'exercises/spec/'
ic = ImageFileCollection('exercises/spec/') #read in all FITS files in the directory
ic.summary
```
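The summary table can also be used to select subsets of files by header keyword. As a small illustration (reusing the `object` value that the bias frames carry later in this notebook), `files_filtered` returns just the matching file names:
```
# list only the frames whose OBJECT keyword matches the bias frames
bias_files = ic.files_filtered(object='biases 1st afternoon')
bias_files
```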
```
# create the master biasframe
# function used for fitting bias frames
cheb_1 = mod.models.Chebyshev1D(1)
#create a list of bias frames
bias_list = []
for hdu, fname in ic.hdus(return_fname=True, object='biases 1st afternoon'):
    ccd = CCDData.read(image_dir+fname, unit='adu')
    ccd = ccdproc.subtract_overscan(ccd, fits_section='[105:130,1:1024]')
    ccd = ccdproc.trim_image(ccd, fits_section="[34:74,1:1022]")
    bias_list.append(ccd)
#combine into master bias frame
master_bias = ccdproc.combine(bias_list, method='average', clip_extrema=True,
nlow = 1, nhigh = 1)
#process flat data
# In this case we use ccdproc.ccd_process instead of each individual step
#create a list of flat frames
flat_list = []
for hdu, fname in ic.hdus(return_fname=True, object='flats-6707'):
    ccd = CCDData.read(image_dir+fname, unit='adu')
    ccd = ccdproc.ccd_process(ccd, oscan='[105:130,1:1024]', oscan_model=cheb_1,
                              trim="[34:74,1:1022]", master_bias=master_bias)
    flat_list.append(ccd)
#combine into a single master flat
master_flat = ccdproc.combine(flat_list, method='average', sigma_clip=True,
low_thresh=3, high_thresh=3)
#process the sky flat data
#create a list of sky flat frames
skyflat_list = []
exp_list = []
for fname in ['sp0011.fits', 'sp0012.fits', 'sp0013.fits']:
    ccd = CCDData.read(image_dir+fname, unit='adu')
    exp_list.append(ccd.header['EXPTIME'])
    ccd = ccdproc.ccd_process(ccd, oscan='[105:130,1:1024]', oscan_model=cheb_1,
                              trim="[34:74,1:1022]", master_bias=master_bias)
    skyflat_list.append(ccd)
#combine into a single master sky flat
master_sky = ccdproc.combine(skyflat_list, method='average', scale=np.median,
                             weights=np.array(exp_list), sigma_clip=True,
                             low_thresh=3, high_thresh=3)
# correct for the response function
cheb_5 = mod.models.Chebyshev1D(5)
fitter = mod.fitting.LinearLSQFitter()
fy = master_flat.data.sum(axis=1)/master_flat.shape[1]
yarr = np.arange(len(fy))
resp_func = fitter(cheb_5, yarr, fy)
response = master_flat.divide(resp_func(yarr).reshape((len(yarr),1))*u.dimensionless_unscaled)
# correct for the illumination correction
sky = master_sky.divide(resp_func(yarr).reshape((len(yarr),1))*u.dimensionless_unscaled)
cheb_22 = mod.models.Chebyshev2D(2,2)
yarr, xarr = np.indices(sky.data.shape)
illum = fitter(cheb_22, xarr, yarr, sky.data)
# add fitting with rejection
# todo update to fit set regions
sky.data = illum(xarr, yarr)
sky.data = sky.divide(sky.data.mean())
super_flat = sky.multiply(response)
img_list = []
for fname in ['sp0018.fits', 'sp0020.fits', 'sp0021.fits', 'sp0022.fits', 'sp0023.fits',
              'sp0024.fits', 'sp0025.fits', 'sp0027.fits']:
    ccd = CCDData.read(image_dir+fname, unit='adu')
    hdr = ccd.header
    ccd = ccdproc.ccd_process(ccd, oscan='[105:130,1:1024]', oscan_model=cheb_1,
                              trim="[34:74,1:1022]", master_bias=master_bias,
                              master_flat=super_flat)
    # add cosmic ray cleaning
    ccd = ccdproc.cosmicray_lacosmic(ccd, sigclip=3., sigfrac=0.3,
                                     gain=hdr['GAIN'], readnoise=hdr['RDNOISE'])
    img_list.append(ccd)
    ccd.write('p'+fname, clobber=True)
```
## Spectroscopic Reductions
```
from specreduce.interidentify import InterIdentify
from specreduce import spectools as st
from specreduce import WavelengthSolution
```
[*specreduce*](https://github.com/crawfordsm/specreduce) is a package for handling the wavelength calibration of spectroscopic data. The code contains all the necessary content to identify arc lines and to rectify spectra. It can be used for longslit, multi-object, or echelle spectrographs.
```
# read in the line lists -- if line ratios are available, it is easier
# to find an automatic solution
slines = np.loadtxt('thorium.dat')
sfluxes = np.ones_like(slines)
# set up the data and correct for its orientation
arc1 = img_list[0]
data = arc1.data.T
data = data[:,::-1]
xarr = np.arange(data.shape[1])
istart = int(data.shape[0]/2.0)
# initial guess for the wavelength solution
ws_init = mod.models.Chebyshev1D(3)
ws_init.domain = [xarr.min(), xarr.max()]
ws = WavelengthSolution.WavelengthSolution(xarr, xarr, ws_init)
iws = InterIdentify(xarr, data, slines, sfluxes, ws, mdiff=20, rstep=5,
function='poly', order=3, sigma=3, niter=5, wdiff=0.5,
res=0.2, dres=0.05, dc=3, ndstep=50, istart=istart,
method='Zeropoint', smooth=0, filename=None,
subback=0, textcolor='black', log=None, verbose=True)
# correct for curvature in the arc
ws_init = mod.models.Chebyshev1D(2)
ws_init.domain = [xarr.min(), xarr.max()]
ws = WavelengthSolution.WavelengthSolution(xarr, xarr, ws_init)
aws = st.arc_straighten(data, istart, ws, rstep=1)
# create wavelength map and apply wavelength solution to all data
wave_map = st.wave_map(data, aws)
k = 20.5
ws = iws[k]
for i in range(data.shape[0]):
    wave_map[i,:] = iws[k](wave_map[i,:])
# extract the object data
obj_data = img_list[3].data
obj_data = obj_data.T
obj_data = obj_data[:,::-1]
plt.imshow(obj_data, aspect='auto')
ax = plt.gca()
xt = ax.get_xticks()
ax.set_xticklabels([int(x) for x in ws(xt)])
plt.xlabel('Wavelength ($\AA$)', size='x-large')
ax.set_yticklabels([])
plt.savefig('spec2d.pdf')
plt.show()
#ax.set_yticklabels([])
# sum the spectra between two row limits
# The spectra could be traced for better results,
# or extracted using more optimal methods
warr = ws(xarr)
flux = np.zeros_like(warr)
for i in range(18, 25):
    f = np.interp(warr, wave_map[i], obj_data[i])
    flux += f
sky = np.zeros_like(warr)
for i in range(25, 32):
    f = np.interp(warr, wave_map[i], obj_data[i])
    sky += f
plt.plot(warr, flux - sky)
plt.xlabel('Wavelength ($\AA$)', size='x-large')
plt.ylabel('counts')
plt.savefig('spec1d.pdf')
plt.show()
#import pickle
#pickle.dump(iws, open('iws.db', 'wb'))
#iws = pickle.load(open('iws.db', 'rb'))
```
HMMs Library
============================
#### (Discrete & continuous hidden Markov models)
This document contains a tutorial (usage explained by example) for the hidden Markov models library [link to pip].
* [The **first** part](#dthmm) will cover the discrete-time hidden Markov model (**DtHMM**)
* [The **second** part](#cthmm) will be dedicated to the continuous-time hidden Markov model (**CtHMM**)
* [The **third** part](#conv) will compare the convergence of **both** models
* [The **fourth** part](#dataset) will explain how to use more complex **datasets** and run **multiple trainings** with one function call
All of the parts are independent, so you do not need to run the whole notebook if you are interested in only one of them.
If you are not familiar with hidden Markov model theory, we recommend ...
%todo: refer to DP theory, (simple guide to cthmm?), github, sources
<a id='dthmm'></a>
Part 1: Discrete Time Hidden Markov Model
---------------------------------------------------
```
import numpy as np
import matplotlib.pyplot as plt
import hmms
%matplotlib inline
```
### Construct DtHMM
You can directly initialize the DtHMM by passing the **model parameters**.
We will create simple DtHMM of two hidden states and three output variables.
```
# A is the matrix of transition probabilities from state [row] to state [column].
A = np.array([[0.9,0.1],[0.4,0.6]])
# B is the matrix of probabilities that the state [row] will emmit output variable [column].
B = np.array([[0.9,0.08,0.02],[0.2,0.5,0.3]])
# Pi is the vector of initial state probabilities.
Pi = np.array( [0.8,0.2] )
# Create DtHMM by given parameters.
dhmm = hmms.DtHMM(A,B,Pi)
```
Or you can initialize it by **random parameters**. Passing the number of hidden states and output variables.
```
dhmm_random = hmms.DtHMM.random(2,3)
```
### Save & Read from File
Once you have created the model you can **save** its parameters in file simply by calling *save_params* method.
```
dhmm.save_params("hello_dthmm")
```
The method stores the parameters in *.npz* format.
The saved file can later be used to **read** the parameters for model initialization.
```
dhmm_from_file = hmms.DtHMM.from_file( "hello_dthmm.npz" )
```
### Set & Get Parameters
Later you can always **set** parameters with triple of methods corresponding to the constructors.
```
dhmm.set_params(A,B,Pi)
dhmm.set_params_random(2,3)
dhmm.set_params_from_file( "hello_dthmm.npz" )
```
You can **get** parameters by calling them separately,
```
dhmm.a, dhmm.b, dhmm.pi
```
or **get** them **all** together as the triple.
```
(A,B,Pi) = dhmm.params
```
### Generate Random State and Emission Sequence
Now we can use our model to generate state and emission sequence.
The model will randomly choose which transition or emission will happen, taking into consideration the parameters we have previously defined.
```
seq_len = 20
s_seq, e_seq = dhmm.generate( seq_len )
#resize plot
plt.rcParams['figure.figsize'] = [20,20]
hmms.plot_hmm( s_seq, e_seq )
```
### Find Most Likely State Sequence
If we have the model parameters and an emission sequence, we can find the most probable state sequence that would generate it. Notice that it can differ from the actual sequence that generated the emissions.
We will use the Viterbi algorithm for the calculation.
```
( log_prob, s_seq ) = dhmm.viterbi( e_seq )
# Let's print the most likely state sequence, it can be same or differ from the sequence above.
hmms.plot_hmm( s_seq, e_seq )
```
The *log_prob* value stores the probability of the sequence.
All probabilities in the library are stored as the logarithm of their actual value: since the number of possible sequences grows exponentially with the sequence length, raw probabilities could easily lead to float underflow.
You can transform it back to the normal scale by applying the *exp* function.
```
np.exp( log_prob )
```
### State Confidence
We may want to know the probability that an emission was generated by some particular state. You can get the result for every state at every time step by calling the method *states_confidence*. **Notice** that the Viterbi most probable sequence presented above does not necessarily contain the most probable states from this method, because the per-state result ignores the transition probabilities between consecutive states.
```
log_prob_table = dhmm.states_confidence( e_seq )
np.exp( log_prob_table )
```
<a id='dtest'></a>
### The Probability of the Emission Sequence
We can compute the probability, that the model will generate the emission sequence.
```
np.exp( dhmm.emission_estimate( e_seq ) )
```
### The Probability of the State and Emission Sequences
Similarly, we can compute the probability of the state and emission sequences given the model parameters.
```
np.exp( dhmm.estimate( s_seq, e_seq ) )
```
**Notice!** - You can compute the probability estimates for a whole dataset with one command; see [Chapter 4](#dsest).
### Generate Artificial Dataset
You can easily generate many sequences at once by using the *generate_data* function.
The generated emission sequences are in a form suitable for training the parameters. You can pass *times=True* if you also want to generate the corresponding equidistant time sequences, as shown after the code below.
```
seq_num= 3 #number of data sequences
seq_len= 10 #length of each sequence
dhmm.generate_data( (seq_num,seq_len) )
```
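For example, following the return pattern used later in Part 3 (times, states, emissions), the same call with equidistant time stamps would look roughly like this:
```
# also return the (equidistant) time sequences; the middle slot holds the state sequences
t_seqs, _, e_seqs = dhmm.generate_data( (seq_num, seq_len), times=True )
```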
### Parameter Estimation - Baum-Welch Algorithm
We usually do not know the real parameters of the model, but if we have sufficient data, we can estimate them with the EM algorithm.
Here we will have several output-variable (emission) sequences, and we will show how to use them to train the model parameters.
Let's start by creating some artificial data. We will use the previously defined *dhmm* model for it.
**Notice!** - For more detailed information about possible datasets, see [Chapter 4](#dataset).
```
seq_num = 5
seq_len = 50
_ , data = dhmm.generate_data( (seq_num,seq_len) )
data
```
Now, we will create the model with random parameters, that will be eventually trained to match the data.
```
dhmm_r = hmms.DtHMM.random( 2,3 )
# We can print all the parameters.
hmms.print_parameters( dhmm_r )
```
Let's compare the dataset likelihood estimates of the model used to generate the data and of the random-parameter model.
```
print( "Generator model:" , np.exp( dhmm.data_estimate(data) ) )
print( "Random model: " ,np.exp( dhmm_r.data_estimate(data) ) )
```
Most likely, the probability that the data was generated by the random model is extremely low.
Now we can take the random model and re-estimate it to fit the data better.
```
dhmm_r.baum_welch( data, 10 )
print( "Reestimated model after 10 iterations: " ,np.exp( dhmm_r.data_estimate(data) ) )
```
The probability of the re-estimated model should now be similar to (possibly even higher than) that of the generator model. If it is not, you can try running the estimation again from different randomly generated models; the estimation may have fallen into a local optimum.
If you are satisfied with the results, you can run some more iterations to fine-tune it.
```
dhmm_r.baum_welch( data, 100 )
print( "Reestimated model after 110 iterations: " ,np.exp( dhmm_r.data_estimate(data) ) )
```
We can compare the parameters of the model.
```
hmms.print_parameters( dhmm_r )
hmms.print_parameters( dhmm )
```
Alternatively, we can run the *baum_welch* method with *est=True* to get the learning curve of the estimated probabilities.
```
dhmm_r = hmms.DtHMM.random(2,3)
out = dhmm_r.baum_welch( data, 50, est=True )
np.exp(out)
```
Let's plot it, comparing the results as a ratio to the *real* data-generator model. (Notice that it is a ratio of logarithmic probability values.)
```
real = dhmm.data_estimate(data)
#For better visibility of the graph, we cut first two values.
plt.plot( out[2:] / real )
plt.show()
```
### Maximum Likelihood Estimation
Sometimes we have a dataset of full observations (i.e. both emission and hidden-state sequences). We can use the method *maximum_likelihood_estimation* to estimate the most likely parameters.
```
seq_num = 5
seq_len = 50
#generate artificial dataset of both hidden states and emissions sequences
s_seqs , e_seqs = dhmm.generate_data( (seq_num,seq_len) )
dhmm_r = hmms.DtHMM.random(2,3)
dhmm_r.maximum_likelihood_estimation(s_seqs,e_seqs)
log_est = dhmm.full_data_estimate ( s_seqs, e_seqs )
log_est_mle = dhmm_r.full_data_estimate( s_seqs, e_seqs )
print("The probability of the dataset being generated by the original model is:", \
np.exp(log_est), "." )
print("The probability of the dataset being generated by the MLE model is:", \
np.exp(log_est_mle), "." )
```
For the discrete-time model, the probability of the dataset under the parameters from *maximum_likelihood_estimation* is always higher than or equal to the probability of it being generated by the original model; this is a consequence of the statistical inaccuracy of a finite dataset, and can be verified with the quick check below.
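Since both log-likelihoods were computed above, the check is a one-liner (not part of the original tutorial):
```
# the MLE-fitted model never assigns the training dataset a lower likelihood than the generating model
assert log_est_mle >= log_est
```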
<a id='cthmm'></a>
Part 2: Continuous Time Hidden Markov Model
-----------------------------------------------------
```
import numpy as np
import matplotlib.pyplot as plt
import hmms
%matplotlib inline
```
### Construct CtHMM
Construction of CtHMM is similar to the discrete model.
You can directly initialize the CtHMM by passing the **model parameters**.
We will create simple CtHMM of three hidden states and three output variables.
```
# Q is the matrix of transition rates from state [row] to state [column].
Q = np.array( [[-0.375,0.125,0.25],[0.25,-0.5,0.25],[0.25,0.125,-0.375]] )
# B is the matrix of probabilities that the state [row] will emit output variable [column].
B = np.array( [[0.8,0.05,0.15],[0.05,0.9,0.05],[0.2,0.05,0.75]] )
# Pi is the vector of initial state probabilities.
Pi = np.array( [0.6,0,0.4] )
# Create CtHMM by given parameters.
chmm = hmms.CtHMM(Q,B,Pi)
```
Or you can initialize it with **random parameters** by passing the number of hidden states and output variables.
By default, the parameters are generated from an exponential distribution and then normalized to sum to one.
```
chmm_random = hmms.CtHMM.random(3,3)
```
You can choose generation from a uniform distribution by passing the parameter *method*.
```
chmm_random = hmms.CtHMM.random(3,3,method="unif")
```
### Save & Read from File
Once you have created the model you can save its parameters in file simply by calling save_params method.
```
chmm.save_params("hello_cthmm")
```
The method stores the parameters in .npz format.
The saved file can later be used to read the parameters for model initialization.
```
chmm_from_file = hmms.CtHMM.from_file( "hello_cthmm.npz" )
```
### Set & Get Parameters
Later you can always set parameters with triple of methods corresponding to the constructors.
```
chmm.set_params(Q,B,Pi)
chmm.set_params_random(3,3)
chmm.set_params_from_file( "hello_cthmm.npz" )
```
You can **get** parameters by calling them separately,
```
chmm.q, chmm.b, chmm.pi
```
or get them all together as the triple.
```
(A,B,Pi) = chmm.params
```
### Generate Random Sequence
Now we can use our model to **generate** time, state and emission sequence.
The model will **randomly** choose which transition or emission will happen, taking into consideration the parameters we have previously defined.
The times are generated with **exponential** waiting times; you can define the parameter of the exponential distribution as the second optional argument.
```
seq_len = 10
t_seq, s_seq, e_seq = chmm.generate( seq_len, 0.5)
#resize plot
plt.rcParams['figure.figsize'] = [20,20]
hmms.plot_hmm( s_seq, e_seq, time=t_seq )
```
Optionally, you can generate the sequences by passing your own time sequence (as a list or numpy array) with the desired observation times.
```
t_seq, s_seq, e_seq = chmm.generate( 7, time=[0,3,5,7,8,11,14])
#resize plot
plt.rcParams['figure.figsize'] = [20,20]
hmms.plot_hmm( s_seq, e_seq, time=t_seq )
```
### Find Most Likely State Sequence
If we have corresponding time and emission sequences, we can find the most probable state sequence that would generate them given the current model parameters. Notice that it can differ from the actual sequence that generated the emissions.
We will use the Viterbi algorithm for the calculation.
```
( log_prob, s_seq ) = chmm.viterbi( t_seq, e_seq )
# Let's print the most likely state sequence, it can be same or differ from the sequence above.
hmms.plot_hmm( s_seq, e_seq, time = t_seq )
print( "Probability of being generated by the found state sequence:", np.exp( log_prob ) )
```
### State Confidence
We may want to know the probability that an emission was generated by some particular state. You can get the result for every state at every time step by calling the method *states_confidence*. **Notice** that the Viterbi most probable sequence presented above does not necessarily contain the most probable states from this method, because the per-state result ignores the transition probabilities between consecutive states.
```
log_prob_table = chmm.states_confidence( t_seq, e_seq )
np.exp( log_prob_table )
```
### The Probability of the Time and Emission Sequences
We can compute the probability, of the emission sequence given model and its time sequence.
```
np.exp( chmm.emission_estimate( t_seq, e_seq ) )
```
### The Probability of the State, Time and Emission Sequences
Similarly, we can compute the probability of the state, time and emission sequences given the model parameters.
```
np.exp( chmm.estimate( s_seq, t_seq, e_seq ) )
```
**Notice!** - You can compute the probability estimates for a whole dataset with one command; see [Chapter 4](#dsest).
### Generate Artificial Dataset
You can easily generate many sequences at once by using the *generate_data* function.
The generated data are in a form suitable for training the parameters.
You can pass *states=True* if you also want to generate the corresponding state sequences.
The times are generated with **exponential** waiting times; you can define the parameter of the exponential distribution as the second optional argument.
```
seq_num= 5 #number of data sequences
seq_len= 30 #length of each sequence
t,e = chmm.generate_data( (seq_num,seq_len) )
t,e
```
### Parameter Estimation - Continuous Version of the Baum-Welch Algorithm
We will use the previously generated data for training a randomly generated model.
**Notice!** - Always use integers in your time-point dataset. Float times are also supported, but they can make the computation significantly slower, so you should know why you are using them. For more detailed information, see [Chapter 4](#dataset).
```
chmm_r = hmms.CtHMM.random( 3,3 )
# We can print all the parameters.
hmms.print_parameters( chmm_r )
```
Now we can compare the probabilities that the data was generated by each model. The ratio is most probably not as large as in the discrete model: the intervals between observations are a source of many unknowns, which pushes the probability of the real model down.
```
print( "Generator model:" , np.exp( chmm.data_estimate(t,e) ) )
print( "Random model: " ,np.exp( chmm_r.data_estimate(t,e) ) )
```
Let's run the EM algorithm for couple of iterations.
```
out = chmm_r.baum_welch( t,e, 100, est=True )
np.exp(out)
```
We will plot its probability estimates as a ratio to the generator model. (Notice that it is a ratio of log-probabilities.)
```
real = chmm.data_estimate( t, e )
#For better visibility of the graph, we cut first two values.
plt.plot( out[2:] / real )
plt.show()
```
### Maximum Likelihood Estimation
Sometimes we have a dataset of full observations (i.e. both emission and hidden-state sequences). We can use the method *maximum_likelihood_estimation* to estimate the most likely parameters. The usage and parameters of the method are similar to the *baum_welch* method.
```
seq_num = 5
seq_len = 50
#generate artificial dataset of times, hidden states and emissions sequences
t_seqs, s_seqs, e_seqs = chmm.generate_data( (seq_num,seq_len), states=True )
chmm_r = hmms.CtHMM.random(3,3)
graph = chmm_r.maximum_likelihood_estimation(s_seqs,t_seqs,e_seqs,100,est=True )
#print the convergence graph
log_est = chmm.full_data_estimate ( s_seqs,t_seqs,e_seqs )
plt.plot( graph / log_est )
plt.show()
```
<a id='conv'></a>
Part 3: Comparison of Model Convergence
-----------------------------------------------------
In this chapter we will compare the convergence rates of the discrete and continuous models. It will also show some functions useful for converting between the models' parameters.
```
import numpy as np
import matplotlib.pyplot as plt
import hmms
%matplotlib inline
```
We will start by defining the continuous-time model. For those who have read the previous section, it will be familiar.
```
Q = np.array( [[-0.375,0.125,0.25],[0.25,-0.5,0.25],[0.25,0.125,-0.375]] )
B = np.array( [[0.8,0.05,0.15],[0.05,0.9,0.05],[0.2,0.05,0.75]] )
Pi = np.array( [0.6,0,0.4] )
chmm = hmms.CtHMM( Q,B,Pi )
hmms.print_parameters( chmm )
```
We can simply create a discrete model with equivalent parameters using the function *get_dthmm_params*.
By default, it will create a model whose transition probabilities equal the one-time-unit transition probabilities of the continuous model. You can pass an optional parameter for a different time step.
```
dhmm = hmms.DtHMM( *chmm.get_dthmm_params() )
hmms.print_parameters( dhmm )
```
We can let the discrete model generate data suitable for both models by passing the *times* parameter as *True*.
```
t,_,e = dhmm.generate_data( (50,50), times=True )
# The free space in the return triple is for the state sequences,
# we do not need them for the training
```
We can compare the data estimates using both models. (They should be the same.)
```
creal = chmm.data_estimate(t,e)
dreal = dhmm.data_estimate(e)
print("Data estimation by continuous model:", creal)
print("Data estimation by discrete model: ", dreal)
```
Now we will create two equivalent random models.
```
ct = hmms.CtHMM.random(3,3)
dt = hmms.DtHMM( *ct.get_dthmm_params() )
hmms.print_parameters( ct )
hmms.print_parameters( dt )
```
We will train them at our dataset. (It can take a while.)
```
iter_num = 50
outd = dt.baum_welch( e, iter_num, est=True )
outc = ct.baum_welch( t,e, iter_num, est=True )
outd,outc
```
We can plot and compare both convergence rates. By the nature of the models, the continuous model will probably converge a bit more slowly, but it will finally reach a similar value.
```
plt.plot( outd[1:] / dreal )
plt.plot( outc[1:] / dreal, color="red" )
#plt.savefig('my_plot.svg') #Optional save the figure
plt.show()
```
<a id='dataset'></a>
## Part 4: Advanced Work with Datasets
```
import numpy as np
import matplotlib.pyplot as plt
import hmms
%matplotlib inline
```
### Various Lengths of Training Vectors
There are two supported data structures that you can pass for training:
#### 1. The Numpy Matrix
A two-dimensional array where the rows are the training sequences.
However, this option requires all the vectors to have the same size.
```
data_n = np.array( [[0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 0, 1, 0],
[2, 0, 1, 0, 0, 0, 0, 0, 0, 0]] )
dhmm_r = hmms.DtHMM.random( 2,3 )
graph_n = dhmm_r.baum_welch( data_n, 10, est=True )
np.exp( dhmm_r.data_estimate(data_n) )
```
#### 2. The List of Numpy Vectors
The standard Python list, consisting of Numpy vectors.
Every vector can have different length.
```
data_l = [ np.array( [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1] ) ,
np.array( [0, 1, 0, 0, 1, 0, 1 ] ),
np.array( [2, 0, 1, 0, 0, 0, 0, 0, 0, 0] ) ]
dhmm_r = hmms.DtHMM.random( 2,3 )
graph_l = dhmm_r.baum_welch( data_l, 10, est=True )
np.exp( dhmm_r.data_estimate(data_l) )
# you can plot the graphs, just for fun.
plt.plot( graph_n, color='red' )
plt.plot( graph_l )
```
#### Continuous-Time HMM
The work with datasets in CtHMM is analogous.
```
data_n = np.array( [[0, 0, 0, 1],
[0, 2, 0, 0],
[2, 0, 1, 0] ] )
time_n = np.array( [[0, 1, 3, 4],
[0, 2, 3, 5],
[0, 2, 4, 6] ] )
chmm_r = hmms.CtHMM.random( 2,3 )
graph_n = chmm_r.baum_welch( time_n, data_n, 10, est=True )
np.exp( chmm_r.data_estimate(time_n, data_n) )
data_l = [ np.array( [0, 0, 2, 0 ] ) ,
np.array( [0, 1, 0, 0, 1 ] ),
np.array( [2, 0, 1 ] ) ]
time_l = [ np.array( [0, 1, 2, 4 ] ) ,
np.array( [0, 1, 3, 5, 6 ] ),
np.array( [0, 2, 3 ] ) ]
chmm_r = hmms.CtHMM.random( 2,3 )
graph_n = chmm_r.baum_welch( time_l, data_l, 10, est=True )
np.exp( chmm_r.data_estimate(time_l, data_l) )
```
### Time Sequences in Floats
The time-sequence datatype can be **integers** or **floats**.
Although both datatypes are allowed, it is strongly *advised* to *use* integers, or floats with *integral distances*
(be careful about floating-point imprecision here).
*Non-integral* time intervals between two neighbouring observations are *computationally costly*, as they do not allow matrix powers to be used, so more complex operations are needed.
Below are two examples with float data and possible *tricks* to make the computation *faster*.
#### Example 1: Change the interval lengths to integers
```
data = np.array( [[0, 0, 0, 1],
[0, 2, 0, 0],
[2, 0, 1, 0] ] )
time = np.array( [[0, 1.5, 3.4, 4.7],
[0, 2.6, 5.7, 8.9],
[0, 2.2, 4.1, 9.8] ] )
```
Use data as float
```
chmm_r = hmms.CtHMM.random( 2,3 )
graph_f = chmm_r.baum_welch( time, data, 10, est=True )
np.exp( chmm_r.data_estimate(time, data) )
```
or use the **trick** of making the intervals integral.
Here it is enough to **multiply** the times by 10.
**Notice**: Here we are working with a randomly generated jump-rate matrix; otherwise you would need to rescale its values when multiplying the times, as sketched after the code below.
```
chmm_r = hmms.CtHMM.random( 2,3 )
graph = chmm_r.baum_welch( time*10, data, 10, est=True )
np.exp( chmm_r.data_estimate( time*10, data ) )
```
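A minimal numpy/scipy sketch (not part of the library tutorial) of why this rescaling works: the transition probabilities over an interval depend only on the product of the rate matrix and the interval length, so multiplying all time stamps by a factor k is compensated by dividing a fixed rate matrix by k.
```
import numpy as np
from scipy.linalg import expm

# hypothetical 2-state jump-rate matrix, for illustration only
Q = np.array([[-0.3,  0.3],
              [ 0.2, -0.2]])
t = 1.5   # an original (float) observation interval
k = 10    # scaling factor that makes the intervals integral

# the interval transition probabilities expm(Q*t) are unchanged when the
# times are multiplied by k and the rates are divided by k
assert np.allclose(expm(Q * t), expm((Q / k) * (k * t)))
```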
#### Example 2: Approximate the time values
Sometimes, depending upon the data, the exact observation time may not be important, so the small approximation can be helpful to get better computational time.
```
data = np.array( [[0, 0, 0, 1],
[0, 2, 0, 0],
[2, 0, 1, 0] ] )
time = np.array( [[0, 1.54587435, 3.4435434, 4.74535345],
[0, 2.64353245, 5.7435435, 8.94353454],
[0, 2.24353455, 4.1345435, 9.83454354] ] )
```
Use data as float
```
chmm_r = hmms.CtHMM.random( 2,3 )
graph_f = chmm_r.baum_welch( time, data, 10, est=True )
np.exp( chmm_r.data_estimate(time, data) )
```
or perform the **trick**. Here multiply by 100 and round to the integers.
```
time = np.round( time * 100 )
chmm_r = hmms.CtHMM.random( 2,3 )
graph = chmm_r.baum_welch( time, data, 10, est=True )
np.exp( chmm_r.data_estimate(time, data) )
```
<a id='dsest'></a>
### Dataset Probability Estimates
We have shown previously how to compute sequence probability estimates in [the discrete](#dtest) and [continuous](#ctest) models.
Here we show how to do it for a whole dataset using just one command.
(We will show it on the continuous-time model; the discrete one is analogous, just omit the time sequences.)
```
seq_num= 10 #number of data sequences
seq_len= 10 #length of each sequence
# Create data and generate model
chmm = hmms.CtHMM.random(3,3)
t,s,e = chmm.generate_data( (seq_num,seq_len), states=True )
```
#### The Probability of the Time and Emission Sequences
We can compute the probability, of the emissions sequence given model and its time sequences.
```
np.exp( chmm.data_estimate( t, e ) )
```
#### The Probability of the State, Time and Emission Sequences
Similary we can compute the probabilty of the state, time and emission sequences given the model parameters.
```
np.exp( chmm.full_data_estimate( s, t, e ) )
```
### Multi Training
For more convenient training from various random beginnings, you can use the *multi_train* function.
It has the parameters
method:
- 'exp' - [default] Use exponential distribution for random initialization
- 'unif' - Use uniform distribution for random initialization
and ret:
- 'all' - Return all trained models, sorted by their probability estimation
- 'best' - [default] Return only the model with the best probability estimation
```
t,e = chmm.generate_data( (5,10) )
hidden_states = 3
runs = 10
iterations = 50
out = hmms.multi_train_ct( hidden_states , t, e, runs, iterations, ret='all', method='exp')
out
```
<hr/>
You can play with the models as you like and feel free to share your result with me, if you have made some interesting experiment!
Contact: ([email protected])
### Experimental features
#### Fast Convergence
```
#The experiment is frozen
seq_num= 1 #number of data sequences
seq_len= 4 #length of each sequence
t,e = chmm.generate_data( (seq_num,seq_len) )
t,e
t = np.array([[ 0,1,3,5,6,7,9,11,12]])
e = np.array([[ 0,0,0,1,2,1,0,0,1]])
ct1 = hmms.CtHMM.random(3,3)
ct2 = hmms.CtHMM( *ct1.params )
iter_num = 50
out1 = ct1.baum_welch( t,e, iter_num, est=True )
#out2 = ct2.baum_welch( t,e, iter_num )
out1,out2
plt.plot( out1[1:] / dreal , color = "red" )
plt.plot( out2[1:] / dreal )
#plt.savefig('graph.svg') #Optional save the figure
plt.show()
hmms.print_parameters(ct1)
hmms.print_parameters(ct2)
```
#### Exponential random generation
```
import numpy as np
import matplotlib.pyplot as plt
import hmms
%matplotlib inline
# Q is the matrix of transition rates from state [row] to state [column].
Q = np.array( [[-0.375,0.125,0.25],[0.25,-0.5,0.25],[0.25,0.125,-0.375]] )
# B is the matrix of probabilities that the state [row] will emmit output variable [column].
B = np.array( [[0.8,0.05,0.15],[0.05,0.9,0.05],[0.2,0.05,0.75]] )
# Pi is the vector of initial state probabilities.
Pi = np.array( [0.6,0,0.4] )
# Create CtHMM by given parameters.
chmm = hmms.CtHMM(Q,B,Pi)
seq_num= 5 #number of data sequences
seq_len= 30 #length of each sequence
t,e = chmm.generate_data( (seq_num,seq_len) )
chmm_r = hmms.CtHMM.random( 3,3, method='unif' )
chmm_re = hmms.CtHMM.random( 3,3, method='exp' )
out = chmm_r.baum_welch( t,e, 10 )
oute = chmm_re.baum_welch( t,e, 10 )
#aout = np.average(out, axis=0)
#aoute = np.average(oute, axis=0)
out = hmms.multi_train(3, t, e, 10, 200, ret='all', method='exp')
aout = np.average(out, axis=0)
aoute = np.average(oute, axis=0)
mout = np.min(out, axis=0)
moute = np.min(oute, axis=0)
real = chmm.data_estimate( t, e )
#For better visibility of the graph, we cut first two values.
offset = 3
#plt.plot( aout[offset:] / real , color = "red" )
#plt.plot( aoute[offset:] / real , color = "blue" )
#plt.plot( mout[offset:] / real , color = "orange" )
#plt.plot( moute[offset:] / real , color = "green")
for line in out:
    print( line/real )
    plt.plot( line[offset:] / real )
plt.show()
real = chmm.data_estimate( t, e )
offset = 3
print(out)
for line in out:
    #graph= line[1]
    #print( type(line) )
    #print( line[1]/real )
    plt.plot( line[1][offset:] / real )
oute
```
### Test different length of vectors
```
import numpy as np
import matplotlib.pyplot as plt
import hmms
%matplotlib inline
data_l = [ np.array( [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1] ) ,
np.array( [0, 1, 0, 0, 1, 0, 1 ] ),
np.array( [2, 0, 1, 0, 0, 0, 0, 0, 0, 0] ) ]
data_n = np.array( [[0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 2, 2, 2, 1, 0, 1, 0],
[2, 0, 1, 0, 0, 0, 0, 0, 0, 0]] )
```
#### Test Numpy matrix
```
dhmm_r = hmms.DtHMM.random( 2,3 )
graph_n = dhmm_r.baum_welch( data_n, 10, est=True )
dhmm_r.maximum_likelihood_estimation(data_n, data_n)
np.exp( dhmm_r.data_estimate(data_n) )
```
#### Test List of numpy arrays
```
dhmm_r = hmms.DtHMM.random( 2,3 )
graph_l = dhmm_r.baum_welch( data_l, 10, est=True )
np.exp( dhmm_r.data_estimate(data_l) )
plt.plot( graph_n, color='red' )
plt.plot( graph_l )
```
Now do the same for the continuous model.
```
data_l = [ np.array( [0, 0, 2, 0 ] ) ,
np.array( [0, 1, 0, 0, 1 ] ),
np.array( [2, 0, 1 ] ) ]
data_n = np.array( [[0, 0, 0, 1],
[0, 2, 0, 0],
[2, 0, 1, 0] ] )
time_l = [ np.array( [0, 1, 2, 4 ] ) ,
np.array( [0, 1, 3, 5, 6 ] ),
np.array( [0, 2, 3 ] ) ]
time_n = np.array( [[0, 1, 3, 4],
[0, 2, 3, 5],
[0, 2, 4, 6] ] )
chmm_r = hmms.CtHMM.random( 2,3 )
graph_n = chmm_r.baum_welch( time_n, data_n, 10, est=True )
np.exp( chmm_r.data_estimate(time_n, data_n) )
chmm_r = hmms.CtHMM.random( 2,3 )
graph_n = chmm_r.baum_welch( time_l, data_l, 10, est=True )
np.exp( chmm_r.data_estimate(time_l, data_l) )
```
### Test double times
```
import numpy as np
import matplotlib.pyplot as plt
import hmms
%matplotlib inline
data = [ np.array( [0, 0, 2, 0 ] ) ,
np.array( [0, 1, 0, 0, 1 ] ),
np.array( [2, 0, 1 ] ) ]
time_i = [ np.array( [0, 1, 2, 4 ] ) ,
np.array( [0, 1, 3, 5, 6 ] ),
np.array( [0, 2, 3 ] ) ]
time_f = [ np.array( [0, 1.1, 2.1, 4.1 ] ) ,
np.array( [0, 1.1, 3.1, 5.1, 6.1 ] ),
np.array( [0, 2.1, 3.1 ] ) ]
chmm_r = hmms.CtHMM.random( 2,3 )
graph_i = chmm_r.baum_welch( time_i, data, 10, est=True )
np.exp( chmm_r.data_estimate(time_i, data) )
```
The same run with double (float) times:
```
chmm_r = hmms.CtHMM.random( 2,3 )
graph_f = chmm_r.baum_welch( time_f, data, 10, est=True )
np.exp( chmm_r.data_estimate(time_f, data) )
plt.plot( graph_i, color='red' )
plt.plot( graph_f )
```
### Soft & Hard
```
import numpy as np
import matplotlib.pyplot as plt
import hmms
%matplotlib inline
Q = np.array( [[-0.375,0.125,0.25],[0.25,-0.5,0.25],[0.25,0.125,-0.375]] )
B = np.array( [[0.8,0.05,0.15],[0.05,0.9,0.05],[0.2,0.05,0.75]] )
Pi = np.array( [0.6,0,0.4] )
chmm = hmms.CtHMM( Q,B,Pi )
#chmm = hmms.CtHMM.random(15,15)
t,e = chmm.generate_data( (50,10) )
chmm_s = hmms.CtHMM.random( 3,3 )
chmm_h = hmms.CtHMM( * chmm_s.params )
chmm_c = hmms.CtHMM( * chmm_s.params )
print("comb")
#graph_comb = chmm_c.baum_welch( t, e, 5, est=True, method="hard" )
#graph_comb = np.append( graph_comb, chmm_c.baum_welch( t, e, 95, est=True, method="soft" ) )
print("hard")
graph_hard = chmm_h.baum_welch( t, e, 100, est=True, method="hard" )
print("soft")
graph_soft = chmm_s.baum_welch( t, e, 100, est=True, method="soft" )
real = chmm.data_estimate( t,e )
#real = 0
#for tt,ee in zip(t,e):
# x,_ = chmm.viterbi( tt, ee )
# real += x
#For better visibility of the graph, we cut first two values.
plt.plot( graph_soft[1:] / real, color="red" )
plt.plot( graph_hard[1:] / real, color="blue" )
##plt.plot( graph_comb[1:-1] / real, color="purple")
plt.rcParams['figure.figsize'] = [20,20]
plt.savefig('graph.png')
plt.show()
print( chmm_h.data_estimate( t,e ) )
hmms.print_parameters( chmm )
hmms.print_parameters( chmm_s )
hmms.print_parameters( chmm_h )
chmm_h.check_params()
```
### Int-intervals vs Double-intervals
```
import numpy as np
import matplotlib.pyplot as plt
import hmms
%matplotlib inline
Q = np.array( [[-0.375,0.125,0.25],[0.25,-0.5,0.25],[0.25,0.125,-0.375]] )
B = np.array( [[0.8,0.05,0.15],[0.05,0.9,0.05],[0.2,0.05,0.75]] )
Pi = np.array( [0.6,0,0.4] )
chmm = hmms.CtHMM( Q,B,Pi )
t,e = chmm.generate_data( (50,50) )
chmm_i = hmms.CtHMM.random( 3,3 )
chmm_d = hmms.CtHMM( * chmm_i.params )
import time
time0 = time.time()
graph_i = chmm_i.baum_welch( t, e, 100, est=True, method="soft", fast=True )
time1 = time.time()
graph_d = chmm_d.baum_welch( t, e, 100, est=True, method="soft", fast=False )
time2 = time.time()
print(time2-time1)
print(time1-time0)
chmm_i.print_ts()
chmm_d.print_ts()
real = chmm.data_estimate( t,e )
plt.plot( graph_i[1:] / real, color="red" )
plt.plot( graph_d[1:] / real, color="blue" )
hmms.print_parameters( chmm_i )
hmms.print_parameters( chmm_d )
plt.rcParams['figure.figsize'] = [25,25]
plt.show()
chmm_i.q[0,0]
chmm_d.q[0,0]
```
#### zeros
```
import numpy as np
import matplotlib.pyplot as plt
import hmms
%matplotlib inline
# Q is the matrix of transition rates from state [row] to state [column].
Q = np.array( [[-0.375,0.125,0.25],[0.25,-0.5,0.25],[0.25,0.125,-0.375]] )
# B is the matrix of probabilities that the state [row] will emmit output variable [column].
B = np.array( [[0.8,0.05,0.15],[0.05,0.9,0.05],[0.2,0.05,0.75]] )
# Pi is the vector of initial state probabilities.
Pi = np.array( [0.6,0,0.4] )
# Create CtHMM by given parameters.
chmm = hmms.CtHMM(Q,B,Pi)
t,e = chmm.generate_data( (10,50) )
# Q is the matrix of transition rates from state [row] to state [column].
Q = np.array( [[-0.125,0.125,0.0],[0.45,-0.45,0.0],[0.25,0.125,-0.375]] )
# B is the matrix of probabilities that the state [row] will emmit output variable [column].
B = np.array( [[0.8,0.05,0.15],[0.05,0.9,0.05],[0.2,0.05,0.75]] )
# Pi is the vector of initial state probabilities.
Pi = np.array( [0.6,0.4,0.0] )
# Create CtHMM by given parameters.
chmm_i = hmms.CtHMM(Q,B,Pi)
graph_i = chmm_i.baum_welch( t, e, 100, est=True, method="soft", fast=True )
hmms.print_parameters( chmm_i )
```
#### random tests
```
import numpy as np
import matplotlib.pyplot as plt
import hmms
%matplotlib inline
Q = np.array( [[-0.375,0.125,0.25],[0.25,-0.5,0.25],[0.25,0.125,-0.375]] )
B = np.array( [[0.8,0.05,0.15],[0.05,0.9,0.05],[0.2,0.05,0.75]] )
Pi = np.array( [0.6,0,0.4] )
chmm = hmms.CtHMM( Q,B,Pi )
t,e = chmm.generate_data( (50,50) )
chmm_i = hmms.CtHMM.random( 3,3 )
chmm_d = hmms.CtHMM( * chmm_i.params )
graph_i = chmm_i.baum_welch( t, e, 100, est=True, method="soft", fast=True )
real = chmm.data_estimate( t,e )
plt.plot( graph_i[1:]/real , color="red" )
#hmms.print_parameters( chmm_i )
#plt.rcParams['figure.figsize'] = [25,25]
plt.show()
real = chmm.data_estimate( t,e )
plt.plot( real-graph_i[1:] , color="red" )
#hmms.print_parameters( chmm_i )
#plt.rcParams['figure.figsize'] = [25,25]
plt.show()
real = chmm.data_estimate( t,e )
plt.plot( np.exp(graph_i[1:] - real) , color="red" )
print(np.exp(graph_i[1:] - real))
#hmms.print_parameters( chmm_i )
#plt.rcParams['figure.figsize'] = [25,25]
plt.show()
real = chmm.data_estimate( t,e )
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_yscale("log", nonposy='clip')
plt.plot( np.exp(graph_i[1:] - real) , color="red" )
#plt.rcParams['figure.figsize'] = [25,25]
plt.show()
```
# Notebook 13: Using Deep Learning to Study SUSY with Pytorch
## Learning Goals
The goal of this notebook is to introduce the powerful PyTorch framework for building neural networks and use it to analyze the SUSY dataset. After this notebook, the reader should understand the mechanics of PyTorch and how to construct DNNs using this package. In addition, the reader is encouraged to explore the GPU backend available in Pytorch on this dataset.
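As a starting point for that exploration, a minimal device-selection snippet (an assumption on our part, not code from this notebook) looks like the following; the model and each mini-batch would then be moved to the chosen device with `.to(device)`.
```
import torch

# use a GPU if one is available, otherwise fall back to the CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
```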
## Overview
In this notebook, we use Deep Neural Networks to classify the supersymmetry dataset, first introduced by Baldi et al. in [Nature Communication (2015)](https://www.nature.com/articles/ncomms5308). The SUSY data set consists of 5,000,000 Monte-Carlo samples of supersymmetric and non-supersymmetric collisions with $18$ features. The signal process is the production of electrically-charged supersymmetric particles which decay to $W$ bosons and an electrically-neutral supersymmetric particle that is invisible to the detector.
The first $8$ features are "raw" kinematic features that can be directly measured from collisions. The final $10$ features are "hand constructed" features that have been chosen using physical knowledge and are known to be important in distinguishing supersymmetric and non-supersymmetric collision events. More specifically, they are given by the column names below.
In this notebook, we study this dataset using Pytorch.
```
from __future__ import print_function, division
import os,sys
import numpy as np
import torch # pytorch package, allows using GPUs
# fix seed
seed=17
np.random.seed(seed)
torch.manual_seed(seed)
```
## Structure of the Procedure
Constructing a Deep Neural Network to solve ML problems is a multiple-stage process. Quite generally, one can identify the key steps as follows:
* ***step 1:*** Load and process the data
* ***step 2:*** Define the model and its architecture
* ***step 3:*** Choose the optimizer and the cost function
* ***step 4:*** Train the model
* ***step 5:*** Evaluate the model performance on the *unseen* test data
* ***step 6:*** Modify the hyperparameters to optimize performance for the specific data set
Below, we sometimes combine some of these steps together for convenience.
Notice that we take a rather different approach, compared to the simpler MNIST Keras notebook. We first define a set of classes and functions and run the actual computation only in the very end.
### Step 1: Load and Process the SUSY Dataset
The supersymmetry dataset can be downloaded from the UCI Machine Learning repository on [https://archive.ics.uci.edu/ml/machine-learning-databases/00279/SUSY.csv.gz](https://archive.ics.uci.edu/ml/machine-learning-databases/00279/SUSY.csv.gz). The dataset is quite large. Download the dataset and unzip it in a directory.
Loading data in Pytorch is done by creating a user-defined class, which we name `SUSY_Dataset`, that is a child of the `torch.utils.data.Dataset` class. This ensures that all necessary attributes required for the processing of the data during the training and test stages are easily inherited. The `__init__` method of our custom data class should contain the usual code for loading the data, which is problem-specific, and has been discussed for the SUSY data set in Notebook 5. More importantly, the user-defined data class must override the `__len__` and `__getitem__` methods of the parent `Dataset` class. The former returns the size of the data set, while the latter allows the user to access a particular data point from the set by specifying its index.
```
from torchvision import datasets # load data
class SUSY_Dataset(torch.utils.data.Dataset):
    """SUSY pytorch dataset."""
    def __init__(self, data_file, root_dir, dataset_size, train=True, transform=None, high_level_feats=None):
        """
        Args:
            data_file (string): Path to the csv file with the data.
            root_dir (string): Directory containing the data file.
            train (bool, optional): If set to `True` load training data.
            transform (callable, optional): Optional transform to be applied on a sample.
            high_level_feats (bool, optional): If set to `True`, work with high-level features only.
                If set to `False`, work with low-level features only.
                Default is `None`: work with all features.
        """
        import pandas as pd
        features=['SUSY','lepton 1 pT', 'lepton 1 eta', 'lepton 1 phi', 'lepton 2 pT', 'lepton 2 eta', 'lepton 2 phi',
                  'missing energy magnitude', 'missing energy phi', 'MET_rel', 'axial MET', 'M_R', 'M_TR_2', 'R', 'MT2',
                  'S_R', 'M_Delta_R', 'dPhi_r_b', 'cos(theta_r1)']
        low_features=['lepton 1 pT', 'lepton 1 eta', 'lepton 1 phi', 'lepton 2 pT', 'lepton 2 eta', 'lepton 2 phi',
                      'missing energy magnitude', 'missing energy phi']
        high_features=['MET_rel', 'axial MET', 'M_R', 'M_TR_2', 'R', 'MT2','S_R', 'M_Delta_R', 'dPhi_r_b', 'cos(theta_r1)']
        # number of datapoints to work with
        df = pd.read_csv(root_dir+data_file, header=None, nrows=dataset_size, engine='python')
        df.columns=features
        Y = df['SUSY']
        X = df[[col for col in df.columns if col!="SUSY"]]
        # set training and test data size
        train_size=int(0.8*dataset_size)
        self.train=train
        if self.train:
            X=X[:train_size]
            Y=Y[:train_size]
            print("Training on {} examples".format(train_size))
        else:
            X=X[train_size:]
            Y=Y[train_size:]
            print("Testing on {} examples".format(dataset_size-train_size))
        self.root_dir = root_dir
        self.transform = transform
        # make datasets using only the 8 low-level features, only the 10 high-level features, or all 18
        if high_level_feats is None:
            self.data=(X.values.astype(np.float32),Y.values.astype(int))
            print("Using both high and low level features")
        elif high_level_feats is True:
            self.data=(X[high_features].values.astype(np.float32),Y.values.astype(int))
            print("Using high-level features only.")
        elif high_level_feats is False:
            self.data=(X[low_features].values.astype(np.float32),Y.values.astype(int))
            print("Using low-level features only.")
    # override __len__ and __getitem__ of the Dataset() class
    def __len__(self):
        return len(self.data[1])
    def __getitem__(self, idx):
        sample=(self.data[0][idx,...],self.data[1][idx])
        if self.transform:
            sample=self.transform(sample)
        return sample
```
Last, we define a helper function `load_data()` that accepts as a required argument the set of parameters `args`, and returns two generators, `train_loader` and `test_loader`, which yield mini-batches of the training and test data, respectively.
```
import os  # needed for os.path.expanduser below

def load_data(args):
data_file='SUSY.csv'
root_dir=os.path.expanduser('~')+'/ML_review/SUSY_data/'
kwargs = {} # CUDA arguments, if enabled
    # load train and test data
train_loader = torch.utils.data.DataLoader(
SUSY_Dataset(data_file,root_dir,args.dataset_size,train=True,high_level_feats=args.high_level_feats),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
SUSY_Dataset(data_file,root_dir,args.dataset_size,train=False,high_level_feats=args.high_level_feats),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
return train_loader, test_loader
```
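As a quick illustration of how these loaders are consumed (a minimal sketch, assuming an `args` object with the fields used above, as constructed in the *Run Code* section below), each iteration yields a mini-batch of features and labels:
```
# Minimal usage sketch (not part of the original pipeline)
train_loader, test_loader = load_data(args)
features, labels = next(iter(train_loader))
print(features.shape, labels.shape)  # e.g. torch.Size([batch_size, 18]) and torch.Size([batch_size])
```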
### Step 2: Define the Neural Net and its Architecture
To construct neural networks with Pytorch, we make another class called `model` as a child of Pytorch's `nn.Module` class. The `model` class initializes the types of layers needed for the deep neural net in its `__init__` method, while the DNN is assembled in a method called `forward`, which accepts a tensor of input data and returns the output layer. Using this convention, Pytorch automatically recognizes the structure of the DNN, and the `autograd` module can compute all gradients needed for backprop.
Our code below is constructed in such a way that one can choose whether to use the high-level features, the low-level features, or both. This choice determines the size of the fully-connected input layer `fc1`. Therefore the `__init__` method accepts the optional argument `high_level_feats`.
```
import torch.nn as nn # construct NN
class model(nn.Module):
def __init__(self,high_level_feats=None):
# inherit attributes and methods of nn.Module
super(model, self).__init__()
# an affine operation: y = Wx + b
if high_level_feats is None:
self.fc1 = nn.Linear(18, 200) # all features
elif high_level_feats:
            self.fc1 = nn.Linear(10, 200) # high-level features only
else:
            self.fc1 = nn.Linear(8, 200) # low-level features only
self.batchnorm1=nn.BatchNorm1d(200, eps=1e-05, momentum=0.1)
self.batchnorm2=nn.BatchNorm1d(100, eps=1e-05, momentum=0.1)
self.fc2 = nn.Linear(200, 100) # see forward function for dimensions
self.fc3 = nn.Linear(100, 2)
def forward(self, x):
'''Defines the feed-forward function for the NN.
A backward function is automatically defined using `torch.autograd`
Parameters
----------
        x : torch.Tensor
input data
Returns
-------
        torch.Tensor
output layer of NN
'''
# apply rectified linear unit
x = F.relu(self.fc1(x))
# apply dropout
#x=self.batchnorm1(x)
x = F.dropout(x, training=self.training)
# apply rectified linear unit
x = F.relu(self.fc2(x))
# apply dropout
#x=self.batchnorm2(x)
x = F.dropout(x, training=self.training)
        # apply affine operation fc3
x = self.fc3(x)
# soft-max layer
x = F.log_softmax(x,dim=1)
return x
```
### Steps 3+4+5: Choose the Optimizer and the Cost Function. Train and Evaluate the Model
Next, we define the function `evaluate_model`. The first argument, `args`, contains all hyperparameters needed for the DNN (see below). The second and third arguments are the `train_loader` and the `test_loader` objects, returned by the function `load_data()` we defined in Step 1 above. The `evaluate_model` function returns the final `test_loss` and `test_accuracy` of the model.
First, we initialize a `model` and call the object `DNN`. In order to define the loss function and the optimizer, we use the modules `torch.nn.functional` (imported here as `F`) and `torch.optim`. As a loss function we choose the negative log-likelihood and store it under the variable `criterion`. As usual, we could choose any of a variety of SGD-based optimizers, but here we focus on traditional SGD.
Next, we define two functions: `train()` and `test()`. They are called at the end of `evaluate_model` where we loop over the training epochs to train and test our model.
The `train` function accepts an integer called `epoch`, which is only used when printing the training progress. We first set the `DNN` in training mode using the `train()` method inherited from `nn.Module`. Then we loop over the mini-batches in `train_loader`: we zero the gradient buffers of the `optimizer`, perform the forward step by calling the `DNN` model on the `data`, and compute the `loss`. The backprop algorithm is then easily run using the `backward()` method of the computed `loss`. We use `optimizer.step()` to update the weights of the `DNN`. Last, we print the performance every `args.log_interval` mini-batches. `train` returns the loss on the last mini-batch.
The `test` function is similar to `train` but its purpose is to test the performance of a trained model. Once we set the `DNN` model in `eval()` mode, the following steps are similar to those in `train`. We then compute the `test_loss` and the number of `correct` predictions, print the results and return them.
```
import torch.nn.functional as F # implements forward and backward definitions of an autograd operation
import torch.optim as optim # different update rules such as SGD, Nesterov-SGD, Adam, RMSProp, etc
def evaluate_model(args,train_loader,test_loader):
# create model
DNN = model(high_level_feats=args.high_level_feats)
# negative log-likelihood (nll) loss for training: takes class labels NOT one-hot vectors!
criterion = F.nll_loss
# define SGD optimizer
optimizer = optim.SGD(DNN.parameters(), lr=args.lr, momentum=args.momentum)
#optimizer = optim.Adam(DNN.parameters(), lr=0.001, betas=(0.9, 0.999))
################################################
def train(epoch):
'''Trains a NN using minibatches.
Parameters
----------
epoch : int
Training epoch number.
'''
# set model to training mode (affects Dropout and BatchNorm)
DNN.train()
# loop over training data
for batch_idx, (data, label) in enumerate(train_loader):
# zero gradient buffers
optimizer.zero_grad()
# compute output of final layer: forward step
output = DNN(data)
# compute loss
loss = criterion(output, label)
# run backprop: backward step
loss.backward()
            # update weights of NN
optimizer.step()
# print loss at current epoch
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item() ))
return loss.item()
################################################
def test():
'''Tests NN performance.
'''
# evaluate model
DNN.eval()
test_loss = 0 # loss function on test data
correct = 0 # number of correct predictions
# loop over test data
for data, label in test_loader:
# compute model prediction softmax probability
output = DNN(data)
# compute test loss
            test_loss += criterion(output, label, reduction='sum').item() # sum up batch loss
# find most likely prediction
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
# update number of correct predictions
correct += pred.eq(label.data.view_as(pred)).cpu().sum().item()
# print test loss
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.3f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return test_loss, correct / len(test_loader.dataset)
################################################
train_loss=np.zeros((args.epochs,))
test_loss=np.zeros_like(train_loss)
test_accuracy=np.zeros_like(train_loss)
epochs=range(1, args.epochs + 1)
for epoch in epochs:
train_loss[epoch-1] = train(epoch)
test_loss[epoch-1], test_accuracy[epoch-1] = test()
return test_loss[-1], test_accuracy[-1]
```
### Step 6: Modify the Hyperparameters to Optimize Performance of the Model
To study the performance of the model for a variety of different `dataset_sizes` and `learning_rates`, we do a grid search.
Let us define a function `grid_search`, which accepts the `args` variable containing all hyperparameters needed for the problem. After choosing a set of `dataset_sizes` and logarithmically spaced `learning_rates`, we first loop over all `dataset_sizes`, update the `args` variable, and call the `load_data` function. We then loop over all `learning_rates`, update `args`, and call `evaluate_model`.
```
def grid_search(args):
    # perform grid search over learning rate and data set size
dataset_sizes=[1000, 10000, 100000, 200000] #np.logspace(2,5,4).astype('int')
learning_rates=np.logspace(-5,-1,5)
    # pre-allocate data
test_loss=np.zeros((len(dataset_sizes),len(learning_rates)),dtype=np.float64)
test_accuracy=np.zeros_like(test_loss)
# do grid search
for i, dataset_size in enumerate(dataset_sizes):
        # update data set size parameters
args.dataset_size=dataset_size
args.batch_size=int(0.01*dataset_size)
# load data
train_loader, test_loader = load_data(args)
for j, lr in enumerate(learning_rates):
# update learning rate
args.lr=lr
print("\n training DNN with %5d data points and SGD lr=%0.6f. \n" %(dataset_size,lr) )
test_loss[i,j],test_accuracy[i,j] = evaluate_model(args,train_loader,test_loader)
plot_data(learning_rates,dataset_sizes,test_accuracy)
```
Last, we use the function `plot_data`, defined below, to plot the results.
```
import matplotlib.pyplot as plt
def plot_data(x,y,data):
# plot results
fontsize=16
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(data, interpolation='nearest', vmin=0, vmax=1)
cbar=fig.colorbar(cax)
cbar.ax.set_ylabel('accuracy (%)',rotation=90,fontsize=fontsize)
cbar.set_ticks([0,.2,.4,0.6,0.8,1.0])
cbar.set_ticklabels(['0%','20%','40%','60%','80%','100%'])
# put text on matrix elements
for i, x_val in enumerate(np.arange(len(x))):
for j, y_val in enumerate(np.arange(len(y))):
c = "${0:.1f}\\%$".format( 100*data[j,i])
ax.text(x_val, y_val, c, va='center', ha='center')
    # convert axis values to string labels
x=[str(i) for i in x]
y=[str(i) for i in y]
ax.set_xticklabels(['']+x)
ax.set_yticklabels(['']+y)
ax.set_xlabel('$\\mathrm{learning\\ rate}$',fontsize=fontsize)
    ax.set_ylabel('$\\mathrm{data\\ set\\ size}$',fontsize=fontsize)
plt.tight_layout()
plt.show()
```
## Run Code
As we mentioned in the beginning of the notebook, all functions and classes discussed above only specify the procedure but do not actually perform any computations. This allows us to re-use them for different problems.
Actually running the training and testing for every point in the grid search is done below. The `argparse` module allows us to conveniently keep track of all hyperparameters, stored in the variable `args`, which enters most of the functions we defined above.
To run the simulation, we call the function `grid_search`.
## Exercises
* One of the advantages of Pytorch is that it allows us to automatically use the CUDA library for fast performance on GPUs. For the sake of clarity, we have omitted this in the notebook above. Go online to check how to put the CUDA commands back into the code above; a minimal sketch of the idea is shown below. _Hint:_ study the [Pytorch MNIST tutorial](https://github.com/pytorch/examples/blob/master/mnist/main.py) to see how this works in practice.
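The following is only a sketch, not a full solution to the exercise: the `use_cuda` flag and `device` object are standard Pytorch idioms, and the remaining `.to(device)` calls would still have to be added throughout the training and test loops.
```
# Minimal sketch of CUDA support (assumes the `args` Namespace defined below).
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")

# move the model to the GPU (or keep it on the CPU if CUDA is unavailable)
DNN = model(high_level_feats=args.high_level_feats).to(device)

# inside the training/test loops, move each mini-batch to the same device:
# data, label = data.to(device), label.to(device)

# DataLoader keyword arguments commonly used together with CUDA:
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
```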
```
import argparse # handles arguments
import sys; sys.argv=['']; del sys # required to use parser in jupyter notebooks
# Training settings
parser = argparse.ArgumentParser(description='PyTorch SUSY Example')
parser.add_argument('--dataset_size', type=int, default=100000, metavar='DS',
help='size of data set (default: 100000)')
parser.add_argument('--high_level_feats', type=bool, default=None, metavar='HLF',
help='toggles high level features (default: None)')
parser.add_argument('--batch-size', type=int, default=100, metavar='N',
                    help='input batch size for training (default: 100)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.05, metavar='LR',
                    help='learning rate (default: 0.05)')
parser.add_argument('--momentum', type=float, default=0.8, metavar='M',
                    help='SGD momentum (default: 0.8)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=2, metavar='S',
                    help='random seed (default: 2)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
# set seed of random number generator
torch.manual_seed(args.seed)
grid_search(args)
```
# Start-to-Finish Example: Setting up Exact Initial Data for Einstein's Equations, in Curvilinear Coordinates
## Authors: Brandon Clark, George Vopal, and Zach Etienne
## This module sets up initial data for a specified exact solution written in terms of ADM variables, using the [*Exact* ADM Spherical to BSSN Curvilinear initial data module](../edit/BSSN/ADM_Exact_Spherical_or_Cartesian_to_BSSNCurvilinear.py).
**Notebook Status:** <font color='green'><b> Validated </b></font>
**Validation Notes:** This module has been validated, confirming that all initial data sets exhibit convergence to zero of the Hamiltonian and momentum constraints at the expected rate or better.
### NRPy+ Source Code for this module:
* [BSSN/ADM_Exact_Spherical_or_Cartesian_to_BSSNCurvilinear.py](../edit/BSSN/ADM_Exact_Spherical_or_Cartesian_to_BSSNCurvilinear.py); [\[**tutorial**\]](Tutorial-ADM_Initial_Data-Converting_Exact_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.ipynb): *Exact* Spherical ADM$\to$Curvilinear BSSN converter function
* [BSSN/BSSN_constraints.py](../edit/BSSN/BSSN_constraints.py); [\[**tutorial**\]](Tutorial-BSSN_constraints.ipynb): Hamiltonian & momentum constraints in BSSN curvilinear basis/coordinates
## Introduction:
Here we use NRPy+ to generate a C code confirming that specified *exact* initial data satisfy Einstein's equations of general relativity. The following exact initial data types are supported:
* Shifted Kerr-Schild spinning black hole initial data
* "Static" Trumpet black hole initial data
* Brill-Lindquist two black hole initial data
* UIUC black hole initial data
<a id='toc'></a>
# Table of Contents
$$\label{toc}$$
This notebook is organized as follows
0. [Preliminaries](#prelim): The Choices for Initial Data
1. [Choice 1](#sks): Shifted Kerr-Schild spinning black hole initial data
1. [Choice 2](#st): "Static" Trumpet black hole initial data
1. [Choice 3](#bl): Brill-Lindquist two black hole initial data
1. [Choice 4](#uiuc): UIUC black hole initial data
1. [Step 1](#pickid): Specify the Initial Data to Test
1. [Step 2](#initializenrpy): Set core NRPy+ parameters for numerical grids and reference metric
1. [Step 3](#adm_id): Import Black Hole ADM initial data C function from NRPy+ module
1. [Step 4](#validate): Validating that the black hole initial data satisfy the Hamiltonian constraint
1. [Step 4.a](#ham_const_output): Output C code for evaluating the Hamiltonian and Momentum constraint violation
1. [Step 4.b](#enforce3metric): Enforce conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint
1. [Step 4.c](#bc_functs): Set up boundary condition functions for the chosen singular, curvilinear coordinate system
1. [Step 5](#mainc): `Initial_Data_Playground.c`: The Main C Code
1. [Step 6](#plot): Plotting the initial data
1. [Step 7](#convergence): Validation: Convergence of numerical errors (Hamiltonian constraint violation) to zero
1. [Step 8](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='prelim'></a>
# Preliminaries: The Choices for Initial Data
$$\label{prelim}$$
<a id='sks'></a>
## Shifted Kerr-Schild spinning black hole initial data \[Back to [top](#toc)\]
$$\label{sks}$$
Here we use NRPy+ to generate initial data for a spinning black hole.
Shifted Kerr-Schild spinning black hole initial data has been <font color='green'><b> validated </b></font> to exhibit convergence to zero of both the Hamiltonian and momentum constraint violations at the expected order to the exact solution.
**NRPy+ Source Code:**
* [BSSN/ShiftedKerrSchild.py](../edit/BSSN/ShiftedKerrSchild.py); [\[**tutorial**\]](Tutorial-ADM_Initial_Data-ShiftedKerrSchild.ipynb)
The [BSSN.ShiftedKerrSchild](../edit/BSSN/ShiftedKerrSchild.py) NRPy+ module does the following:
1. Set up shifted Kerr-Schild initial data, represented by [ADM](https://en.wikipedia.org/wiki/ADM_formalism) quantities in the **Spherical basis**, as [documented here](Tutorial-ADM_Initial_Data-ShiftedKerrSchild.ipynb).
1. Convert the exact ADM **Spherical quantities** to **BSSN quantities in the desired Curvilinear basis** (set by `reference_metric::CoordSystem`), as [documented here](Tutorial-ADM_Initial_Data-Converting_Numerical_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.ipynb).
1. Sets up the standardized C function for setting all BSSN Curvilinear gridfunctions in a pointwise fashion, as [written here](../edit/BSSN/BSSN_ID_function_string.py), and returns the C function as a Python string.
<a id='st'></a>
## "Static" Trumpet black hole initial data \[Back to [top](#toc)\]
$$\label{st}$$
Here we use NRPy+ to generate initial data for a single trumpet black hole ([Dennison & Baumgarte, PRD ???](https://arxiv.org/abs/??)).
"Static" Trumpet black hole initial data has been <font color='green'><b> validated </b></font> to exhibit convergence to zero of the Hamiltonian constraint violation at the expected order to the exact solution. It was carefully ported from the [original NRPy+ code](https://bitbucket.org/zach_etienne/nrpy).
**NRPy+ Source Code:**
* [BSSN/StaticTrumpet.py](../edit/BSSN/StaticTrumpet.py); [\[**tutorial**\]](Tutorial-ADM_Initial_Data-StaticTrumpet.ipynb)
The [BSSN.StaticTrumpet](../edit/BSSN/StaticTrumpet.py) NRPy+ module does the following:
1. Set up static trumpet black hole initial data, represented by [ADM](https://en.wikipedia.org/wiki/ADM_formalism) quantities in the **Spherical basis**, as [documented here](Tutorial-ADM_Initial_Data-StaticTrumpetBlackHole.ipynb).
1. Convert the exact ADM **Spherical quantities** to **BSSN quantities in the desired Curvilinear basis** (set by `reference_metric::CoordSystem`), as [documented here](Tutorial-ADM_Initial_Data-Converting_Numerical_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.ipynb).
1. Sets up the standardized C function for setting all BSSN Curvilinear gridfunctions in a pointwise fashion, as [written here](../edit/BSSN/BSSN_ID_function_string.py), and returns the C function as a Python string.
<a id='bl'></a>
## Brill-Lindquist initial data \[Back to [top](#toc)\]
$$\label{bl}$$
Here we use NRPy+ to generate initial data for two black holes (Brill-Lindquist, [Brill & Lindquist, Phys. Rev. 131, 471, 1963](https://journals.aps.org/pr/abstract/10.1103/PhysRev.131.471); see also Eq. 1 of [Brandt & Brügmann, arXiv:gr-qc/9711015v1](https://arxiv.org/pdf/gr-qc/9711015v1.pdf)).
[//]: # " and then we use it to generate the RHS expressions for [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html) time integration based on the [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4)."
Brill-Lindquist initial data has been <font color='green'><b> validated </b></font> to exhibit convergence to zero of the Hamiltonian constraint violation at the expected order to the exact solution, and all quantities have been validated against the [original SENR code](https://bitbucket.org/zach_etienne/nrpy).
**NRPy+ Source Code:**
* [BSSN/BrillLindquist.py](../edit/BSSN/BrillLindquist.py); [\[**tutorial**\]](Tutorial-ADM_Initial_Data-Brill-Lindquist.ipynb)
* [BSSN/BSSN_ID_function_string.py](../edit/BSSN/BSSN_ID_function_string.py)
The [BSSN.BrillLindquist](../edit/BSSN/BrillLindquist.py) NRPy+ module does the following:
1. Set up Brill-Lindquist initial data [ADM](https://en.wikipedia.org/wiki/ADM_formalism) quantities in the **Cartesian basis**, as [documented here](Tutorial-ADM_Initial_Data-Brill-Lindquist.ipynb).
1. Convert the ADM **Cartesian quantities** to **BSSN quantities in the desired Curvilinear basis** (set by `reference_metric::CoordSystem`), as [documented here](Tutorial-ADM_Initial_Data-Converting_ADMCartesian_to_BSSNCurvilinear.ipynb).
1. Sets up the standardized C function for setting all BSSN Curvilinear gridfunctions in a pointwise fashion, as [written here](../edit/BSSN/BSSN_ID_function_string.py), and returns the C function as a Python string.
<a id='uiuc'></a>
## UIUC black hole initial data \[Back to [top](#toc)\]
$$\label{uiuc}$$
UIUC black hole initial data has been <font color='green'><b> validated </b></font> to exhibit convergence to zero of the Hamiltonian constraint violation at the expected order to the exact solution, and all quantities have been validated against the [original SENR code](https://bitbucket.org/zach_etienne/nrpy).
**NRPy+ Source Code:**
* [BSSN/UIUCBlackHole.py](../edit/BSSN/UIUCBlackHole.py); [\[**tutorial**\]](Tutorial-ADM_Initial_Data-UIUCBlackHole.ipynb)
The [BSSN.UIUCBlackHole](../edit/BSSN/UIUCBlackHole.py) NRPy+ module does the following:
1. Set up UIUC black hole initial data, represented by [ADM](https://en.wikipedia.org/wiki/ADM_formalism) quantities in the **Spherical basis**, as [documented here](Tutorial-ADM_Initial_Data-UIUCBlackHole.ipynb).
1. Convert the numerical ADM **Spherical quantities** to **BSSN quantities in the desired Curvilinear basis** (set by `reference_metric::CoordSystem`), as [documented here](Tutorial-ADM_Initial_Data-Converting_Numerical_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.ipynb).
1. Sets up the standardized C function for setting all BSSN Curvilinear gridfunctions in a pointwise fashion, as [written here](../edit/BSSN/BSSN_ID_function_string.py), and returns the C function as a Python string.
<a id='pickid'></a>
# Step 1: Specify the Initial Data to Test \[Back to [top](#toc)\]
$$\label{pickid}$$
Here you have a choice for which initial data you would like to import and test for convergence. The following is a list of the currently compatible `initial_data_string` options for you to choose from.
* `"Shifted KerrSchild"`
* `"Static Trumpet"`
* `"Brill-Lindquist"`
* `"UIUC"`
```
import collections
#################
# For the User: Choose initial data, default is Shifted KerrSchild.
# You are also encouraged to adjust any of the
# DestGridCoordSystem, freeparams, or EnableMomentum parameters!
# NOTE: Only DestGridCoordSystem == Spherical or SinhSpherical
# currently work out of the box; additional modifications
# will likely be necessary for other CoordSystems.
#################
initial_data_string = "Shifted KerrSchild" # "UIUC"
dictID = {}
IDmod_retfunc = collections.namedtuple('IDmod_retfunc', 'modulename functionname DestGridCoordSystem freeparams EnableMomentum')
dictID['Shifted KerrSchild'] = IDmod_retfunc(
modulename = "BSSN.ShiftedKerrSchild", functionname = "ShiftedKerrSchild",
DestGridCoordSystem = "Spherical",
freeparams = ["const REAL M = 1.0;", "const REAL a = 0.9;", "const REAL r0 = 1.0;"],
EnableMomentum = True)
dictID['Static Trumpet'] = IDmod_retfunc(
modulename = "BSSN.StaticTrumpet", functionname = "StaticTrumpet",
DestGridCoordSystem = "Spherical",
freeparams = ["const REAL M = 1.0;"],
EnableMomentum = False)
dictID['Brill-Lindquist'] = IDmod_retfunc(
modulename = "BSSN.BrillLindquist", functionname = "BrillLindquist",
DestGridCoordSystem = "Spherical",
freeparams = ["const REAL BH1_posn_x =-1.0,BH1_posn_y = 0.0,BH1_posn_z = 0.0;",
"const REAL BH2_posn_x = 1.0,BH2_posn_y = 0.0,BH2_posn_z = 0.0;", "const REAL BH1_mass = 0.5,BH2_mass = 0.5;"],
EnableMomentum = False)
dictID['UIUC'] = IDmod_retfunc(modulename = "BSSN.UIUCBlackHole", functionname = "UIUCBlackHole",
DestGridCoordSystem = "SinhSpherical",
freeparams = ["const REAL M = 1.0;", "const REAL chi = 0.99;"],
EnableMomentum = True)
```
<a id='initializenrpy'></a>
# Step 2: Set up the needed NRPy+ infrastructure and declare core gridfunctions \[Back to [top](#toc)\]
$$\label{initializenrpy}$$
We will import the core modules of NRPy that we will need and specify the main gridfunctions we will need.
```
# Step P1: Import needed NRPy+ core modules:
from outputC import lhrh,outC_function_dict,outCfunction # NRPy+: Core C code output module
import finite_difference as fin # NRPy+: Finite difference C code generation module
import NRPy_param_funcs as par # NRPy+: Parameter interface
import grid as gri # NRPy+: Functions having to do with numerical grids
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import reference_metric as rfm # NRPy+: Reference metric support
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
import shutil, os, sys, time # Standard Python modules for multiplatform OS-level functions, benchmarking
import importlib # Standard Python module for interactive module imports
# Step P2: Create C code output directory:
Ccodesdir = os.path.join("BlackHoleID_Ccodes/")
# First remove C code output directory if it exists
# Courtesy https://stackoverflow.com/questions/303200/how-do-i-remove-delete-a-folder-that-is-not-empty
# !rm -r ScalarWaveCurvilinear_Playground_Ccodes
shutil.rmtree(Ccodesdir, ignore_errors=True)
# Then create a fresh directory
cmd.mkdir(Ccodesdir)
# Step P3: Create executable output directory:
outdir = os.path.join(Ccodesdir,"output/")
cmd.mkdir(outdir)
# Step 1: Set the spatial dimension parameter
# to three this time, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",3)
DIM = par.parval_from_str("grid::DIM")
# Step 2: Set some core parameters, including CoordSystem MoL timestepping algorithm,
# FD order, floating point precision, and CFL factor:
# Choices are: Spherical, SinhSpherical, SinhSphericalv2, Cylindrical, SinhCylindrical,
# SymTP, SinhSymTP
CoordSystem = "Spherical"
# Step 2.a: Set defaults for Coordinate system parameters.
# These are perhaps the most commonly adjusted parameters,
# so we enable modifications at this high level.
# domain_size sets the default value for:
# * Spherical's params.RMAX
# * SinhSpherical*'s params.AMAX
# * Cartesians*'s -params.{x,y,z}min & .{x,y,z}max
# * Cylindrical's -params.ZMIN & .{Z,RHO}MAX
# * SinhCylindrical's params.AMPL{RHO,Z}
# * *SymTP's params.AMAX
domain_size = 3.0
# sinh_width sets the default value for:
# * SinhSpherical's params.SINHW
# * SinhCylindrical's params.SINHW{RHO,Z}
# * SinhSymTP's params.SINHWAA
sinh_width = 0.4 # If Sinh* coordinates chosen
# sinhv2_const_dr sets the default value for:
# * SinhSphericalv2's params.const_dr
# * SinhCylindricalv2's params.const_d{rho,z}
sinhv2_const_dr = 0.05# If Sinh*v2 coordinates chosen
# SymTP_bScale sets the default value for:
# * SinhSymTP's params.bScale
SymTP_bScale = 0.5 # If SymTP chosen
FD_order = 4 # Finite difference order: even numbers only, starting with 2. 12 is generally unstable
REAL = "double" # Best to use double here.
# Step 3: Set the coordinate system for the numerical grid
par.set_parval_from_str("reference_metric::CoordSystem",CoordSystem)
rfm.reference_metric() # Create ReU, ReDD needed for rescaling B-L initial data, generating BSSN RHSs, etc.
# Step 4: Set the finite differencing order to FD_order (set above).
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER", FD_order)
# Step 5: Set the direction=2 (phi) axis to be the symmetry axis; i.e.,
# axis "2", corresponding to the i2 direction.
# This sets all spatial derivatives in the phi direction to zero.
par.set_parval_from_str("indexedexp::symmetry_axes","2")
# Step 6: The MoLtimestepping interface is only used for memory allocation/deallocation
import MoLtimestepping.C_Code_Generation as MoL
from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict
RK_method = "Euler" # DOES NOT MATTER; Again MoL interface is only used for memory alloc/dealloc.
RK_order = Butcher_dict[RK_method][1]
cmd.mkdir(os.path.join(Ccodesdir,"MoLtimestepping/"))
MoL.MoL_C_Code_Generation(RK_method, RHS_string = "", post_RHS_string = "",
outdir = os.path.join(Ccodesdir,"MoLtimestepping/"))
```
<a id='adm_id'></a>
# Step 3: Import Black Hole ADM initial data C function from NRPy+ module \[Back to [top](#toc)\]
$$\label{adm_id}$$
```
# Import Black Hole initial data
IDmodule = importlib.import_module(dictID[initial_data_string].modulename)
IDfunc = getattr(IDmodule, dictID[initial_data_string].functionname)
IDfunc() # Registers ID C function in dictionary, used below to output to file.
with open(os.path.join(Ccodesdir,"initial_data.h"),"w") as file:
file.write(outC_function_dict["initial_data"])
```
<a id='cparams_rfm_and_domainsize'></a>
## Step 3.a: Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` \[Back to [top](#toc)\]
$$\label{cparams_rfm_and_domainsize}$$
Based on declared NRPy+ Cparameters, first we generate `declare_Cparameters_struct.h`, `set_Cparameters_default.h`, and `set_Cparameters[-SIMD].h`.
Then we output `free_parameters.h`, which sets initial data parameters, as well as grid domain & reference metric parameters, applying `domain_size` and `sinh_width`/`SymTP_bScale` (if applicable) as set above.
```
# Step 3.a.i: Set free_parameters.h
# Output to $Ccodesdir/free_parameters.h reference metric parameters based on generic
# domain_size,sinh_width,sinhv2_const_dr,SymTP_bScale,
# parameters set above.
rfm.out_default_free_parameters_for_rfm(os.path.join(Ccodesdir,"free_parameters.h"),
domain_size,sinh_width,sinhv2_const_dr,SymTP_bScale)
# Step 3.a.ii: Generate set_Nxx_dxx_invdx_params__and__xx.h:
rfm.set_Nxx_dxx_invdx_params__and__xx_h(Ccodesdir)
# Step 3.a.iii: Generate xx_to_Cart.h, which contains xx_to_Cart() for
# (the mapping from xx->Cartesian) for the chosen
# CoordSystem:
rfm.xx_to_Cart_h("xx_to_Cart","./set_Cparameters.h",os.path.join(Ccodesdir,"xx_to_Cart.h"))
# Step 3.a.iv: Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h
par.generate_Cparameters_Ccodes(os.path.join(Ccodesdir))
```
<a id='validate'></a>
# Step 4: Validating that the black hole initial data satisfy the Hamiltonian constraint \[Back to [top](#toc)\]
$$\label{validate}$$
We will validate that the black hole initial data satisfy the Hamiltonian constraint, modulo numerical finite differencing error.
<a id='ham_const_output'></a>
## Step 4.a: Output C code for evaluating the Hamiltonian and Momentum constraint violation \[Back to [top](#toc)\]
$$\label{ham_const_output}$$
First output C code for evaluating the Hamiltonian constraint violation. For the initial data where `EnableMomentum = True` we must also output C code for evaluating the Momentum constraint violation.
```
import BSSN.BSSN_constraints as bssncon
# Now register the Hamiltonian & momentum constraints as gridfunctions.
H = gri.register_gridfunctions("AUX","H")
MU = ixp.register_gridfunctions_for_single_rank1("AUX", "MU")
# Generate symbolic expressions for Hamiltonian & momentum constraints
import BSSN.BSSN_constraints as bssncon
bssncon.BSSN_constraints(add_T4UUmunu_source_terms=False)
# Generate optimized C code for Hamiltonian constraint
desc="Evaluate the Hamiltonian constraint"
name="Hamiltonian_constraint"
outCfunction(
outfile = os.path.join(Ccodesdir,name+".h"), desc=desc, name=name,
params = """const paramstruct *restrict params, REAL *restrict xx[3],
REAL *restrict in_gfs, REAL *restrict aux_gfs""",
body = fin.FD_outputC("returnstring",lhrh(lhs=gri.gfaccess("aux_gfs", "H"), rhs=bssncon.H),
params="outCverbose=False"),
loopopts = "InteriorPoints,Read_xxs")
# Generate optimized C code for momentum constraint
desc="Evaluate the momentum constraint"
name="momentum_constraint"
outCfunction(
outfile = os.path.join(Ccodesdir,name+".h"), desc=desc, name=name,
params = """const paramstruct *restrict params, REAL *restrict xx[3],
REAL *restrict in_gfs, REAL *restrict aux_gfs""",
body = fin.FD_outputC("returnstring",
[lhrh(lhs=gri.gfaccess("aux_gfs", "MU0"), rhs=bssncon.MU[0]),
lhrh(lhs=gri.gfaccess("aux_gfs", "MU1"), rhs=bssncon.MU[1]),
lhrh(lhs=gri.gfaccess("aux_gfs", "MU2"), rhs=bssncon.MU[2])],
params="outCverbose=False"),
loopopts = "InteriorPoints,Read_xxs")
```
<a id='enforce3metric'></a>
## Step 4.b: Enforce conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint \[Back to [top](#toc)\]
$$\label{enforce3metric}$$
Then enforce conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint (Eq. 53 of [Ruchlin, Etienne, and Baumgarte (2018)](https://arxiv.org/abs/1712.07658)), as [documented in the corresponding NRPy+ tutorial notebook](Tutorial-BSSN_enforcing_determinant_gammabar_equals_gammahat_constraint.ipynb)
Applying curvilinear boundary conditions should affect the initial data at the outer boundary, and will in general cause the $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint to be violated there. Thus after we apply these boundary conditions, we must always call the routine for enforcing the $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint:
```
# Set up the C function for the det(gammahat) = det(gammabar)
import BSSN.Enforce_Detgammahat_Constraint as EGC
enforce_detg_constraint_symb_expressions = EGC.Enforce_Detgammahat_Constraint_symb_expressions()
EGC.output_Enforce_Detgammahat_Constraint_Ccode(Ccodesdir,exprs=enforce_detg_constraint_symb_expressions,
Read_xxs=True)
```
<a id='bc_functs'></a>
## Step 4.c: Set up boundary condition functions for chosen singular, curvilinear coordinate system \[Back to [top](#toc)\]
$$\label{bc_functs}$$
Next apply singular, curvilinear coordinate boundary conditions [as documented in the corresponding NRPy+ tutorial notebook](Tutorial-Start_to_Finish-Curvilinear_BCs.ipynb)
```
import CurviBoundaryConditions.CurviBoundaryConditions as cbcs
cbcs.Set_up_CurviBoundaryConditions(os.path.join(Ccodesdir,"boundary_conditions/"),Cparamspath=os.path.join("../"))
```
<a id='mainc'></a>
# Step 5: `Initial_Data_Playground.c`: The Main C Code \[Back to [top](#toc)\]
$$\label{mainc}$$
```
# Part P0: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER
# set REAL=double, so that all floating point numbers are stored to at least ~16 significant digits.
with open(os.path.join(Ccodesdir,"Initial_Data_Playground_REAL__NGHOSTS.h"), "w") as file:
file.write("""
// Part P0.a: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER
#define NGHOSTS """+str(int(par.parval_from_str("finite_difference::FD_CENTDERIVS_ORDER")/2)+1)+"""\n
// Part P0.b: Set the numerical precision (REAL) to double, ensuring all floating point
// numbers are stored to at least ~16 significant digits
#define REAL double\n""")
%%writefile $Ccodesdir/Initial_Data_Playground.c
// Step P0: Define REAL and NGHOSTS. This header is generated by NRPy+.
#include "Initial_Data_Playground_REAL__NGHOSTS.h"
#include "declare_Cparameters_struct.h"
// Step P1: Import needed header files
#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#ifndef M_PI
#define M_PI 3.141592653589793238462643383279502884L
#endif
#ifndef M_SQRT1_2
#define M_SQRT1_2 0.707106781186547524400844362104849039L
#endif
// Step P2: Declare the IDX4S(gf,i,j,k) macro, which enables us to store 4-dimensions of
// data in a 1D array. In this case, consecutive values of "i"
// (all other indices held to a fixed value) are consecutive in memory, where
// consecutive values of "j" (fixing all other indices) are separated by
// Nxx_plus_2NGHOSTS0 elements in memory. Similarly, consecutive values of
// "k" are separated by Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1 in memory, etc.
#define IDX4S(g,i,j,k) \
( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) )
#define IDX4ptS(g,idx) ( (idx) + (Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2) * (g) )
#define IDX3S(i,j,k) ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) ) ) )
#define LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) \
for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++)
#define LOOP_ALL_GFS_GPS(ii) _Pragma("omp parallel for") \
for(int (ii)=0;(ii)<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;(ii)++)
// Step P3: Set UUGF and VVGF macros, as well as xx_to_Cart()
#include "boundary_conditions/gridfunction_defines.h"
// Step P4: Set xx_to_Cart(const paramstruct *restrict params,
// REAL *restrict xx[3],
// const int i0,const int i1,const int i2,
// REAL xCart[3]),
// which maps xx->Cartesian via
// {xx[0][i0],xx[1][i1],xx[2][i2]}->{xCart[0],xCart[1],xCart[2]}
#include "xx_to_Cart.h"
// Step P5: Defines set_Nxx_dxx_invdx_params__and__xx(const int EigenCoord, const int Nxx[3],
// paramstruct *restrict params, REAL *restrict xx[3]),
// which sets params Nxx,Nxx_plus_2NGHOSTS,dxx,invdx, and xx[] for
// the chosen Eigen-CoordSystem if EigenCoord==1, or
// CoordSystem if EigenCoord==0.
#include "set_Nxx_dxx_invdx_params__and__xx.h"
// Step P6: Include basic functions needed to impose curvilinear
// parity and boundary conditions.
#include "boundary_conditions/CurviBC_include_Cfunctions.h"
// Step P8: Include function for enforcing detgammabar constraint.
#include "enforce_detgammahat_constraint.h"
// Step P10: Declare function necessary for setting up the initial data.
// Step P10.a: Define BSSN_ID() for BrillLindquist initial data
// Step P10.b: Set the generic driver function for setting up BSSN initial data
#include "initial_data.h"
// Step P11: Declare function for evaluating Hamiltonian constraint (diagnostic)
#include "Hamiltonian_constraint.h"
#include "momentum_constraint.h"
// main() function:
// Step 0: Read command-line input, set up grid structure, allocate memory for gridfunctions, set up coordinates
// Step 1: Set up initial data to an exact solution
// Step 2: Start the timer, for keeping track of how fast the simulation is progressing.
// Step 3: Integrate the initial data forward in time using the chosen RK-like Method of
// Lines timestepping algorithm, and output periodic simulation diagnostics
// Step 3.a: Output 2D data file periodically, for visualization
// Step 3.b: Step forward one timestep (t -> t+dt) in time using
// chosen RK-like MoL timestepping algorithm
// Step 3.c: If t=t_final, output conformal factor & Hamiltonian
// constraint violation to 2D data file
// Step 3.d: Progress indicator printing to stderr
// Step 4: Free all allocated memory
int main(int argc, const char *argv[]) {
paramstruct params;
#include "set_Cparameters_default.h"
// Step 0a: Read command-line input, error out if nonconformant
if((argc != 4) || atoi(argv[1]) < NGHOSTS || atoi(argv[2]) < NGHOSTS || atoi(argv[3]) < 2 /* FIXME; allow for axisymmetric sims */) {
fprintf(stderr,"Error: Expected three command-line arguments: ./BrillLindquist_Playground Nx0 Nx1 Nx2,\n");
fprintf(stderr,"where Nx[0,1,2] is the number of grid points in the 0, 1, and 2 directions.\n");
fprintf(stderr,"Nx[] MUST BE larger than NGHOSTS (= %d)\n",NGHOSTS);
exit(1);
}
// Step 0b: Set up numerical grid structure, first in space...
const int Nxx[3] = { atoi(argv[1]), atoi(argv[2]), atoi(argv[3]) };
if(Nxx[0]%2 != 0 || Nxx[1]%2 != 0 || Nxx[2]%2 != 0) {
fprintf(stderr,"Error: Cannot guarantee a proper cell-centered grid if number of grid cells not set to even number.\n");
fprintf(stderr," For example, in case of angular directions, proper symmetry zones will not exist.\n");
exit(1);
}
// Step 0c: Set free parameters, overwriting Cparameters defaults
// by hand or with command-line input, as desired.
#include "free_parameters.h"
// Step 0d: Uniform coordinate grids are stored to *xx[3]
REAL *xx[3];
// Step 0d.i: Set bcstruct
bc_struct bcstruct;
{
int EigenCoord = 1;
// Step 0d.ii: Call set_Nxx_dxx_invdx_params__and__xx(), which sets
// params Nxx,Nxx_plus_2NGHOSTS,dxx,invdx, and xx[] for the
// chosen Eigen-CoordSystem.
set_Nxx_dxx_invdx_params__and__xx(EigenCoord, Nxx, ¶ms, xx);
// Step 0d.iii: Set Nxx_plus_2NGHOSTS_tot
#include "set_Cparameters-nopointer.h"
const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2;
// Step 0e: Find ghostzone mappings; set up bcstruct
#include "boundary_conditions/driver_bcstruct.h"
// Step 0e.i: Free allocated space for xx[][] array
for(int i=0;i<3;i++) free(xx[i]);
}
// Step 0f: Call set_Nxx_dxx_invdx_params__and__xx(), which sets
// params Nxx,Nxx_plus_2NGHOSTS,dxx,invdx, and xx[] for the
// chosen (non-Eigen) CoordSystem.
int EigenCoord = 0;
set_Nxx_dxx_invdx_params__and__xx(EigenCoord, Nxx, ¶ms, xx);
// Step 0g: Set all C parameters "blah" for params.blah, including
// Nxx_plus_2NGHOSTS0 = params.Nxx_plus_2NGHOSTS0, etc.
#include "set_Cparameters-nopointer.h"
const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2;
// Step 0j: Error out if the number of auxiliary gridfunctions outnumber evolved gridfunctions.
// This is a limitation of the RK method. You are always welcome to declare & allocate
// additional gridfunctions by hand.
if(NUM_AUX_GFS > NUM_EVOL_GFS) {
fprintf(stderr,"Error: NUM_AUX_GFS > NUM_EVOL_GFS. Either reduce the number of auxiliary gridfunctions,\n");
fprintf(stderr," or allocate (malloc) by hand storage for *diagnostic_output_gfs. \n");
exit(1);
}
// Step 0k: Allocate memory for gridfunctions
#include "MoLtimestepping/RK_Allocate_Memory.h"
REAL *restrict auxevol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot);
// Step 1: Set up initial data to an exact solution
initial_data(¶ms, xx, y_n_gfs);
// Step 1b: Apply boundary conditions, as initial data
// are sometimes ill-defined in ghost zones.
// E.g., spherical initial data might not be
// properly defined at points where r=-1.
apply_bcs_curvilinear(¶ms, &bcstruct, NUM_EVOL_GFS,evol_gf_parity, y_n_gfs);
enforce_detgammahat_constraint(¶ms, xx, y_n_gfs);
// Evaluate Hamiltonian & momentum constraint violations
Hamiltonian_constraint(¶ms, xx, y_n_gfs, diagnostic_output_gfs);
momentum_constraint( ¶ms, xx, y_n_gfs, diagnostic_output_gfs);
/* Step 2: 2D output: Output conformal factor (CFGF) and constraint violations (HGF, MU0GF, MU1GF, MU2GF). */
const int i0MIN=NGHOSTS; // In spherical, r=Delta r/2.
const int i1mid=Nxx_plus_2NGHOSTS1/2;
const int i2mid=Nxx_plus_2NGHOSTS2/2;
LOOP_REGION(NGHOSTS,Nxx_plus_2NGHOSTS0-NGHOSTS, i1mid,i1mid+1, NGHOSTS,Nxx_plus_2NGHOSTS2-NGHOSTS) {
REAL xCart[3];
xx_to_Cart(¶ms, xx, i0,i1,i2, xCart);
int idx = IDX3S(i0,i1,i2);
printf("%e %e %e %e %e %e %e\n",xCart[0],xCart[1], y_n_gfs[IDX4ptS(CFGF,idx)],
log10(fabs(diagnostic_output_gfs[IDX4ptS(HGF,idx)])),
log10(fabs(diagnostic_output_gfs[IDX4ptS(MU0GF,idx)])+1e-200),
log10(fabs(diagnostic_output_gfs[IDX4ptS(MU1GF,idx)])+1e-200),
log10(fabs(diagnostic_output_gfs[IDX4ptS(MU2GF,idx)])+1e-200));
}
// Step 4: Free all allocated memory
#include "boundary_conditions/bcstruct_freemem.h"
#include "MoLtimestepping/RK_Free_Memory.h"
free(auxevol_gfs);
for(int i=0;i<3;i++) free(xx[i]);
return 0;
}
import cmdline_helper as cmd
cmd.C_compile(os.path.join(Ccodesdir,"Initial_Data_Playground.c"), "Initial_Data_Playground")
cmd.delete_existing_files("out*.txt")
cmd.delete_existing_files("out*.png")
args_output_list = [["96 96 96", "out96.txt"], ["48 48 48", "out48.txt"]]
for args_output in args_output_list:
cmd.Execute("Initial_Data_Playground", args_output[0], args_output[1])
```
<a id='plot'></a>
# Step 6: Plotting the initial data \[Back to [top](#toc)\]
$$\label{plot}$$
Here we plot the evolved conformal factor of these initial data on a 2D grid, such that darker colors imply stronger gravitational fields. Hence we see the black hole(s): centered at the origin for the single-black-hole data sets, or at $x/M=\pm 1$ for the Brill-Lindquist two-black-hole data, where $M$ is an arbitrary mass scale (conventionally the [ADM mass](https://en.wikipedia.org/w/index.php?title=ADM_formalism&oldid=846335453) is chosen), and our formulation of Einstein's equations adopts $G=c=1$ [geometrized units](https://en.wikipedia.org/w/index.php?title=Geometrized_unit_system&oldid=861682626).
```
# First install scipy if it's not yet installed. This will have no effect if it's already installed.
!pip install scipy
import numpy as np
from scipy.interpolate import griddata
from pylab import savefig
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from IPython.display import Image
x96,y96,valuesCF96,valuesHam96,valuesmomr96,valuesmomtheta96,valuesmomphi96 = np.loadtxt('out96.txt').T #Transposed for easier unpacking
pl_xmin = -3.
pl_xmax = +3.
pl_ymin = -3.
pl_ymax = +3.
grid_x, grid_y = np.mgrid[pl_xmin:pl_xmax:100j, pl_ymin:pl_ymax:100j]
points96 = np.zeros((len(x96), 2))
for i in range(len(x96)):
points96[i][0] = x96[i]
points96[i][1] = y96[i]
grid96 = griddata(points96, valuesCF96, (grid_x, grid_y), method='nearest')
grid96cub = griddata(points96, valuesCF96, (grid_x, grid_y), method='cubic')
plt.clf()
plt.title("Initial Data")
plt.xlabel("x/M")
plt.ylabel("y/M")
# fig, ax = plt.subplots()
#ax.plot(grid96cub.T, extent=(pl_xmin,pl_xmax, pl_ymin,pl_ymax))
plt.imshow(grid96.T, extent=(pl_xmin,pl_xmax, pl_ymin,pl_ymax))
savefig("ID.png")
plt.close()
Image("ID.png")
# # interpolation='nearest', cmap=cm.gist_rainbow)
```
<a id='convergence'></a>
# Step 7: Validation: Convergence of numerical errors (Hamiltonian & momentum constraint violations) to zero \[Back to [top](#toc)\]
$$\label{convergence}$$
**Special thanks to George Vopal for creating the following plotting script.**
The equations behind these initial data solve Einstein's equations exactly, at a single instant in time. One reflection of this solution is that the Hamiltonian constraint violation should be exactly zero in the initial data.
However, when evaluated on numerical grids, the Hamiltonian constraint violation will *not* generally evaluate to zero due to the associated numerical derivatives not being exact. However, these numerical derivatives (finite difference derivatives in this case) should *converge* to the exact derivatives as the density of numerical sampling points approaches infinity.
In this case, all of our finite difference derivatives agree with the exact solution, with an error term that drops with the uniform gridspacing to the fourth power: $\left(\Delta x^i\right)^4$.
Here, as in the [Start-to-Finish Scalar Wave (Cartesian grids) NRPy+ tutorial](Tutorial-Start_to_Finish-ScalarWave.ipynb) and the [Start-to-Finish Scalar Wave (curvilinear grids) NRPy+ tutorial](Tutorial-Start_to_Finish-ScalarWaveCurvilinear.ipynb) we confirm this convergence.
First, let's take a look at what the numerical error looks like on the x-y plane at a given numerical resolution, plotting $\log_{10}|H|$, where $H$ is the Hamiltonian constraint violation:
```
RefData=[valuesHam96,valuesmomr96,valuesmomtheta96,valuesmomphi96]
SubTitles=["\mathcal{H}",'\mathcal{M}^r',r"\mathcal{M}^{\theta}","\mathcal{M}^{\phi}"]
axN = [] #this will let us automate the subplots in the loop that follows
grid96N = [] #we need to calculate the grid96 data for each constraint for use later
plt.clf()
# We want to create four plots. One for the Hamiltonian, and three for the momentum
# constraints (r,th,ph)
# Define the size of the overall figure
fig = plt.figure(figsize=(12,12)) # 8 in x 8 in
num_plots = 4
if dictID[initial_data_string].EnableMomentum == False:
num_plots = 1
for p in range(num_plots):
grid96 = griddata(points96, RefData[p], (grid_x, grid_y), method='nearest')
grid96N.append(grid96)
grid96cub = griddata(points96, RefData[p], (grid_x, grid_y), method='cubic')
#fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True)
#Generate the subplot for the each constraint
ax = fig.add_subplot(221+p)
axN.append(ax) # Grid of 2x2
axN[p].set_xlabel('x/M')
axN[p].set_ylabel('y/M')
axN[p].set_title('$96^3$ Numerical Err.: $log_{10}|'+SubTitles[p]+'|$')
fig96cub = plt.imshow(grid96cub.T, extent=(pl_xmin,pl_xmax, pl_ymin,pl_ymax))
cb = plt.colorbar(fig96cub)
# Adjust the spacing between plots
plt.tight_layout(pad=4)
```
Next, we set up the same initial data but on a lower-resolution, $48^3$ grid. Since the constraint violation (numerical error associated with the fourth-order-accurate finite-difference derivatives) should converge to zero with the uniform gridspacing to the fourth power, $\left(\Delta x^i\right)^4$, we expect the constraint violation to increase (relative to the $96^3$ grid) by a factor of $\left(96/48\right)^4$. Here we demonstrate that this order of convergence is indeed observed as expected; i.e., all points *except* those immediately surrounding the coordinate center of the black hole (where the spatial slice excises the physical singularity through [the puncture method](http://gr.physics.ncsu.edu/UMD_June09.pdf)) exhibit numerical errors that drop as $\left(\Delta x^i\right)^4$.
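To make the expected offset explicit (this is simply the arithmetic behind the rescaling applied to the $48^3$ data in the code below):
$$\frac{E_{48^3}}{E_{96^3}} = \left(\frac{\Delta x_{48}}{\Delta x_{96}}\right)^4 = 2^4 = 16, \qquad \log_{10} 16 \approx 1.2,$$
so on a $\log_{10}$ scale the $48^3$ constraint violations should sit roughly $1.2$ decades above the $96^3$ results, and shifting them by $\log_{10}\left[(48/96)^4\right]$ should make the two curves coincide.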
```
x48,y48,valuesCF48,valuesHam48,valuesmomr48,valuesmomtheta48,valuesmomphi48 = np.loadtxt('out48.txt').T #Transposed for easier unpacking
points48 = np.zeros((len(x48), 2))
for i in range(len(x48)):
points48[i][0] = x48[i]
points48[i][1] = y48[i]
RefData=[valuesHam48,valuesmomr48,valuesmomtheta48,valuesmomphi48]
SubTitles=["\mathcal{H}",'\mathcal{M}^r',r"\mathcal{M}^{\theta}","\mathcal{M}^{\phi}"]
axN = []
plt.clf()
# We want to create four plots. One for the Hamiltonian, and three for the momentum
# constraints (r,th,ph)
# Define the size of the overall figure
fig = plt.figure(figsize=(12,12)) # 8 in x 8 in
for p in range(num_plots): #loop to cycle through our constraints and plot the data
grid48 = griddata(points48, RefData[p], (grid_x, grid_y), method='nearest')
griddiff_48_minus_96 = np.zeros((100,100))
griddiff_48_minus_96_1darray = np.zeros(100*100)
gridx_1darray_yeq0 = np.zeros(100)
grid48_1darray_yeq0 = np.zeros(100)
grid96_1darray_yeq0 = np.zeros(100)
count = 0
for i in range(100):
for j in range(100):
griddiff_48_minus_96[i][j] = grid48[i][j] - grid96N[p][i][j]
griddiff_48_minus_96_1darray[count] = griddiff_48_minus_96[i][j]
if j==49:
gridx_1darray_yeq0[i] = grid_x[i][j]
grid48_1darray_yeq0[i] = grid48[i][j] + np.log10((48./96.)**4)
grid96_1darray_yeq0[i] = grid96N[p][i][j]
count = count + 1
#Generate the subplot for the each constraint
ax = fig.add_subplot(221+p)
axN.append(ax) # Grid of 2x2
axN[p].set_title('Plot Demonstrating $4^{th}$-Order Convergence of $'+SubTitles[p]+'$')
axN[p].set_xlabel("x/M")
axN[p].set_ylabel("$log_{10}$(Relative Error)")
ax.plot(gridx_1darray_yeq0, grid96_1darray_yeq0, 'k-', label='Nr=96')
ax.plot(gridx_1darray_yeq0, grid48_1darray_yeq0, 'k--', label='Nr=48, mult by (48/96)^4')
ax.set_ylim([-14,4.])
legend = ax.legend(loc='lower right', shadow=True, fontsize='x-large')
legend.get_frame().set_facecolor('C1')
# Adjust the spacing between plots
plt.tight_layout(pad=4)
```
<a id='latex_pdf_output'></a>
# Step 8: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
[Tutorial-Start_to_Finish-BSSNCurvilinear-Setting_up_Exact_Initial_Data.pdf](Tutorial-Start_to_Finish-BSSNCurvilinear-Setting_up_Exact_Initial_Data.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-Start_to_Finish-BSSNCurvilinear-Setting_up_Exact_Initial_Data")
```
# Node classification with Graph ATtention Network (GAT)
<table><tr><td>Run the latest release of this notebook:</td><td><a href="https://mybinder.org/v2/gh/stellargraph/stellargraph/master?urlpath=lab/tree/demos/node-classification/gat-node-classification.ipynb" alt="Open In Binder" target="_parent"><img src="https://mybinder.org/badge_logo.svg"/></a></td><td><a href="https://colab.research.google.com/github/stellargraph/stellargraph/blob/master/demos/node-classification/gat-node-classification.ipynb" alt="Open In Colab" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg"/></a></td></tr></table>
Import NetworkX and StellarGraph:
```
# install StellarGraph if running on Google Colab
import sys
if 'google.colab' in sys.modules:
%pip install -q stellargraph[demos]==1.3.0b
# verify that we're using the correct version of StellarGraph for this notebook
import stellargraph as sg
try:
sg.utils.validate_notebook_version("1.3.0b")
except AttributeError:
raise ValueError(
f"This notebook requires StellarGraph version 1.3.0b, but a different version {sg.__version__} is installed. Please see <https://github.com/stellargraph/stellargraph/issues/1172>."
) from None
import networkx as nx
import pandas as pd
import os
import stellargraph as sg
from stellargraph.mapper import FullBatchNodeGenerator
from stellargraph.layer import GAT
from tensorflow.keras import layers, optimizers, losses, metrics, Model
from sklearn import preprocessing, feature_extraction, model_selection
from stellargraph import datasets
from IPython.display import display, HTML
import matplotlib.pyplot as plt
%matplotlib inline
```
## Loading the CORA network
(See [the "Loading from Pandas" demo](../basics/loading-pandas.ipynb) for details on how data can be loaded.)
```
dataset = datasets.Cora()
display(HTML(dataset.description))
G, node_subjects = dataset.load()
print(G.info())
```
We aim to train a graph-ML model that will predict the "subject" attribute on the nodes. These subjects are one of 7 categories:
```
set(node_subjects)
```
### Splitting the data
For machine learning we want to take a subset of the nodes for training, and use the rest for validation and testing. We'll use scikit-learn again to do this.
Here we're taking 140 node labels for training, 500 for validation, and the rest for testing.
```
train_subjects, test_subjects = model_selection.train_test_split(
node_subjects, train_size=140, test_size=None, stratify=node_subjects
)
val_subjects, test_subjects = model_selection.train_test_split(
test_subjects, train_size=500, test_size=None, stratify=test_subjects
)
```
Note using stratified sampling gives the following counts:
```
from collections import Counter
Counter(train_subjects)
```
The training set has class imbalance that might need to be compensated, e.g., via using a weighted cross-entropy loss in model training, with class weights inversely proportional to class support. However, we will ignore the class imbalance in this example, for simplicity.
### Converting to numeric arrays
For our categorical target, we will use one-hot vectors that will be fed into a soft-max Keras layer during training. To do this conversion we can use the `LabelBinarizer` transform from scikit-learn:
```
target_encoding = preprocessing.LabelBinarizer()
train_targets = target_encoding.fit_transform(train_subjects)
val_targets = target_encoding.transform(val_subjects)
test_targets = target_encoding.transform(test_subjects)
```
We now do the same for the node attributes we want to use to predict the subject. These are the feature vectors that the Keras model will use as input. The CORA dataset contains attributes 'w_x' that correspond to words found in that publication. If a word occurs more than once in a publication the relevant attribute will be set to one, otherwise it will be zero.
## Creating the GAT model in Keras
To feed data from the graph to the Keras model we need a generator. Since GAT is a full-batch model, we use the `FullBatchNodeGenerator` class to feed node features and graph adjacency matrix to the model.
```
generator = FullBatchNodeGenerator(G, method="gat")
```
For training we map only the training nodes returned from our splitter and the target values.
```
train_gen = generator.flow(train_subjects.index, train_targets)
```
Now we can specify our machine learning model, we need a few more parameters for this:
* `layer_sizes` is a list of hidden feature sizes of each layer in the model. In this example we use two GAT layers, with 8-dimensional hidden node features for the first layer and the 7-class classification output for the second layer.
* `attn_heads` is the number of attention heads in all but the last GAT layer in the model
* `activations` is a list of activations applied to each layer's output
* Arguments such as `bias`, `in_dropout`, `attn_dropout` are internal parameters of the model, execute `?GAT` for details.
To follow the GAT model architecture used for the Cora dataset in the original paper ([Graph Attention Networks, P. Veličković et al., ICLR 2018](https://arxiv.org/abs/1710.10903)), let's build a 2-layer GAT model, with the second layer being the classifier that predicts the paper subject: it should thus have an output size of `train_targets.shape[1]` (7 subjects) and a softmax activation.
```
gat = GAT(
layer_sizes=[8, train_targets.shape[1]],
activations=["elu", "softmax"],
attn_heads=8,
generator=generator,
in_dropout=0.5,
attn_dropout=0.5,
normalize=None,
)
```
Expose the input and output tensors of the GAT model for node prediction via the `GAT.in_out_tensors()` method:
```
x_inp, predictions = gat.in_out_tensors()
```
### Training the model
Now let's create the actual Keras model with input tensors `x_inp` and output tensors `predictions` from the final dense layer
```
model = Model(inputs=x_inp, outputs=predictions)
model.compile(
optimizer=optimizers.Adam(lr=0.005),
loss=losses.categorical_crossentropy,
metrics=["acc"],
)
```
Train the model, keeping track of its loss and accuracy on the training set, and its generalisation performance on the validation set (we need to create another generator over the validation data for this)
```
val_gen = generator.flow(val_subjects.index, val_targets)
```
Create callbacks for early stopping (if validation accuracy stops improving) and best model checkpoint saving:
```
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
if not os.path.isdir("logs"):
os.makedirs("logs")
es_callback = EarlyStopping(
monitor="val_acc", patience=20
) # patience is the number of epochs to wait before early stopping in case of no further improvement
mc_callback = ModelCheckpoint(
"logs/best_model.h5", monitor="val_acc", save_best_only=True, save_weights_only=True
)
```
Train the model
```
history = model.fit(
train_gen,
epochs=50,
validation_data=val_gen,
verbose=2,
shuffle=False, # this should be False, since shuffling data means shuffling the whole graph
callbacks=[es_callback, mc_callback],
)
```
Plot the training history:
```
sg.utils.plot_history(history)
```
Reload the saved weights of the best model found during the training (according to validation accuracy)
```
model.load_weights("logs/best_model.h5")
```
Evaluate the best model on the test set
```
test_gen = generator.flow(test_subjects.index, test_targets)
test_metrics = model.evaluate(test_gen)
print("\nTest Set Metrics:")
for name, val in zip(model.metrics_names, test_metrics):
print("\t{}: {:0.4f}".format(name, val))
```
### Making predictions with the model
Now let's get the predictions for all nodes:
```
all_nodes = node_subjects.index
all_gen = generator.flow(all_nodes)
all_predictions = model.predict(all_gen)
```
These predictions will be the output of the softmax layer, so to get final categories we'll use the `inverse_transform` method of our target attribute specification to turn these values back to the original categories
Note that for full-batch methods the batch size is 1 and the predictions have shape $(1, N_{nodes}, N_{classes})$ so we remove the batch dimension to obtain predictions of shape $(N_{nodes}, N_{classes})$.
```
node_predictions = target_encoding.inverse_transform(all_predictions.squeeze())
```
Let's have a look at a few predictions after training the model:
```
df = pd.DataFrame({"Predicted": node_predictions, "True": node_subjects})
df.head(20)
```
## Node embeddings
Evaluate node embeddings as activations of the output of the 1st GraphAttention layer in GAT layer stack (the one before the top classification layer predicting paper subjects), and visualise them, coloring nodes by their true subject label. We expect to see nice clusters of papers in the node embedding space, with papers of the same subject belonging to the same cluster.
Let's create a new model with the same inputs as we used previously `x_inp` but now the output is the embeddings rather than the predicted class. We find the embedding layer by taking the first graph attention layer in the stack of Keras layers. Additionally note that the weights trained previously are kept in the new model.
```
emb_layer = next(l for l in model.layers if l.name.startswith("graph_attention"))
print(
"Embedding layer: {}, output shape {}".format(emb_layer.name, emb_layer.output_shape)
)
embedding_model = Model(inputs=x_inp, outputs=emb_layer.output)
```
The embeddings can now be calculated using the predict function. Note that the embeddings returned are 64 dimensional features (8 dimensions for each of the 8 attention heads) for all nodes.
```
emb = embedding_model.predict(all_gen)
emb.shape
```
Project the embeddings to 2d using either TSNE or PCA transform, and visualise, coloring nodes by their true subject label
```
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import pandas as pd
import numpy as np
```
Note that the embeddings from the GAT model have a batch dimension of 1 so we `squeeze` this to get a matrix of $N_{nodes} \times N_{emb}$.
```
X = emb.squeeze()
y = np.argmax(target_encoding.transform(node_subjects), axis=1)
if X.shape[1] > 2:
transform = TSNE # PCA
trans = transform(n_components=2)
emb_transformed = pd.DataFrame(trans.fit_transform(X), index=list(G.nodes()))
emb_transformed["label"] = y
else:
emb_transformed = pd.DataFrame(X, index=list(G.nodes()))
emb_transformed = emb_transformed.rename(columns={"0": 0, "1": 1})
emb_transformed["label"] = y
alpha = 0.7
fig, ax = plt.subplots(figsize=(7, 7))
ax.scatter(
emb_transformed[0],
emb_transformed[1],
c=emb_transformed["label"].astype("category"),
cmap="jet",
alpha=alpha,
)
ax.set(aspect="equal", xlabel="$X_1$", ylabel="$X_2$")
plt.title(
"{} visualization of GAT embeddings for cora dataset".format(transform.__name__)
)
plt.show()
```
<table><tr><td>Run the latest release of this notebook:</td><td><a href="https://mybinder.org/v2/gh/stellargraph/stellargraph/master?urlpath=lab/tree/demos/node-classification/gat-node-classification.ipynb" alt="Open In Binder" target="_parent"><img src="https://mybinder.org/badge_logo.svg"/></a></td><td><a href="https://colab.research.google.com/github/stellargraph/stellargraph/blob/master/demos/node-classification/gat-node-classification.ipynb" alt="Open In Colab" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg"/></a></td></tr></table>
|
github_jupyter
|
# State preparation with the SLM mask
## Basics
When performing quantum computations with global pulses, it might be hard to prepare the system in an arbitrary initial state. This is especially true in the XY mode, where only a global $\sigma^x$ pulse can produce excitations whose number is otherwise conserved during free evolution. A partial solution to this problem is to utilize an SLM mask. <br>
Assume a system of three qubits in XY mode is initially in state $\left| \downarrow \downarrow \downarrow \right\rangle$, and that we are interested in preparing the state $\left| \uparrow \downarrow \downarrow \right\rangle$. Acting naively with a global $\sigma^x$ pulse of area $\pi$ would result in state $\left| \uparrow \uparrow \uparrow \right\rangle$. Using an SLM pattern, however, it is possible to detune the last two qubits away from resonance, and the same global $\sigma^x$ pulse will instead produce the desired state $\left| \uparrow \downarrow \downarrow \right\rangle$. <br>
Let's see how it works in practice. First create the register:
```
import numpy as np
from pulser import Pulse, Sequence, Register
from pulser.devices import MockDevice
from pulser.waveforms import BlackmanWaveform
from pulser.simulation import Simulation
# Qubit register
qubits = {"q0": (-5,0), "q1": (0,0), "q2": (5,0)}
reg = Register(qubits)
reg.draw()
```
Now create the sequence and add a global $\sigma^x$ pulse of area $\pi$ in XY mode:
```
# Create the sequence
seq = Sequence(reg, MockDevice)
# Declare a global XY channel and add the pi pulse
seq.declare_channel('ch', 'mw_global')
pulse = Pulse.ConstantDetuning(BlackmanWaveform(200, np.pi), 0, 0)
seq.add(pulse, 'ch')
```
Drawing the sequence will show the following:
```
seq.draw()
```
To set up the SLM mask, all we need to do is create a list containing the names of the qubits that we want to mask and pass it to the $\verb:Sequence.config_slm_mask:$ method:
```
# Mask the last two qubits
masked_qubits = ["q1", "q2"]
seq.config_slm_mask(masked_qubits)
```
At this point it is possible to visualize the mask by drawing the sequence. The masked pulse will appear with a shaded background, and the names of the masked qubits will be shown in the bottom left corner.
```
seq.draw()
```
The sequence drawing method also allows visualizing the register. If an SLM mask is defined, the masked qubits will appear with a shaded square halo around them:
```
seq.draw(draw_register=True)
```
Now let's see how the system evolves under this masked pulse. Since the pulse only acts on the first qubit, we expect the final state to be $\left| \uparrow \downarrow \downarrow \right\rangle$, or, according to Pulser's conventions for XY basis states, $(1,0)^T \otimes (0,1)^T \otimes (0,1)^T$ in the Hilbert space $C^8$:
```
import qutip
qutip.tensor(qutip.basis(2, 0), qutip.basis(2, 1), qutip.basis(2, 1))
```
Now run the simulation and print the final state as given by Pulser:
```
sim = Simulation(seq)
results = sim.run()
results.get_final_state()
```
As expected, the two states agree up to numerical errors.
## Notes
Since the SLM mask is mostly useful for state preparation, its use in Pulser is restricted to the first pulse in the sequence. This can be seen by adding an extra pulse in the previous example and drawing the sequence:
```
seq.add(pulse, 'ch')
seq.draw()
```
This example also illustrates the fact that the SLM mask can be configured at any moment during the creation of a sequence (either before or after adding pulses) and it will automatically latch to the first pulse. <br>
However, in order to reflect real hardware constraints, the mask can be configured only once. Trying to configure the mask a second time will raise an error:
```
try:
seq.config_slm_mask(masked_qubits)
except ValueError as err:
print(err)
```
Although the example shown here makes use of the XY mode, everything translates directly to the Ising mode as well with the same syntax and restrictions.
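For instance, a minimal sketch of the same masked preparation in the Ising mode might look as follows (an assumption-based sketch reusing `reg` and `pulse` from above, and assuming `MockDevice` exposes a `'rydberg_global'` channel):
```
seq_ising = Sequence(reg, MockDevice)
seq_ising.declare_channel('ch', 'rydberg_global')  # global Ising channel instead of 'mw_global'
seq_ising.config_slm_mask(["q1", "q2"])            # the same single-configuration restriction applies
seq_ising.add(pulse, 'ch')                         # the mask latches onto this first pulse
seq_ising.draw()
```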
|
github_jupyter
|
```
# general imports
import random
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
import torch
import torch.nn as nn
import torch.utils.data as utils
import matplotlib.pyplot as plt
plt.rcParams["legend.loc"] = "best"
plt.rcParams['figure.facecolor'] = 'white'
%matplotlib inline
# filter python warnings
import warnings
warnings.filterwarnings("ignore")
# prepare Fashion MNIST data
import torchvision.datasets as datasets
# train data
mnist_trainset = datasets.FashionMNIST(root='./data/fashion', train=True, download=True, transform=None)
mnist_train_images = mnist_trainset.train_data.numpy()[..., np.newaxis]
mnist_train_labels = mnist_trainset.train_labels.numpy()
# test data
mnist_testset = datasets.FashionMNIST(root='./data/fashion', train=False, download=True, transform=None)
mnist_test_images = mnist_testset.test_data.numpy()[..., np.newaxis]
mnist_test_labels = mnist_testset.test_labels.numpy()
# The Deep Convolution Random Forest class (for binary classification)
class ConvRF(object):
def __init__(self, kernel_size=5, stride=2):
self.kernel_size = kernel_size
self.stride = stride
self.kernel_forests = None
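    # _convolve_chop slides a kernel_size x kernel_size window over each image with the given
    # stride and returns the stacked patches (plus, optionally, per-patch copies of the labels).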
def _convolve_chop(self, images, labels=None, flatten=False):
batch_size, in_dim, _, num_channels = images.shape
out_dim = int((in_dim - self.kernel_size) / self.stride) + 1 # calculate output dimensions
# create matrix to hold the chopped images
out_images = np.zeros((batch_size, out_dim, out_dim,
self.kernel_size, self.kernel_size, num_channels))
out_labels = None
curr_y = out_y = 0
# move kernel vertically across the image
while curr_y + self.kernel_size <= in_dim:
curr_x = out_x = 0
# move kernel horizontally across the image
while curr_x + self.kernel_size <= in_dim:
# chop images
out_images[:, out_x, out_y] = images[:, curr_x:curr_x +
self.kernel_size, curr_y:curr_y+self.kernel_size, :]
curr_x += self.stride
out_x += 1
curr_y += self.stride
out_y += 1
if flatten:
out_images = out_images.reshape(batch_size, out_dim, out_dim, -1)
if labels is not None:
out_labels = np.zeros((batch_size, out_dim, out_dim))
out_labels[:, ] = labels.reshape(-1, 1, 1)
return out_images, out_labels
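    # convolve_fit trains one RandomForestClassifier per spatial patch location and returns a
    # single-channel "probability map": each output pixel is that location's predicted
    # probability of the positive class (label 1) for the corresponding patch.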
def convolve_fit(self, images, labels):
num_channels = images.shape[-1]
sub_images, sub_labels = self._convolve_chop(images, labels=labels, flatten=True)
batch_size, out_dim, _, _ = sub_images.shape
self.kernel_forests = np.zeros((out_dim, out_dim), dtype=int).tolist()
convolved_image = np.zeros((images.shape[0], out_dim, out_dim, 1))
for i in range(out_dim):
for j in range(out_dim):
self.kernel_forests[i][j] = RandomForestClassifier(n_estimators=32)
self.kernel_forests[i][j].fit(sub_images[:, i, j], sub_labels[:, i, j])
convolved_image[:, i, j] = self.kernel_forests[i][j].predict_proba(sub_images[:, i, j])[..., 1][..., np.newaxis]
return convolved_image
def convolve_predict(self, images):
if not self.kernel_forests:
raise Exception("Should fit training data before predicting")
num_channels = images.shape[-1]
sub_images, _ = self._convolve_chop(images, flatten=True)
batch_size, out_dim, _, _ = sub_images.shape
kernel_predictions = np.zeros((images.shape[0], out_dim, out_dim, 1))
for i in range(out_dim):
for j in range(out_dim):
kernel_predictions[:, i, j] = self.kernel_forests[i][j].predict_proba(sub_images[:, i, j])[..., 1][..., np.newaxis]
return kernel_predictions
# define a simple CNN architecture
from torch.autograd import Variable
import torch.nn.functional as F
class SimpleCNNOneFilter(torch.nn.Module):
def __init__(self):
super(SimpleCNNOneFilter, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 1, kernel_size=10, stride=2)
self.fc1 = torch.nn.Linear(100, 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = x.view(-1, 100)
x = self.fc1(x)
return(x)
class SimpleCNN32Filter(torch.nn.Module):
def __init__(self):
super(SimpleCNN32Filter, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 32, kernel_size=10, stride=2) # try 64 too, if possible
self.fc1 = torch.nn.Linear(100*32, 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = x.view(-1, 100*32)
x = self.fc1(x)
return(x)
class SimpleCNN32Filter2Layers(torch.nn.Module):
def __init__(self):
super(SimpleCNN32Filter2Layers, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 32, kernel_size=10, stride=2)
self.conv2 = torch.nn.Conv2d(32, 32, kernel_size=7, stride=1)
self.fc1 = torch.nn.Linear(16*32, 2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = x.view(-1, 16*32)
x = self.fc1(x)
return(x)
def run_naive_rf(train_images, train_labels, test_images, test_labels, fraction_of_train_samples, class1=3, class2=8):
num_train_samples_class_1 = int(np.sum(train_labels==class1) * fraction_of_train_samples)
num_train_samples_class_2 = int(np.sum(train_labels==class2) * fraction_of_train_samples)
# get only train images and labels for class 1 and class 2
train_images = np.concatenate([train_images[train_labels==class1][:num_train_samples_class_1], train_images[train_labels==class2][:num_train_samples_class_2]])
train_labels = np.concatenate([np.repeat(0, num_train_samples_class_1), np.repeat(1, num_train_samples_class_2)])
# get only test images and labels for class 1 and class 2
test_images = np.concatenate([test_images[test_labels==class1], test_images[test_labels==class2]])
test_labels = np.concatenate([np.repeat(0, np.sum(test_labels==class1)), np.repeat(1, np.sum(test_labels==class2))])
# Train
clf = RandomForestClassifier(n_estimators=100)
clf.fit(train_images.reshape(-1, 28*28*1), train_labels)
# Test
test_preds = clf.predict(test_images.reshape(-1, 28*28*1))
return accuracy_score(test_labels, test_preds)
def run_one_layer_deep_conv_rf(train_images, train_labels, test_images, test_labels, fraction_of_train_samples, class1=3, class2=8):
num_train_samples_class_1 = int(np.sum(train_labels==class1) * fraction_of_train_samples)
num_train_samples_class_2 = int(np.sum(train_labels==class2) * fraction_of_train_samples)
# get only train images and labels for class 1 and class 2
train_images = np.concatenate([train_images[train_labels==class1][:num_train_samples_class_1], train_images[train_labels==class2][:num_train_samples_class_2]])
train_labels = np.concatenate([np.repeat(0, num_train_samples_class_1), np.repeat(1, num_train_samples_class_2)])
# get only test images and labels for class 1 and class 2
test_images = np.concatenate([test_images[test_labels==class1], test_images[test_labels==class2]])
test_labels = np.concatenate([np.repeat(0, np.sum(test_labels==class1)), np.repeat(1, np.sum(test_labels==class2))])
## Train
# ConvRF (layer 1)
conv1 = ConvRF(kernel_size=10, stride=2)
conv1_map = conv1.convolve_fit(train_images, train_labels)
# Full RF
conv1_full_RF = RandomForestClassifier(n_estimators=100)
conv1_full_RF.fit(conv1_map.reshape(len(train_images), -1), train_labels)
## Test (after ConvRF 1 and Full RF)
conv1_map_test = conv1.convolve_predict(test_images)
mnist_test_preds = conv1_full_RF.predict(conv1_map_test.reshape(len(test_images), -1))
return accuracy_score(test_labels, mnist_test_preds)
def run_two_layer_deep_conv_rf(train_images, train_labels, test_images, test_labels, fraction_of_train_samples, class1=3, class2=8):
num_train_samples_class_1 = int(np.sum(train_labels==class1) * fraction_of_train_samples)
num_train_samples_class_2 = int(np.sum(train_labels==class2) * fraction_of_train_samples)
# get only train images and labels for class 1 and class 2
train_images = np.concatenate([train_images[train_labels==class1][:num_train_samples_class_1], train_images[train_labels==class2][:num_train_samples_class_2]])
train_labels = np.concatenate([np.repeat(0, num_train_samples_class_1), np.repeat(1, num_train_samples_class_2)])
# get only test images and labels for class 1 and class 2
test_images = np.concatenate([test_images[test_labels==class1], test_images[test_labels==class2]])
test_labels = np.concatenate([np.repeat(0, np.sum(test_labels==class1)), np.repeat(1, np.sum(test_labels==class2))])
## Train
# ConvRF (layer 1)
conv1 = ConvRF(kernel_size=10, stride=2)
conv1_map = conv1.convolve_fit(train_images, train_labels)
# ConvRF (layer 2)
conv2 = ConvRF(kernel_size=7, stride=1)
conv2_map = conv2.convolve_fit(conv1_map, train_labels)
# Full RF
conv1_full_RF = RandomForestClassifier(n_estimators=100)
conv1_full_RF.fit(conv2_map.reshape(len(train_images), -1), train_labels)
## Test (after ConvRF 1 and Full RF)
conv1_map_test = conv1.convolve_predict(test_images)
conv2_map_test = conv2.convolve_predict(conv1_map_test)
test_preds = conv1_full_RF.predict(conv2_map_test.reshape(len(test_images), -1))
return accuracy_score(test_labels, test_preds)
def cnn_train_test(cnn_model, x_train, y_train, x_test, y_test):
# set params
num_epochs = 25
learning_rate = 0.001
# prepare data
tensor_x = torch.stack([torch.Tensor(i.reshape(1, 28, 28)) for i in x_train]).float() # transform to torch tensors
tensor_y = torch.stack([torch.Tensor([i]) for i in y_train]).long()
my_dataset = utils.TensorDataset(tensor_x,tensor_y) # create your dataset
train_loader = utils.DataLoader(my_dataset, batch_size=64, shuffle=True) # create your dataloader
tensor_x = torch.stack([torch.Tensor(i.reshape(1, 28, 28)) for i in x_test]).float() # transform to torch tensors
tensor_y = torch.stack([torch.Tensor([i]) for i in y_test]).long()
my_dataset = utils.TensorDataset(tensor_x,tensor_y) # create your dataset
test_loader = utils.DataLoader(my_dataset, batch_size=64) # create your dataloader
# define model
model = cnn_model()
# loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images
labels = labels
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels.view(-1))
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
# test the model
accuracy = 0
model.eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
with torch.no_grad():
correct = 0
total = 0
for images, labels in test_loader:
images = images
labels = labels
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted.view(-1) == labels.view(-1)).sum()
accuracy = float(correct) / float(total)
return accuracy
def run_cnn(cnn_model, train_images, train_labels, test_images, test_labels, fraction_of_train_samples, class1=3, class2=8):
num_train_samples_class_1 = int(np.sum(train_labels==class1) * fraction_of_train_samples)
num_train_samples_class_2 = int(np.sum(train_labels==class2) * fraction_of_train_samples)
# get only train images and labels for class 1 and class 2
train_images = np.concatenate([train_images[train_labels==class1][:num_train_samples_class_1], train_images[train_labels==class2][:num_train_samples_class_2]])
train_labels = np.concatenate([np.repeat(0, num_train_samples_class_1), np.repeat(1, num_train_samples_class_2)])
# get only test images and labels for class 1 and class 2
test_images = np.concatenate([test_images[test_labels==class1], test_images[test_labels==class2]])
test_labels = np.concatenate([np.repeat(0, np.sum(test_labels==class1)), np.repeat(1, np.sum(test_labels==class2))])
return cnn_train_test(cnn_model, train_images, train_labels, test_images, test_labels)
# Sneaker(7) vs Ankle Boot(9) classification
# get only train images and labels for two classes: 7 and 9
mnist_train_images_7_9 = np.concatenate([mnist_train_images[mnist_train_labels==7], mnist_train_images[mnist_train_labels==9]])
mnist_train_labels_7_9 = np.concatenate([np.repeat(0, np.sum(mnist_train_labels==7)), np.repeat(1, np.sum(mnist_train_labels==9))])
# visualize data and labels
# 7 (label 0)
index = 3000
print("Label:", mnist_train_labels_7_9[index])
plt.imshow(mnist_train_images_7_9[index].reshape(28, 28),cmap='gray')
plt.show()
# 9 (label 1)
index = 8000
print("Label:", mnist_train_labels_7_9[index])
plt.imshow(mnist_train_images_7_9[index].reshape(28, 28),cmap='gray')
plt.show()
# accuracy vs num training samples (naive_rf)
naive_rf_acc_vs_n = list()
fraction_of_train_samples_space = np.geomspace(0.01, 1.0, num=10)
for fraction_of_train_samples in fraction_of_train_samples_space:
best_accuracy = np.mean([run_naive_rf(mnist_train_images, mnist_train_labels, mnist_test_images, mnist_test_labels, fraction_of_train_samples, 7, 9) for _ in range(3)])
naive_rf_acc_vs_n.append(best_accuracy)
print("Train Fraction:", str(fraction_of_train_samples))
print("Accuracy:", str(best_accuracy))
# accuracy vs num training samples (one layer deep_conv_rf)
deep_conv_rf_acc_vs_n = list()
fraction_of_train_samples_space = np.geomspace(0.01, 1.0, num=10)
for fraction_of_train_samples in fraction_of_train_samples_space:
best_accuracy = np.mean([run_one_layer_deep_conv_rf(mnist_train_images, mnist_train_labels, mnist_test_images, mnist_test_labels, fraction_of_train_samples, 7, 9) for _ in range(3)])
deep_conv_rf_acc_vs_n.append(best_accuracy)
print("Train Fraction:", str(fraction_of_train_samples))
print("Accuracy:", str(best_accuracy))
# accuracy vs num training samples (one layer cnn)
cnn_acc_vs_n = list()
fraction_of_train_samples_space = np.geomspace(0.01, 1.0, num=10)
for fraction_of_train_samples in fraction_of_train_samples_space:
best_accuracy = np.mean([run_cnn(SimpleCNNOneFilter, mnist_train_images, mnist_train_labels, mnist_test_images, mnist_test_labels, fraction_of_train_samples, 7, 9) for _ in range(3)])
cnn_acc_vs_n.append(best_accuracy)
print("Train Fraction:", str(fraction_of_train_samples))
print("Accuracy:", str(best_accuracy))
# accuracy vs num training samples (one layer cnn (32 filters))
cnn32_acc_vs_n = list()
fraction_of_train_samples_space = np.geomspace(0.01, 1.0, num=10)
for fraction_of_train_samples in fraction_of_train_samples_space:
best_accuracy = np.mean([run_cnn(SimpleCNN32Filter, mnist_train_images, mnist_train_labels, mnist_test_images, mnist_test_labels, fraction_of_train_samples, 7, 9) for _ in range(3)])
cnn32_acc_vs_n.append(best_accuracy)
print("Train Fraction:", str(fraction_of_train_samples))
print("Accuracy:", str(best_accuracy))
# accuracy vs num training samples (two layer deep_conv_rf)
deep_conv_rf_two_layer_acc_vs_n = list()
fraction_of_train_samples_space = np.geomspace(0.01, 1.0, num=10)
for fraction_of_train_samples in fraction_of_train_samples_space:
best_accuracy = np.mean([run_two_layer_deep_conv_rf(mnist_train_images, mnist_train_labels, mnist_test_images, mnist_test_labels, fraction_of_train_samples, 7, 9) for _ in range(3)])
deep_conv_rf_two_layer_acc_vs_n.append(best_accuracy)
print("Train Fraction:", str(fraction_of_train_samples))
print("Accuracy:", str(best_accuracy))
# accuracy vs num training samples (two layer cnn (32 filters))
cnn32_two_layer_acc_vs_n = list()
fraction_of_train_samples_space = np.geomspace(0.01, 1.0, num=10)
for fraction_of_train_samples in fraction_of_train_samples_space:
best_accuracy = np.mean([run_cnn(SimpleCNN32Filter2Layers, mnist_train_images, mnist_train_labels, mnist_test_images, mnist_test_labels, fraction_of_train_samples, 7, 9) for _ in range(3)])
cnn32_two_layer_acc_vs_n.append(best_accuracy)
print("Train Fraction:", str(fraction_of_train_samples))
print("Accuracy:", str(best_accuracy))
plt.rcParams['figure.figsize'] = 10, 8
plt.rcParams['font.size'] = 20
plt.rcParams['legend.fontsize'] = 20
plt.rcParams['figure.titlesize'] = 20
fig, ax = plt.subplots() # create a new figure with a default 111 subplot
ax.plot(fraction_of_train_samples_space, naive_rf_acc_vs_n, marker='X', markerfacecolor='blue', markersize=10, color='orange', linewidth=4, label="Naive RF")
ax.plot(fraction_of_train_samples_space, deep_conv_rf_acc_vs_n, marker='X', markerfacecolor='blue', markersize=10, color='green', linewidth=4, label="Deep Conv RF")
ax.plot(fraction_of_train_samples_space, deep_conv_rf_two_layer_acc_vs_n, marker='X', markerfacecolor='blue', markersize=10, color='grey', linewidth=4, label="Deep Conv RF Two Layer")
ax.plot(fraction_of_train_samples_space, cnn_acc_vs_n, marker='X', markerfacecolor='blue', markersize=10, color='red', linewidth=4, label="CNN")
ax.plot(fraction_of_train_samples_space, cnn32_acc_vs_n, marker='X', markerfacecolor='blue', markersize=10, color='brown', linewidth=4, label="CNN (32 filters)")
ax.plot(fraction_of_train_samples_space, cnn32_two_layer_acc_vs_n, marker='X', markerfacecolor='blue', markersize=10, color='black', linewidth=4, label="CNN Two Layer (32 filters)")
ax.set_xlabel('Fraction of Train Samples')
ax.set_xlim(0, 1.0)
ax.set_ylabel('Accuracy')
ax.set_ylim(0.81, 1)
ax.set_title("Sneaker(7) vs Ankel Boot(9) Classification")
plt.legend()
plt.show()
plt.rcParams['figure.figsize'] = 10, 8
plt.rcParams['font.size'] = 20
plt.rcParams['legend.fontsize'] = 20
plt.rcParams['figure.titlesize'] = 20
fig, ax = plt.subplots() # create a new figure with a default 111 subplot
ax.plot(fraction_of_train_samples_space, naive_rf_acc_vs_n, marker='X', markerfacecolor='blue', markersize=10, color='orange', linewidth=4, label="Naive RF")
ax.plot(fraction_of_train_samples_space, deep_conv_rf_acc_vs_n, marker='X', markerfacecolor='blue', markersize=10, color='green', linewidth=4, label="Deep Conv RF")
ax.plot(fraction_of_train_samples_space, deep_conv_rf_two_layer_acc_vs_n, marker='X', markerfacecolor='blue', markersize=10, color='grey', linewidth=4, label="Deep Conv RF 2 Layer")
ax.plot(fraction_of_train_samples_space, cnn_acc_vs_n, marker='X', markerfacecolor='blue', markersize=10, color='red', linewidth=4, label="CNN")
ax.plot(fraction_of_train_samples_space, cnn32_acc_vs_n, marker='X', markerfacecolor='blue', markersize=10, color='brown', linewidth=4, label="CNN (32 filters)")
ax.plot(fraction_of_train_samples_space, cnn32_two_layer_acc_vs_n, marker='X', markerfacecolor='blue', markersize=10, color='black', linewidth=4, label="CNN 2 Layer (32 filters)")
ax.set_xlabel('Fraction of Train Samples')
ax.set_xlim(0, 1.0)
ax.set_ylabel('Accuracy')
ax.set_ylim(0.92, 0.98)
ax.set_title("Sneaker(7) vs Ankel Boot(9) Classification")
plt.legend()
plt.show()
```
|
github_jupyter
|
```
import numpy as np
import pandas as pd
%load_ext autoreload
%autoreload 2
```
# Overview
What does this thing look like?
- Object that you can import
- Can call train, load, featurize, import
- Inherits from sklearn.transform? Multiple inheritance is hard...
# I. Load Data
- words: np.ndarray of all characters
- dataset: np.ndarray of character indices
```
import codecs
import pickle
#=====[ Load a whole corpus ]=====
def load_data(data_dir='./data/tinyshakespeare/'):
vocab = {}
print ('%s/input.txt'% data_dir)
words = codecs.open('%s/input.txt' % data_dir, 'rb', 'utf-8').read()
words = list(words)
dataset = np.ndarray((len(words),), dtype=np.int32)
for i, word in enumerate(words):
if word not in vocab:
vocab[word] = len(vocab)
dataset[i] = vocab[word]
    print('corpus length (in characters):', len(words))
    print('vocab size:', len(vocab))
return dataset, words, vocab
#print 'corpus length (in characters):', len(words)
#dataset, words, vocab = load_data()
#=====[ Load only the vocabulary ]=====
vocab = pickle.load(open('./data/audit_data/vocab.bin', 'rb'))
ivocab = {i:c for c, i in vocab.items()}
print('vocab size:', len(vocab))
```
# II. Load Model
```
import pickle
from CharRNN import CharRNN, make_initial_state
from chainer import cuda
#####[ PARAMS ]#####
n_units = 128
seq_length = 50
batchsize = 50
seed = 123
length = 50
####################
np.random.seed(seed)
model = pickle.load(open('./data/audit_data/audit_model.chainermodel', 'rb'))
n_units = model.embed.W.data.shape[1]
initial_state = make_initial_state(n_units, batchsize=1, train=False)
print('# of units: ', n_units)
```
# III. Create TextFeaturizer
```
class TextFeaturizer(object):
"""Featurizes Text using a CharRNN"""
def __init__(self, model, vocab):
self.__dict__.update(locals())
self.n_units = model.embed.W.data.shape[1]
def preprocess(self, text):
"""returns preprocessed version of text"""
if not isinstance(text, str):
raise NotImplementedError("Must pass in a string")
        return np.array([self.vocab[c] for c in text]).astype(np.int32)
def featurize(self, text):
"""returns a list of feature vectors for the text"""
#=====[ Step 1: Convert to an array ]=====
dataset = self.preprocess(text)
#=====[ Step 2: Create initial state ]=====
        initial_state = make_initial_state(self.n_units, batchsize=1, train=False)
        init_char = np.array([0]).astype(np.int32)
        state, prob = self.model.forward_one_step(init_char, init_char, initial_state, train=False)
#=====[ Step 3: Find feature vectors ]=====
states = []
for i in range(len(dataset)):
cur_char = np.array([dataset[i]]).astype(np.int32)
state, prob = model.forward_one_step(cur_char, cur_char, state, train=False)
states.append(state['h2'].data.copy())
#=====[ Step 4: Sanity check ]=====
if not all([s.shape == (1, self.n_units) for s in states]):
raise Exception("For some reason, generated the wrong shape! {}".format(np.array(states).shape))
return states
featurizer = TextFeaturizer(model, vocab)
#=====[ TEST ]=====
text = 'Conducted an investigation of WalMart and concluded air and fire safety were correct'
states = featurizer.featurize(text)
```
|
github_jupyter
|
# Deep Learning on JuiceFS Tutorial - 01. Getting Started
JuiceFS is a shared POSIX file system for the cloud.
You may replace existing solutions with JuiceFS at zero cost, turning any object store into a shared POSIX file system.
Sign up for 1T free quota now at https://juicefs.com
Source code of this tutorial can be found in https://github.com/juicedata/juicefs-dl-tutorial
## 0. Requirements
It's very easy to set up JuiceFS on your remote HPC machine, Google Colab or CoCalc by inserting just one command into your Jupyter Notebook:
```
!curl -sL https://juicefs.com/static/juicefs -o juicefs && chmod +x juicefs
```
Here we go, let's try the magic of JuiceFS!
## 1. Mounting your JuiceFS
After creating your JuiceFS volume by following the [documentation here](https://juicefs.com/docs/en/getting_started.html), you have two ways to mount it:
### 1.1 The secure way
Just run the mount command and enter the access key and secret key from your public cloud or storage provider when prompted. This approach is for people who want to collaborate with others while protecting their credentials. It also lets your teammates use their own JuiceFS volumes, and lets you share the notebook publicly.
```
!./juicefs mount {JFS_VOLUMN_NAME} /jfs
```
### 1.2 The convenient way
However, maybe you are working alone, are not worried about leaking credentials, and don't want the hassle of re-entering them every time you restart the kernel. In that case, you can save your token and access secrets in your notebook; just change the corresponding fields in the following command to your own.
```
!./juicefs auth --token {JUICEFS_TOKEN} --accesskey {ACCESSKEY} --secretkey {SECRETKEY} JuiceFS
!./juicefs mount -h
```
## 2. Preparing dataset
Okay, let's assume you have already mounted your JuiceFS volume. You can test it by listing your files here.
```
!ls /jfs
```
There are many ways to get data into your JuiceFS volume: mount it on your local machine and simply drag and drop files, mount it on cloud servers and write data, or crawl data and save it directly. Here we take the [MNIST dataset](http://yann.lecun.com/exdb/mnist/) (a training set of 60,000 images and a test set of 10,000 images) as an example. If you do not have the MNIST dataset ready, you can execute the following block:
```
!curl -sL https://s3.amazonaws.com/img-datasets/mnist.npz -o /jfs/mnist.npz
```
## 3. Training model
Once we have got our dataset ready in JuiceFS, we can begin the training process.
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
import warnings
warnings.simplefilter(action='ignore')
```
First, load the MNIST dataset from the JuiceFS volume.
```
with np.load('/jfs/mnist.npz') as f:
X_train, y_train = f['x_train'], f['y_train']
X_test, y_test = f['x_test'], f['y_test']
```
Visualize some data to ensure we have successfully loaded data from JuiceFS.
```
sns.countplot(y_train)
fig, ax = plt.subplots(6, 6, figsize = (12, 12))
fig.suptitle('First 36 images in MNIST')
fig.tight_layout(pad = 0.3, rect = [0, 0, 0.9, 0.9])
for x, y in [(i, j) for i in range(6) for j in range(6)]:
ax[x, y].imshow(X_train[x + y * 6].reshape((28, 28)), cmap = 'gray')
ax[x, y].set_title(y_train[x + y * 6])
```
Cool! We have successfully loaded the MNIST dataset from JuiceFS! Let's train a CNN model.
```
batch_size = 128
num_classes = 10
epochs = 12
img_rows, img_cols = 28, 28
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(X_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(X_test, y_test))
score = model.evaluate(X_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
```
## 4. Saving model
Awesome! We have trained a simple CNN model; now let's write the model back to JuiceFS. Thanks to JuiceFS being POSIX-compatible, we can save the model as usual. No additional effort is needed.
```
model.save('/jfs/mnist_model.h5')
```
## 5. Loading model
Suppose you want to debug the model on your local machine or keep it in sync with the production environment. You can load your model from JuiceFS on any machine in real time: JuiceFS's strong consistency ensures that all confirmed changes to your data are reflected on different machines immediately.
```
from keras.models import load_model
model_from_jfs = load_model('/jfs/mnist_model.h5')
```
We have successfully loaded our previous model from JuiceFS. Let's randomly pick an image from the test dataset and use the loaded model to make a prediction.
```
import random
pick_idx = random.randint(0, X_test.shape[0])
```
Which image did we pick?
```
plt.imshow(X_test[pick_idx].reshape((28, 28)), cmap = 'gray')
```
Let's make a prediction using the model loaded from JuiceFS.
```
y_pred = np.argmax(model_from_jfs.predict(np.expand_dims(X_test[pick_idx], axis=0)))
print(f'Prediction: {y_pred}')
```
That's it. We will cover some advanced usages and public datasets in the next tutorials.
|
github_jupyter
|
[source](../../api/alibi_detect.cd.mmd_online.rst)
# Online Maximum Mean Discrepancy
## Overview
The online [Maximum Mean Discrepancy (MMD)](http://jmlr.csail.mit.edu/papers/v13/gretton12a.html) detector is a kernel-based method for online drift detection. The MMD is a distance-based measure between 2 distributions *p* and *q* based on the mean embeddings $\mu_{p}$ and $\mu_{q}$ in a reproducing kernel Hilbert space $F$:
$$
MMD(F, p, q) = || \mu_{p} - \mu_{q} ||^2_{F}
$$
Given reference samples $\{X_i\}_{i=1}^{N}$ and test samples $\{Y_i\}_{i=t}^{t+W}$ we may compute an unbiased estimate $\widehat{MMD}^2(F, \{X_i\}_{i=1}^N, \{Y_i\}_{i=t}^{t+W})$ of the squared MMD between the two underlying distributions. The estimate can be updated at low-cost as new data points enter into the test-window. We use by default a [radial basis function kernel](https://en.wikipedia.org/wiki/Radial_basis_function_kernel), but users are free to pass their own kernel of preference to the detector.
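As an illustration only, here is a minimal NumPy sketch of this unbiased estimator with a Gaussian RBF kernel and a fixed, assumed bandwidth; it omits the detector's median heuristic and its low-cost online updates:
```python
import numpy as np

def rbf_kernel(a, b, sigma):
    # pairwise squared distances between rows of a and rows of b
    d2 = np.sum(a**2, axis=1)[:, None] + np.sum(b**2, axis=1)[None, :] - 2 * a @ b.T
    return np.exp(-d2 / (2 * sigma**2))

def mmd2_unbiased(x, y, sigma=1.0):
    m, n = len(x), len(y)
    kxx, kyy, kxy = rbf_kernel(x, x, sigma), rbf_kernel(y, y, sigma), rbf_kernel(x, y, sigma)
    # drop the diagonal terms of the within-sample kernel matrices for the unbiased estimate
    term_x = (kxx.sum() - np.trace(kxx)) / (m * (m - 1))
    term_y = (kyy.sum() - np.trace(kyy)) / (n * (n - 1))
    return term_x + term_y - 2 * kxy.mean()

rng = np.random.default_rng(0)
x_ref = rng.normal(0.0, 1.0, size=(200, 5))   # reference samples
x_win = rng.normal(0.5, 1.0, size=(50, 5))    # a (drifted) test-window
print(mmd2_unbiased(x_ref, x_win))
```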
Online detectors assume the reference data is large and fixed and operate on single data points at a time (rather than batches). These data points are passed into the test-window and a two-sample test-statistic (in this case squared MMD) between the reference data and test-window is computed at each time-step. When the test-statistic exceeds a preconfigured threshold, drift is detected. Configuration of the thresholds requires specification of the expected run-time (ERT), which specifies how many time-steps the detector should, on average, run for in the absence of drift before making a false detection. It also requires specification of a test-window size, with smaller windows allowing a faster response to severe drift and larger windows giving more power to detect slight drift.
For high-dimensional data, we typically want to reduce the dimensionality before passing it to the detector. Following suggestions in [Failing Loudly: An Empirical Study of Methods for Detecting Dataset Shift](https://arxiv.org/abs/1810.11953), we incorporate Untrained AutoEncoders (UAE) and black-box shift detection using the classifier's softmax outputs ([BBSDs](https://arxiv.org/abs/1802.03916)) as out-of-the box preprocessing methods and note that [PCA](https://en.wikipedia.org/wiki/Principal_component_analysis) can also be easily implemented using `scikit-learn`. Preprocessing methods which do not rely on the classifier will usually pick up drift in the input data, while BBSDs focuses on label shift.
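For example, a PCA preprocessing step with `scikit-learn` could be wired up roughly like this (a sketch only; `x_ref`, `ert` and `window_size` are assumed to be defined as in the snippets below, and 32 components is an arbitrary choice):
```python
import numpy as np
from sklearn.decomposition import PCA
from alibi_detect.cd import MMDDriftOnline

pca = PCA(n_components=32)
pca.fit(x_ref.reshape(len(x_ref), -1))  # fit the projection on the (flattened) reference data only

def preprocess_fn(x: np.ndarray) -> np.ndarray:
    return pca.transform(x.reshape(len(x), -1))

cd = MMDDriftOnline(x_ref, ert, window_size, backend='tensorflow', preprocess_fn=preprocess_fn)
```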
Detecting input data drift (covariate shift) $\Delta p(x)$ for text data requires a custom preprocessing step. We can pick up changes in the semantics of the input by extracting (contextual) embeddings and detect drift on those. Strictly speaking we are not detecting $\Delta p(x)$ anymore since the whole training procedure (objective function, training data etc) for the (pre)trained embeddings has an impact on the embeddings we extract. The library contains functionality to leverage pre-trained embeddings from [HuggingFace's transformer package](https://github.com/huggingface/transformers) but also allows you to easily use your own embeddings of choice. Both options are illustrated with examples in the [Text drift detection on IMDB movie reviews](../../examples/cd_text_imdb.ipynb) notebook.
## Usage
### Initialize
Arguments:
* `x_ref`: Data used as reference distribution.
* `ert`: The expected run-time in the absence of drift, starting from *t=0*.
* `window_size`: The size of the sliding test-window used to compute the test-statistic. Smaller windows focus on responding quickly to severe drift, larger windows focus on ability to detect slight drift.
Keyword arguments:
* `backend`: Backend used for the MMD implementation and configuration.
* `preprocess_fn`: Function to preprocess the data before computing the data drift metrics.
* `kernel`: Kernel used for the MMD computation, defaults to Gaussian RBF kernel.
* `sigma`: Optionally set the GaussianRBF kernel bandwidth. Can also pass multiple bandwidth values as an array. The kernel evaluation is then averaged over those bandwidths. If `sigma` is not specified, the 'median heuristic' is adopted whereby `sigma` is set as the median pairwise distance between reference samples.
* `n_bootstraps`: The number of bootstrap simulations used to configure the thresholds. The larger this is the more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude larger than the ERT.
* `verbose`: Whether or not to print progress during configuration.
* `input_shape`: Shape of input data.
* `data_type`: Optionally specify the data type (tabular, image or time-series). Added to metadata.
Additional PyTorch keyword arguments:
* `device`: Device type used. The default None tries to use the GPU and falls back on CPU if needed. Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
Initialized drift detector example:
```python
from alibi_detect.cd import MMDDriftOnline
cd = MMDDriftOnline(x_ref, ert, window_size, backend='tensorflow')
```
The same detector in PyTorch:
```python
cd = MMDDriftOnline(x_ref, ert, window_size, backend='pytorch')
```
We can also easily add preprocessing functions for both frameworks. The following example uses a randomly initialized image encoder in PyTorch:
```python
from functools import partial
import torch
import torch.nn as nn
from alibi_detect.cd.pytorch import preprocess_drift
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# define encoder
encoder_net = nn.Sequential(
nn.Conv2d(3, 64, 4, stride=2, padding=0),
nn.ReLU(),
nn.Conv2d(64, 128, 4, stride=2, padding=0),
nn.ReLU(),
nn.Conv2d(128, 512, 4, stride=2, padding=0),
nn.ReLU(),
nn.Flatten(),
nn.Linear(2048, 32)
).to(device).eval()
# define preprocessing function
preprocess_fn = partial(preprocess_drift, model=encoder_net, device=device, batch_size=512)
cd = MMDDriftOnline(x_ref, ert, window_size, backend='pytorch', preprocess_fn=preprocess_fn)
```
The same functionality is supported in TensorFlow and the main difference is that you would import from `alibi_detect.cd.tensorflow import preprocess_drift`. Other preprocessing steps such as the output of hidden layers of a model or extracted text embeddings using transformer models can be used in a similar way in both frameworks. TensorFlow example for the hidden layer output:
```python
from alibi_detect.cd.tensorflow import HiddenOutput, preprocess_drift
model = # TensorFlow model; tf.keras.Model or tf.keras.Sequential
preprocess_fn = partial(preprocess_drift, model=HiddenOutput(model, layer=-1), batch_size=128)
cd = MMDDriftOnline(x_ref, ert, window_size, backend='tensorflow', preprocess_fn=preprocess_fn)
```
Check out the [Online Drift Detection on the Wine Quality Dataset](../../examples/cd_online_wine.ipynb) example for more details.
Alibi Detect also includes custom text preprocessing steps in both TensorFlow and PyTorch based on Huggingface's [transformers](https://github.com/huggingface/transformers) package:
```python
import torch
import torch.nn as nn
from transformers import AutoTokenizer
from alibi_detect.cd.pytorch import preprocess_drift
from alibi_detect.models.pytorch import TransformerEmbedding
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model_name = 'bert-base-cased'
tokenizer = AutoTokenizer.from_pretrained(model_name)
embedding_type = 'hidden_state'
layers = [5, 6, 7]
embed = TransformerEmbedding(model_name, embedding_type, layers)
model = nn.Sequential(embed, nn.Linear(768, 256), nn.ReLU(), nn.Linear(256, enc_dim)).to(device).eval()
preprocess_fn = partial(preprocess_drift, model=model, tokenizer=tokenizer, max_len=512, batch_size=32)
# initialise drift detector
cd = MMDDriftOnline(x_ref, ert, window_size, backend='pytorch', preprocess_fn=preprocess_fn)
```
Again the same functionality is supported in TensorFlow but with `from alibi_detect.cd.tensorflow import preprocess_drift` and `from alibi_detect.models.tensorflow import TransformerEmbedding` imports.
### Detect Drift
We detect data drift by sequentially calling `predict` on single instances `x_t` (no batch dimension) as they each arrive. We can return the test-statistic and the threshold by setting `return_test_stat` to *True*.
The prediction takes the form of a dictionary with `meta` and `data` keys. `meta` contains the detector's metadata while `data` is also a dictionary which contains the actual predictions stored in the following keys:
* `is_drift`: 1 if the test-window (of the most recent `window_size` observations) has drifted from the reference data and 0 otherwise.
* `time`: The number of observations that have been so far passed to the detector as test instances.
* `ert`: The expected run-time the detector was configured to run at in the absence of drift.
* `test_stat`: MMD^2 metric between the reference data and the test_window if `return_test_stat` equals *True*.
* `threshold`: The value the test-statistic is required to exceed for drift to be detected if `return_test_stat` equals *True*.
```python
preds = cd.predict(x_t, return_test_stat=True)
```
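In a streaming setting this call is typically wrapped in a loop; a minimal sketch, assuming `stream` is any iterable yielding single instances:
```python
for x_t in stream:  # one instance at a time, no batch dimension
    pred = cd.predict(x_t, return_test_stat=True)
    if pred['data']['is_drift']:
        print("Drift detected at t = {}: test stat {:.4f} exceeds threshold {:.4f}".format(
            pred['data']['time'], pred['data']['test_stat'], pred['data']['threshold']))
        break
```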
Resetting the detector with the same reference data and thresholds but with a new and empty test-window is straightforward:
```python
cd.reset()
```
## Examples
[Online Drift Detection on the Wine Quality Dataset](../../examples/cd_online_wine.ipynb)
[Online Drift Detection on the Camelyon medical imaging dataset](../../examples/cd_online_camelyon.ipynb)
|
github_jupyter
|
# Probability theory
## Random experiment
When we toss an unbiased coin, we say that it lands heads up with probability $\frac{1}{2}$ and tails up with probability $\frac{1}{2}$.
Such a coin toss is an example of a **random experiment** and the set of **outcomes** of this random experiment is the **sample space** $\Omega = \{h, t\}$, where $h$ stands for "heads" and $t$ stands for tails.
What if we toss a coin twice? We could view the two coin tosses as a single random experiment with the sample space $\Omega = \{hh, ht, th, tt\}$, where $ht$ (for example) denotes "heads on the first toss", "tails on the second toss".
What if, instead of tossing a coin, we roll a die? The sample space for this random experiment is $\Omega = \{1, 2, 3, 4, 5, 6\}$.
## Events
An **event**, then, is a subset of the sample space. In our example of the two consecutive coin tosses, getting heads on all coin tosses is an event:
$$A = \text{"getting heads on all coin tosses"} = \{hh\} \subseteq \{hh, ht, th, tt\} = \Omega.$$
Getting distinct results on the two coin tosses is also an event:
$$D = \{ht, th\} \subseteq \{hh, ht, th, tt\} = \Omega.$$
We can simulate a coin toss in Python as follows:
```
import numpy as np
np.random.seed(42)
np.random.randint(0, 2)
```
(Let's say 0 is heads and 1 is tails.)
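We can also estimate the probability of the event $D$ (distinct results on the two tosses) by simulating many pairs of tosses; the fraction of simulated pairs landing in $D$ should be close to $\frac{1}{2}$:
```
tosses = np.random.randint(0, 2, size=(100000, 2))
(tosses[:, 0] != tosses[:, 1]).mean()
```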
Similarly, in our roll-of-a-die example, the following are all events:
$$S = \text{"six shows up"} = \{6\} \subseteq \{1, 2, 3, 4, 5, 6\} = \Omega,$$
$$E = \text{"even number shows up"} = \{2, 4, 6\} \subseteq \{1, 2, 3, 4, 5, 6\} = \Omega,$$
$$O = \text{"odd number shows up"} = \{1, 3, 5\} \subseteq \{1, 2, 3, 4, 5, 6\} = \Omega.$$
The empty set, $\emptyset = \{\}$, represents the **impossible event**, whereas the sample space $\Omega$ itself represents the **certain event**: one of the numbers $1, 2, 3, 4, 5, 6$ always occurs when a die is rolled, so $\Omega$ always occurs.
We can simulate the roll of a die in Python as follows:
```
np.random.randint(1, 7)
```
If we get 4, say, $S$ has not occurred, since $4 \notin S$; $E$ has occurred, since $4 \in E$; $O$ has not occurred, since $4 \notin O$.
When all outcomes are equally likely, and the sample space is finite, the probability of an event $A$ is given by $$\mathbb{P}(A) = \frac{|A|}{|\Omega|},$$ where $|\cdot|$ denotes the number of elements in a given set.
Thus, the probability of the event $E$, "even number shows up", is equal to $$\mathbb{P}(E) = \frac{|E|}{|\Omega|} = \frac{3}{6} = \frac{1}{2}.$$
If Python's random number generator is decent enough, we should get pretty close to this number by simulating die rolls:
```
outcomes = np.random.randint(1, 7, 100)
len([x for x in outcomes if x % 2 == 0]) / len(outcomes)
```
Here we have used 100 simulated "rolls". If we used 1,000,000, say, we would get even closer to $\frac{1}{2}$:
```
outcomes = np.random.randint(1, 7, 1000000)
len([x for x in outcomes if x % 2 == 0]) / len(outcomes)
```
|
github_jupyter
|
# NLP 2 - Text Preprocessing and Modern Models
Hey everyone! In the last class we had an introduction to the world of NLP: the BoW (Bag of Words) model and the TF-IDF algorithm. Although very practical, we observed a few characteristics of NLP and of these techniques:
- NLP is naturally a high-dimensional problem, which puts us squarely in "curse of dimensionality" territory
- The BoW model, even with the concept of N-grams, struggles to carry sequential word information, since it only captures sequences of terms, not of concepts
- Understanding and implementing linguistic concepts is essential for the processing and modelling to perform well; in this sense, NLP is guided by linguistic understanding
<br>
That said, today the NLP world has tools, approaches and technologies that implement linguistic concepts more efficiently so that we can build better models. In this class, we will explore these techniques with the SpaCy and gensim libraries and the word2vec architecture! If you don't have SpaCy or gensim on your computer, uncomment and run the cells below:
```
# ! pip install spacy
# ! pip install gensim
```
## SpaCy Basics
```
import spacy
# We need to instantiate an NLP object specifying which language it will use.
# In this case, let's start with Portuguese
nlp = spacy.load('pt')
```
Oops, the command above raised an error! SpaCy needs not only to be installed, but its language packages also need to be downloaded. Uncomment and run the cells below to download the English and Portuguese packages:
```
# ! python -m spacy download en
# ! python -m spacy download pt
```
OK! Now everything is set for us to start working with SpaCy. Let's instantiate the language tool for Portuguese:
```
nlp = spacy.load('pt')
# Let's create a document for SpaCy tests and demonstrations!
# It is very important that the texts passed in are unicode-encoded,
# which is why there is a u before the string
doc = nlp(u'Você encontrou o livro que eu te falei, Carla?')
doc.text.split()
```
OK, we have a punctuation problem here: the split method (or REGEX in general) doesn't understand that the comma is an entity of its own - let's call these entities tokens. So it doesn't make much sense to break the text up with those methods. Let's use a list comprehension instead. `nlp` can tell the difference between them, so when we use the tokens inside the document structure we get a more coherent split:
```
tokens = [token for token in doc]
tokens
```
To extract the string of each token, we use `orth_`:
```
[token.orth_ for token in doc]
```
We can see that SpaCy really does understand the difference between punctuation and actual words:
```
[token.orth_ for token in doc if not token.is_punct]
```
A very important concept in NLP is similarity. How do we measure whether 2 words carry similar information? This can be useful, for example, for compressing our text, or for discovering the meaning of unknown words, terms and slang. For this, we use the `.similarity()` method of one token with respect to another:
```
print(tokens[0].similarity(tokens[5]))
print(tokens[0].similarity(tokens[3]))
```
In the cell below, feel free to run whatever similarity tests you like in Portuguese!
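For instance, something along these lines (an illustrative sentence of our own; with the small `pt` package the scores are only rough):
```
doc2 = nlp(u'O gato perseguiu o cachorro pelo jardim')
tokens2 = [token for token in doc2]
print(tokens2[1].orth_, '<->', tokens2[4].orth_, tokens2[1].similarity(tokens2[4]))  # gato <-> cachorro
print(tokens2[1].orth_, '<->', tokens2[6].orth_, tokens2[1].similarity(tokens2[6]))  # gato <-> jardim
```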
When we load a language package, we are also loading notions of the language's grammatical, syntactic and phrase structure. We can, for example, use the `.pos_` attribute, for Part of Speech (POS), to extract the role of each token in the sentence:
```
[(token.orth_, token.pos_) for token in doc]
```
OK, but how do we deal with the dimensionality problem? We can use 2 concepts called **lemmatization** and **stemming**. In linguistics, lemmatization is the process of grouping together the inflected forms of a word so they can be analysed as a single item, identified by the word's lemma or dictionary form. Stemming, in turn, looks for the word's root:
```
[token.lemma_ for token in doc if token.pos_ == 'VERB'] # lemmatization
```
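spaCy itself does not ship a stemmer; if you want to try stemming, one option (outside spaCy, assuming NLTK is installed) is the Portuguese Snowball stemmer:
```
from nltk.stem.snowball import SnowballStemmer
stemmer = SnowballStemmer('portuguese')
[stemmer.stem(w) for w in ['encontrou', 'encontraram', 'encontrando']]  # stemming: reduce words to their root
```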
In the cell below, create a new doc and apply lemmatization to its verbs:
```
doc = nlp(u'encontrei, encontraram, encontrarão, encontrariam')
[token.lemma_ for token in doc if token.pos_ == 'VERB'] # lemmatization
doc = nlp(u'encontrar encontrei')
tokens = [token for token in doc]
tokens[0].is_ancestor(tokens[1]) # root check
```
Finally, we want to extract entities from a sentence. Think of entities as the characters in a doc. We can access the entities of a sentence by calling `ents` on a doc:
```
doc = nlp(u'Machado de Assis um dos melhores escritores do Brasil, foi o primeiro presidente da Academia Brasileira de Letras')
doc.ents
```
By analysing the entities of a sentence, we can even tell what type of entity each one belongs to:
```
[(entity, entity.label_) for entity in doc.ents]
wiki_obama = """Barack Obama is an American politician who served as
the 44th President of the United States from 2009 to 2017. He is the first
African American to have served as president,
as well as the first born outside the contiguous United States."""
```
And this works for every language package you use:
```
nlp = spacy.load('en')
nlp_obama = nlp(wiki_obama)
[(i, i.label_) for i in nlp_obama.ents]
```
## SpaCy + Scikit Learn
To demonstrate how to preprocess a language dataset and how to connect SpaCy and sklearn, let's build a simple sentiment recognizer:
```
# stopwords are tokens of a language that carry little information, such as connectives and punctuation.
# Be careful when using this! For example, @ and # are extremely important punctuation marks in a use case
# based on Twitter data
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS as stopwords
# Our BoW model
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import accuracy_score
from sklearn.base import TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
import string
punctuations = string.punctuation
from spacy.lang.en import English
parser = English()
# Custom transformer using spaCy
class predictors(TransformerMixin):
def transform(self, X, **transform_params):
return [clean_text(text) for text in X]
def fit(self, X, y=None, **fit_params):
return self
def get_params(self, deep=True):
return {}
# Let's clean the text by lowercasing everything
def clean_text(text):
return text.strip().lower()
```
Let's create a function that tokenizes our dataset while applying lemmatization and removing stopwords:
```
def spacy_tokenizer(sentence):
tokens = parser(sentence)
tokens = [tok.lemma_.lower().strip() if tok.lemma_ != "-PRON-" else tok.lower_ for tok in tokens]
tokens = [tok for tok in tokens if (tok not in stopwords and tok not in punctuations)]
return tokens
# create a vectorizer object to generate feature vectors; we will use our custom spaCy tokenizer
vectorizer = CountVectorizer(tokenizer = spacy_tokenizer, ngram_range=(1,2))
classifier = LinearSVC()
# Create the pipeline to clean, tokenize, vectorize, and classify
pipe = Pipeline([("cleaner", predictors()),
('vectorizer', vectorizer),
('classifier', classifier)])
# Load sample data
train = [('I love this sandwich.', 'pos'),
('this is an amazing place!', 'pos'),
('I feel very good about these beers.', 'pos'),
('this is my best work.', 'pos'),
("what an awesome view", 'pos'),
('I do not like this restaurant', 'neg'),
('I am tired of this stuff.', 'neg'),
("I can't deal with this", 'neg'),
('he is my sworn enemy!', 'neg'),
('my boss is horrible.', 'neg')]
test = [('the beer was good.', 'pos'),
('I do not enjoy my job', 'neg'),
("I ain't feelin dandy today.", 'neg'),
("I feel amazing!", 'pos'),
('Gary is a good friend of mine.', 'pos'),
("I can't believe I'm doing this.", 'neg')]
# Create model and measure accuracy
pipe.fit([x[0] for x in train], [x[1] for x in train])
pred_data = pipe.predict([x[0] for x in test])
for (sample, pred) in zip(test, pred_data):
print(sample, pred)
print("Accuracy:", accuracy_score([x[1] for x in test], pred_data))
```
Nice! We managed to connect spaCy and scikit-learn into a simple sentiment-analysis tool. Now let's move on to a more complex problem:
<img src="imgs/simpsons.jpg" align="left" width="60%">
## Simpsons Dataset
This __[dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data/downloads/simpsons_script_lines.csv/1)__ is quite famous in NLP: it contains characters, locations, lines of dialogue and other info from 600+ episodes of The Simpsons! Let's build a model that understands the language of The Simpsons and lets us perform linguistic operations on it.
```
import re # For preprocessing
import pandas as pd
from time import time
from collections import defaultdict # For word frequency
import logging  # Set up logging to monitor gensim. Data scientists live on logs!
logging.basicConfig(format="%(levelname)s - %(asctime)s: %(message)s", datefmt= '%H:%M:%S', level=logging.INFO)
df = pd.read_csv('./data/simpsons_script_lines.csv', error_bad_lines=False, usecols = ['raw_character_text', 'spoken_words'])
df.shape
df.head()
```
Let's do a sanity check and see whether we have any null values:
```
df.isnull().sum()
```
OK, the famous `.dropna()` to clean our dataset. In NLP use cases, we can afford to do this at this scale:
```
df = df.dropna().reset_index(drop=True)
df.isnull().sum()
nlp = spacy.load('en', disable=['ner', 'parser']) # disabling Named Entity Recognition for speed
def cleaning(doc):
# Lemmatizes and removes stopwords
# doc needs to be a spacy Doc object
txt = [token.lemma_ for token in doc if not token.is_stop]
# Word2Vec uses context words to learn the vector representation of a target word,
# if a sentence is only one or two words long,
# the benefit for the training is very small
if len(txt) > 2:
return ' '.join(txt)
```
Let's remove the non-alphabetic characters:
```
brief_cleaning = (re.sub("[^A-Za-z']+", ' ', str(row)).lower() for row in df['spoken_words']) #REGEX
```
OK, let's run our cleaning function over the whole dataset! Notice how the shape changes. spaCy lets us create pipelines for this process:
```
t = time()
txt = [cleaning(doc) for doc in nlp.pipe(brief_cleaning, batch_size=5000, n_threads=-1)]
print('Time to clean up everything: {} mins'.format(round((time() - t) / 60, 2)))
df_clean = pd.DataFrame({'clean': txt})
df_clean = df_clean.dropna().drop_duplicates()
df_clean.shape
```
Time to use the Gensim library. Gensim is an open-source library for unsupervised topic modelling and natural language processing, built on modern statistical machine learning:
```
from gensim.models.phrases import Phrases, Phraser
sent = [row.split() for row in df_clean['clean']]
phrases = Phrases(sent, min_count=30, progress_per=10000)
```
Let's use Gensim's __[bigrams](https://radimrehurek.com/gensim/models/phrases.html)__ to detect common multi-word expressions, such as Bart Simpson and Mr Burns:
```
bigram = Phraser(phrases)
sentences = bigram[sent]
word_freq = defaultdict(int)
for sent in sentences:
for i in sent:
word_freq[i] += 1
len(word_freq)
sorted(word_freq, key=word_freq.get, reverse=True)[:10]
```
Let's build Gensim's __[word2vec](https://radimrehurek.com/gensim/models/word2vec.html)__ model. Before that, let's understand the model:
<img src="imgs/word2vec.png" align="left" width="80%">
The word2vec model was implemented by the Google Research team in 2013 with the goal of vectorizing tokens and entities. Its premise is that similar terms appear in similar contexts, so if two terms appear in the same context they have a high chance of carrying related information. This way, we can build an n-dimensional space of terms and perform vector operations on those words!
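Before training on real data, here is a tiny, self-contained illustration of what "vector operations on words" means, using made-up 3-dimensional vectors and cosine similarity (the actual model below learns 300-dimensional vectors from the Simpsons corpus; the numbers here are invented purely for illustration):
```
# Toy illustration only: the vectors below are invented, not learned
import numpy as np

def cosine(a, b):
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

homer = np.array([0.9, 0.1, 0.3])
marge = np.array([0.8, 0.2, 0.4])
donut = np.array([0.1, 0.9, 0.0])

# Words that appear in similar contexts end up with a higher cosine similarity
cosine(homer, marge), cosine(homer, donut)
```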
```
import multiprocessing
cores = multiprocessing.cpu_count() # Count the number of cores in a computer
from gensim.models import Word2Vec
w2v_model = Word2Vec(min_count=20,
window=2,
size=300,
sample=6e-5,
alpha=0.03,
min_alpha=0.0007,
negative=20,
workers=cores-1)
```
The hyperparameters used are:
- min_count = int - Ignores all words with total absolute frequency lower than this - (2, 100)
- window = int - The maximum distance between the current and predicted word within a sentence. E.g. window words on the left and window words on the right of our target - (2, 10)
- size = int - Dimensionality of the feature vectors. - (50, 300)
- sample = float - The threshold for configuring which higher-frequency words are randomly downsampled. Highly influential. - (0, 1e-5)
- alpha = float - The initial learning rate - (0.01, 0.05)
- min_alpha = float - Learning rate will linearly drop to min_alpha as training progresses. To set it: alpha - (min_alpha * epochs) ~ 0.00
- negative = int - If > 0, negative sampling will be used; the int for negative specifies how many "noise words" should be drawn. If set to 0, no negative sampling is used. - (5, 20)
- workers = int - Use these many worker threads to train the model (=faster training with multicore machines)
<br>
With the model instantiated, we need to build our **corpus**, or vocabulary. Let's feed our model with the docs:
```
t = time()
w2v_model.build_vocab(sentences, progress_per=10000)
print('Time to build vocab: {} mins'.format(round((time() - t) / 60, 2)))
```
All set! Let's train our model!
```
t = time()
w2v_model.train(sentences, total_examples=w2v_model.corpus_count, epochs=30, report_delay=1)
print('Time to train the model: {} mins'.format(round((time() - t) / 60, 2)))
w2v_model.init_sims(replace=True)
w2v_model.wv.most_similar(positive=["homer"])
w2v_model.wv.most_similar(positive=["marge"])
w2v_model.wv.most_similar(positive=["bart"])
w2v_model.wv.similarity('maggie', 'baby')
w2v_model.wv.similarity('bart', 'nelson')
w2v_model.wv.doesnt_match(['jimbo', 'milhouse', 'kearney'])
w2v_model.wv.doesnt_match(["nelson", "bart", "milhouse"])
w2v_model.wv.most_similar(positive=["woman", "homer"], negative=["marge"], topn=3)
w2v_model.wv.most_similar(positive=["woman", "bart"], negative=["man"], topn=3)
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set_style("darkgrid")
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
def tsnescatterplot(model, word, list_names):
""" Plot in seaborn the results from the t-SNE dimensionality reduction algorithm of the vectors of a query word,
its list of most similar words, and a list of words.
"""
arrays = np.empty((0, 300), dtype='f')
word_labels = [word]
color_list = ['red']
# adds the vector of the query word
arrays = np.append(arrays, model.wv.__getitem__([word]), axis=0)
# gets list of most similar words
close_words = model.wv.most_similar([word])
# adds the vector for each of the closest words to the array
for wrd_score in close_words:
wrd_vector = model.wv.__getitem__([wrd_score[0]])
word_labels.append(wrd_score[0])
color_list.append('blue')
arrays = np.append(arrays, wrd_vector, axis=0)
# adds the vector for each of the words from list_names to the array
for wrd in list_names:
wrd_vector = model.wv.__getitem__([wrd])
word_labels.append(wrd)
color_list.append('green')
arrays = np.append(arrays, wrd_vector, axis=0)
# Reduces the dimensionality from 300 to 50 dimensions with PCA
reduc = PCA(n_components=19).fit_transform(arrays)
# Finds t-SNE coordinates for 2 dimensions
np.set_printoptions(suppress=True)
Y = TSNE(n_components=2, random_state=0, perplexity=15).fit_transform(reduc)
# Sets everything up to plot
df = pd.DataFrame({'x': [x for x in Y[:, 0]],
'y': [y for y in Y[:, 1]],
'words': word_labels,
'color': color_list})
fig, _ = plt.subplots()
fig.set_size_inches(9, 9)
# Basic plot
p1 = sns.regplot(data=df,
x="x",
y="y",
fit_reg=False,
marker="o",
scatter_kws={'s': 40,
'facecolors': df['color']
}
)
# Adds annotations one by one with a loop
for line in range(0, df.shape[0]):
p1.text(df["x"][line],
df['y'][line],
' ' + df["words"][line].title(),
horizontalalignment='left',
verticalalignment='bottom', size='medium',
color=df['color'][line],
weight='normal'
).set_size(15)
plt.xlim(Y[:, 0].min()-50, Y[:, 0].max()+50)
plt.ylim(Y[:, 1].min()-50, Y[:, 1].max()+50)
plt.title('t-SNE visualization for {}'.format(word.title()))
tsnescatterplot(w2v_model, 'homer', ['dog', 'bird', 'ah', 'maude', 'bob', 'mel', 'apu', 'duff'])
tsnescatterplot(w2v_model, 'maggie', [i[0] for i in w2v_model.wv.most_similar(negative=["maggie"])])
tsnescatterplot(w2v_model, "mr_burn", [t[0] for t in w2v_model.wv.most_similar(positive=["mr_burn"], topn=20)][10:])
```
# Developing Custom Models
Panel ships with a number of custom Bokeh models, which have both Python and JavaScript components. When developing Panel these custom models have to be compiled. This happens automatically with `pip install -e .` or `python setup.py develop`; however, when actively developing you can rebuild the extension with `panel build panel`. The build command is just an alias for `bokeh build`; see the [Bokeh developer guide](https://docs.bokeh.org/en/latest/docs/dev_guide/setup.html) for more information about developing Bokeh models, or the [Awesome Panel - Bokeh Extensions Guide](https://awesome-panel.readthedocs.io/en/latest/guides/awesome-panel-extensions-guide/bokeh-extensions.html).
Just like any other JavaScript (or TypeScript) library, Panel defines `package.json` and `package-lock.json` files. When adding, updating or removing a dependency in the `package.json` file, make sure you commit the changes to `package-lock.json` after running `npm install`.
## Adding a new Custom Model
This example will guide you through adding a new model.
We will use the `ChartJS` model as an example, but you should replace `ChartJS` and similar names with the name of your own model.
To start with, we will add a simple button model, but we will call it `ChartJS`.
My experience is that you should start small with a working example and then continue in small, incremental steps. When I started out learning about custom models, trying to copy a large, complex example and refactor it did not work for me.
1. Create a new branch `chartjs`.
2. Add the files and code for a *minimum working model*. This includes
- A Panel Python model
- A Bokeh Python and TypeScript model
#### Add the Panel Python Model
Add the file *panel/pane/chartjs.py* and the code
```python
import param
from panel.widgets.base import Widget
from ..models import ChartJS as _BkChartJS
class ChartJS(Widget):
# Set the Bokeh model to use
_widget_type = _BkChartJS
# Rename Panel Parameters -> Bokeh Model properties
    # Parameters like title that do not exist on the Bokeh model should be renamed to None
_rename = {
"title": None,
}
# Parameters to be mapped to Bokeh model properties
object = param.String(default="Click Me!")
clicks = param.Integer(default=0)
```
Add the Panel model to `panel/pane/__init__.py`
```python
from .chartjs import ChartJS
```
#### Add the Bokeh Python Model
Add the file *panel/models/chartjs.py* and the code
```python
from bokeh.core.properties import Int, String
from bokeh.models import HTMLBox
class ChartJS(HTMLBox):
"""Custom ChartJS Model"""
object = String()
clicks = Int()
```
Add the Bokeh model to `panel/models/__init__.py` file
```python
from .chartjs import ChartJS
```
#### Add the Bokeh TypeScript Model
Add the file *panel/models/chartjs.ts* and the code
```typescript
// See https://docs.bokeh.org/en/latest/docs/reference/models/layouts.html
import { HTMLBox, HTMLBoxView } from "@bokehjs/models/layouts/html_box"
// See https://docs.bokeh.org/en/latest/docs/reference/core/properties.html
import * as p from "@bokehjs/core/properties"
// The view of the Bokeh extension/ HTML element
// Here you can define how to render the model as well as react to model changes or View events.
export class ChartJSView extends HTMLBoxView {
model: ChartJS
objectElement: any // Element
connect_signals(): void {
super.connect_signals()
this.connect(this.model.properties.object.change, () => {
this.render();
})
}
render(): void {
super.render()
this.el.innerHTML = `<button type="button">${this.model.object}</button>`
this.objectElement = this.el.firstElementChild
this.objectElement.addEventListener("click", () => {this.model.clicks+=1;}, false)
}
}
export namespace ChartJS {
export type Attrs = p.AttrsOf<Props>
export type Props = HTMLBox.Props & {
object: p.Property<string>,
clicks: p.Property<number>,
}
}
export interface ChartJS extends ChartJS.Attrs { }
// The Bokeh .ts model corresponding to the Bokeh .py model
export class ChartJS extends HTMLBox {
properties: ChartJS.Props
constructor(attrs?: Partial<ChartJS.Attrs>) {
super(attrs)
}
static __module__ = "panel.models.chartjs"
static init_ChartJS(): void {
this.prototype.default_view = ChartJSView;
this.define<ChartJS.Props>(({Int, String}) => ({
object: [String, "Click Me!"],
clicks: [Int, 0],
}))
}
}
```
Add the `ChartJS` typescript model to *panel/models/index.ts*
```typescript
export {ChartJS} from "./chartjs"
```
#### Build the Model
You can now build the model using `panel build panel`. It should look similar to
```bash
(base) root@475bb36209a9:/workspaces/panel# panel build panel
Working directory: /workspaces/panel/panel
Using /workspaces/panel/panel/tsconfig.json
Compiling styles
Compiling TypeScript (45 files)
Linking modules
Output written to /workspaces/panel/panel/dist
All done.
```
#### Test the Model
Add the file *panel/tests/pane/test_chartjs.py* and the code
```python
import panel as pn
def test_constructor():
chartjs = pn.pane.ChartJS(object="Click Me Now!")
def get_app():
chartjs = pn.pane.ChartJS(object="Click Me Now!")
return pn.Column(
chartjs, pn.Param(chartjs, parameters=["object", "clicks"])
)
if __name__.startswith("bokeh"):
get_app().servable()
```
Run `pytest panel/tests/pane/test_chartjs.py` and make sure it passes.
Serve the app with `panel serve panel/tests/pane/test_chartjs.py --auto --show`
You have to *hard refresh* your browser to reload the new panel `.js` files with your `ChartJS` model. In Chrome I press `CTRL+F5`. See [How to hard refresh in Chrome, Firefox and IE](https://www.namecheap.com/support/knowledgebase/article.aspx/10078/2194/how-to-do-a-hard-refresh-in-chrome-firefox-and-ie/) for other browsers.
Now you can manually test your model

#### Save your new Model
Finally you should save your changes via `git add .` and maybe even commit them `git commit -m "First iteration on ChartJS model"`
## Build a small HTML Example
At the beginning of your journey into custom models there will be things that break, and when you combine several new things at once it can be really difficult to figure out why. Is the problem Panel, Bokeh, Python, JavaScript, Node or ...?
So I suggest creating a small, working example in plain HTML/JS before you start combining it with Panel and Bokeh models.
Please note that the example below works out of the box. It is not always that easy to import JavaScript libraries in a notebook, so it can be a good idea to work in a `.html` file first.
```
%%HTML
<script src="https://cdn.jsdelivr.net/npm/[email protected]"></script>
<div class="chart-container" style="position: relative; height:400px; width:100%">
<canvas id="myChart"></canvas>
</div>
<script>
var ctx = document.getElementById('myChart').getContext('2d');
var chart = new Chart(ctx, {
// The type of chart we want to create
type: 'line',
// The data for our dataset
data: {
labels: ['January', 'February', 'March', 'April', 'May', 'June', 'July'],
datasets: [{
label: 'My First dataset',
backgroundColor: 'rgb(255, 99, 132)',
borderColor: 'rgb(255, 99, 132)',
data: [0, 10, 5, 2, 20, 30, 45]
}]
},
// Configuration options go here
options: {
responsive: true,
maintainAspectRatio: false,
}
});
</script>
```
## Using the Javascript Model
Getting something shown using the `ChartJS` `js` library would be the next step. It might require a bit of experimentation, looking at other examples, google or support from the community.
Here I found that a good next step was making the following changes:
#### Import the Javascript Library
Update *test_chartjs.py* to
```python
import panel as pn
def test_constructor():
chartjs = pn.pane.ChartJS(object="Click Me Now!")
def get_app():
chartjs = pn.pane.ChartJS(object="Click Me Now!")
return pn.Column(
chartjs, pn.Param(chartjs, parameters=["object", "clicks"])
)
if __name__.startswith("bokeh"):
pn.config.js_files["chartjs"]="https://cdn.jsdelivr.net/npm/[email protected]"
get_app().servable()
```
#### Render the Plot
In the *chartjs.ts* file add `import { canvas, div } from "@bokehjs/core/dom";` at the top and change the `render` function to
```typescript
render(): void {
super.render()
var object = {
type: 'line',
data: {
labels: ['January', 'February', 'March', 'April', 'May', 'June', 'July'],
datasets: [{
label: 'My First dataset',
backgroundColor: 'rgb(255, 99, 132)',
borderColor: 'rgb(255, 99, 132)',
data: [0, 10, 5, 2, 20, 30, 45]
}]
},
options: {
responsive: true,
maintainAspectRatio: false,
}
}
var chartContainer = div({class: "chartjs-container", style: "position: relative; height:400px; width:100%"})
var chartCanvas = canvas({class: "chartjs"})
chartContainer.appendChild(chartCanvas)
var ctx: any = chartCanvas.getContext('2d');
new (window as any).Chart(ctx, object);
this.el.appendChild(chartContainer)
}
```
#### Build and Test
Run `panel build panel` and hard refresh your browser. You should see

#### Save Your Model
Remember to stage and/ or commit your working changes.
## Next Steps
- Enable setting the Python `ChartJS.object` parameter to any ChartJS dictionary (a rough starting point is sketched below this list).
- Check out support for different sizing modes, responsiveness and window maximizing.
- Configure the javascript, css, .. dependencies in the Bokeh Python File.
- .....
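For the first of these next steps, a possible starting point could be to change the `object` parameter to a `param.Dict` on the Panel model. The file and import layout below mirrors the `panel/pane/chartjs.py` created earlier, but treat this purely as an untested sketch, not the final API:
```python
# Sketch only: let ChartJS.object hold an arbitrary ChartJS configuration dict
import param
from panel.widgets.base import Widget
from ..models import ChartJS as _BkChartJS

class ChartJS(Widget):
    _widget_type = _BkChartJS
    _rename = {"title": None}

    # Any ChartJS config, e.g. {"type": "line", "data": {...}, "options": {...}}
    object = param.Dict(default=None)
    clicks = param.Integer(default=0)
```
On the Bokeh side the matching property could become a generic dictionary (for example `Dict(String, Any)` from `bokeh.core.properties`), and the TypeScript `render` method could pass `this.model.object` straight into `new Chart(ctx, ...)` instead of the hard-coded config. The existing ECharts model linked under Tips and Tricks is a better reference for the final design.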
## Check List
As you develop and test your model, you should eventually consider implementing and testing:
- Dynamic updates to the `object` parameter and any other parameters added.
- Resizing
- Does it resize when `width` is changed dynamically?
- Does it resize when `height` is changed dynamically?
- Does it work with `sizing_mode="stretch_width"` etc.
- Themes (Light, Dark)
- Window Resizing, Window Maximizing, Window Minimizing.
- Streaming of Data. Is it efficient?
- Events (Click, Hover etc.)
- Consider supporting the Python Wrapper (ECharts -> PyECharts, ChartJS -> [PyChart.JS](https://pypi.org/project/pyChart.JS/))
- Tests
- Reference Notebook
- Communication with, for example, the ChartJS community and developers.
## Tips and Tricks
- Work in small increments and stage your changes when they work
- Remember to `panel build panel` and hard refresh before you test.
- Add [console.log](https://www.w3schools.com/jsref/met_console_log.asp) statements to your `.ts` code for debugging.
- Use the [*Developer Tools*](https://developers.google.com/web/tools/chrome-devtools) *console* to see the `console.log` output and identify errors. In my browsers I toggle the Developer Tools using `CTRL+SHIFT+I`.
- Find inspiration for next steps in the [existing Panel Custom Models](https://github.com/holoviz/panel/tree/master/panel/models). For `ChartJS` one of the most relevant custom models would be `Echarts`. See Panel [echarts.py](https://github.com/holoviz/panel/blob/master/panel/pane/echarts.py), Bokeh [echarts.py](https://github.com/holoviz/panel/blob/master/panel/models/echarts.py) and [echarts.ts](https://github.com/holoviz/panel/blob/master/panel/models/echarts.ts).
- Use the existing documentation
- [Panel - Developer Guide](https://panel.holoviz.org/developer_guide/index.html)
- [Bokeh - Extending Bokeh](https://docs.bokeh.org/en/latest/docs/user_guide/extensions.html)
- [Awesome Panel - Bokeh Extensions Guide](https://awesome-panel.readthedocs.io/en/latest/guides/awesome-panel-extensions-guide/bokeh-extensions.html)
- Use Google Search. You don't have to be an expert javascript or typescript developer. It's a very small subset of those languages that is used when developing Custom Models.
- Ask for help in [PyViz Gitter](https://gitter.im/pyviz/pyviz), [HoloViz Discourse](https://discourse.holoviz.org/) and [Bokeh Discourse](https://discourse.bokeh.org/) forums.
```
from IPython.display import Markdown as md
### change to reflect your notebook
_nb_loc = "07_training/07a_ingest.ipynb"
_nb_title = "Writing an efficient ingest Loop"
### no need to change any of this
_nb_safeloc = _nb_loc.replace('/', '%2F')
md("""
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://console.cloud.google.com/ai-platform/notebooks/deploy-notebook?name={1}&url=https%3A%2F%2Fgithub.com%2FGoogleCloudPlatform%2Fpractical-ml-vision-book%2Fblob%2Fmaster%2F{2}&download_url=https%3A%2F%2Fgithub.com%2FGoogleCloudPlatform%2Fpractical-ml-vision-book%2Fraw%2Fmaster%2F{2}">
<img src="https://raw.githubusercontent.com/GoogleCloudPlatform/practical-ml-vision-book/master/logo-cloud.png"/> Run in AI Platform Notebook</a>
</td>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/GoogleCloudPlatform/practical-ml-vision-book/blob/master/{0}">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/GoogleCloudPlatform/practical-ml-vision-book/blob/master/{0}">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://raw.githubusercontent.com/GoogleCloudPlatform/practical-ml-vision-book/master/{0}">
<img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
""".format(_nb_loc, _nb_title, _nb_safeloc))
```
# Efficient Ingest
In this notebook, we speed up the ingest of training/evaluation data into the model.
## Enable GPU and set up helper functions
This notebook and pretty much every other notebook in this repository
will run faster if you are using a GPU.
On Colab:
- Navigate to Edit→Notebook Settings
- Select GPU from the Hardware Accelerator drop-down
On Cloud AI Platform Notebooks:
- Navigate to https://console.cloud.google.com/ai-platform/notebooks
- Create an instance with a GPU or select your instance and add a GPU
Next, we'll confirm that we can connect to the GPU with tensorflow:
```
import tensorflow as tf
print('TensorFlow version: ' + tf.version.VERSION)
print('Built with GPU support? ' + ('Yes!' if tf.test.is_built_with_cuda() else 'Noooo!'))
print('There are {} GPUs'.format(len(tf.config.experimental.list_physical_devices("GPU"))))
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
```
## Original code
This is the original code, from [../06_preprocessing/06e_colordistortion.ipynb](../06_preprocessing/06e_colordistortion.ipynb)
We have a few variations of creating a preprocessed dataset.
```
import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import os
# Load compressed models from tensorflow_hub
os.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'
from tensorflow.data.experimental import AUTOTUNE
IMG_HEIGHT = 448 # note *twice* what we used to have
IMG_WIDTH = 448
IMG_CHANNELS = 3
CLASS_NAMES = 'daisy dandelion roses sunflowers tulips'.split()
def training_plot(metrics, history):
f, ax = plt.subplots(1, len(metrics), figsize=(5*len(metrics), 5))
for idx, metric in enumerate(metrics):
ax[idx].plot(history.history[metric], ls='dashed')
ax[idx].set_xlabel("Epochs")
ax[idx].set_ylabel(metric)
ax[idx].plot(history.history['val_' + metric]);
ax[idx].legend([metric, 'val_' + metric])
class _Preprocessor:
def __init__(self):
# nothing to initialize
pass
def read_from_tfr(self, proto):
feature_description = {
'image': tf.io.VarLenFeature(tf.float32),
'shape': tf.io.VarLenFeature(tf.int64),
'label': tf.io.FixedLenFeature([], tf.string, default_value=''),
'label_int': tf.io.FixedLenFeature([], tf.int64, default_value=0),
}
rec = tf.io.parse_single_example(
proto, feature_description
)
shape = tf.sparse.to_dense(rec['shape'])
img = tf.reshape(tf.sparse.to_dense(rec['image']), shape)
label_int = rec['label_int']
return img, label_int
def read_from_jpegfile(self, filename):
# same code as in 05_create_dataset/jpeg_to_tfrecord.py
img = tf.io.read_file(filename)
img = tf.image.decode_jpeg(img, channels=IMG_CHANNELS)
img = tf.image.convert_image_dtype(img, tf.float32)
return img
def preprocess(self, img):
return tf.image.resize_with_pad(img, IMG_HEIGHT, IMG_WIDTH)
def create_preproc_dataset_plain(pattern):
preproc = _Preprocessor()
trainds = tf.data.TFRecordDataset(
[filename for filename in tf.io.gfile.glob(pattern)],
compression_type='GZIP'
).map(preproc.read_from_tfr).map(
lambda img, label: (preproc.preprocess(img), label)
)
return trainds
# note: addition of AUTOTUNE to the map() calls
def create_preproc_dataset_parallelmap(pattern):
preproc = _Preprocessor()
def _preproc_img_label(img, label):
return (preproc.preprocess(img), label)
trainds = (
tf.data.TFRecordDataset(
[filename for filename in tf.io.gfile.glob(pattern)],
compression_type='GZIP'
)
.map(preproc.read_from_tfr, num_parallel_calls=AUTOTUNE)
.map(_preproc_img_label, num_parallel_calls=AUTOTUNE)
)
return trainds
# note: splits the files into two halves and interleaves datasets
def create_preproc_dataset_interleave(pattern, num_parallel=None):
preproc = _Preprocessor()
files = [filename for filename in tf.io.gfile.glob(pattern)]
if len(files) > 1:
print("Interleaving the reading of {} files.".format(len(files)))
def _create_half_ds(x):
if x == 0:
half = files[:(len(files)//2)]
else:
half = files[(len(files)//2):]
return tf.data.TFRecordDataset(half,
compression_type='GZIP')
trainds = tf.data.Dataset.range(2).interleave(
_create_half_ds, num_parallel_calls=AUTOTUNE)
else:
trainds = tf.data.TFRecordDataset(files,
compression_type='GZIP')
def _preproc_img_label(img, label):
return (preproc.preprocess(img), label)
trainds = (trainds
.map(preproc.read_from_tfr, num_parallel_calls=num_parallel)
.map(_preproc_img_label, num_parallel_calls=num_parallel)
)
return trainds
def create_preproc_image(filename):
preproc = _Preprocessor()
img = preproc.read_from_jpegfile(filename)
return preproc.preprocess(img)
class RandomColorDistortion(tf.keras.layers.Layer):
def __init__(self, contrast_range=[0.5, 1.5],
brightness_delta=[-0.2, 0.2], **kwargs):
super(RandomColorDistortion, self).__init__(**kwargs)
self.contrast_range = contrast_range
self.brightness_delta = brightness_delta
def call(self, images, training=None):
if not training:
return images
contrast = np.random.uniform(
self.contrast_range[0], self.contrast_range[1])
brightness = np.random.uniform(
self.brightness_delta[0], self.brightness_delta[1])
images = tf.image.adjust_contrast(images, contrast)
images = tf.image.adjust_brightness(images, brightness)
images = tf.clip_by_value(images, 0, 1)
return images
```
## Speeding up the reading of data
To try it out, we'll simply read through the data several times and compute some quantity on the images.
```
def loop_through_dataset(ds, nepochs):
lowest_mean = tf.constant(1.)
for epoch in range(nepochs):
thresh = np.random.uniform(0.3, 0.7) # random threshold
count = 0
sumsofar = tf.constant(0.)
for (img, label) in ds:
# mean of channel values > thresh
mean = tf.reduce_mean(tf.where(img > thresh, img, 0))
sumsofar = sumsofar + mean
count = count + 1
if count%100 == 0:
print('.', end='')
mean = sumsofar/count
print(mean)
if mean < lowest_mean:
lowest_mean = mean
return lowest_mean
PATTERN_SUFFIX, NUM_EPOCHS = '-0000[01]-*', 2 # 2 files, 2 epochs
#PATTERN_SUFFIX, NUM_EPOCHS = '-*', 20 # 16 files, 20 epochs
%%time
ds = create_preproc_dataset_plain(
'gs://practical-ml-vision-book/flowers_tfr/train' + PATTERN_SUFFIX
)
loop_through_dataset(ds, NUM_EPOCHS)
%%time
# parallel map
ds = create_preproc_dataset_parallelmap(
'gs://practical-ml-vision-book/flowers_tfr/train' + PATTERN_SUFFIX
)
loop_through_dataset(ds, NUM_EPOCHS)
%%time
# with interleave
ds = create_preproc_dataset_interleave(
'gs://practical-ml-vision-book/flowers_tfr/train' + PATTERN_SUFFIX,
num_parallel=None
)
loop_through_dataset(ds, NUM_EPOCHS)
%%time
# with interleave and parallel maps
ds = create_preproc_dataset_interleave(
'gs://practical-ml-vision-book/flowers_tfr/train' + PATTERN_SUFFIX,
num_parallel=AUTOTUNE
)
loop_through_dataset(ds, NUM_EPOCHS)
```
When I did this, this is what I got:
| Method | CPU time | Wall time |
| ---------------------- | ----------- | ------------ |
| Plain | 7.53s | 7.99s |
| Parallel Map | 8.30s | 5.94s |
| Interleave | 8.60s | 5.47s |
| Interleave+Parallel Map| 8.44s | 5.23s |
## ML model
The computation above was pretty cheap involving merely adding up all the pixel values.
What happens if we need a bit more complexity (gradient calc, etc.)?
```
def train_simple_model(ds, nepochs):
model = tf.keras.Sequential([
tf.keras.layers.Flatten(
input_shape=(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)),
#tf.keras.layers.Dense(32, activation='relu'),
tf.keras.layers.Dense(len(CLASS_NAMES), activation='softmax')
])
model.compile(optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False),
metrics=['accuracy'])
model.fit(ds, epochs=nepochs)
%%time
ds = create_preproc_dataset_plain(
'gs://practical-ml-vision-book/flowers_tfr/train' + PATTERN_SUFFIX
).batch(1)
train_simple_model(ds, NUM_EPOCHS)
%%time
# parallel map
ds = create_preproc_dataset_parallelmap(
'gs://practical-ml-vision-book/flowers_tfr/train' + PATTERN_SUFFIX
).batch(1)
train_simple_model(ds, NUM_EPOCHS)
%%time
# with interleave
ds = create_preproc_dataset_interleave(
'gs://practical-ml-vision-book/flowers_tfr/train' + PATTERN_SUFFIX,
num_parallel=None
).batch(1)
train_simple_model(ds, NUM_EPOCHS)
%%time
# with interleave and parallel maps
ds = create_preproc_dataset_interleave(
'gs://practical-ml-vision-book/flowers_tfr/train' + PATTERN_SUFFIX,
num_parallel=AUTOTUNE
).batch(1)
train_simple_model(ds, NUM_EPOCHS)
```
We note that the improvement remains:
| Method | CPU time | Wall time |
| -----------------------| ----------- | ------------ |
| Plain | 9.91s | 9.39s |
| Parallel Map | 10.7s | 8.17s |
| Interleave | 10.5s | 7.54s |
| Interleave+Parallel Map| 10.3s | 7.17s |
## Speeding up the handling of data
```
# alias to the more efficient one
def create_preproc_dataset(pattern):
return create_preproc_dataset_interleave(pattern, num_parallel=AUTOTUNE)
%%time
# add prefetching
ds = create_preproc_dataset(
'gs://practical-ml-vision-book/flowers_tfr/train' + PATTERN_SUFFIX
).prefetch(AUTOTUNE).batch(1)
train_simple_model(ds, NUM_EPOCHS)
%%time
# Add batching of different sizes
ds = create_preproc_dataset(
'gs://practical-ml-vision-book/flowers_tfr/train' + PATTERN_SUFFIX
).prefetch(AUTOTUNE).batch(8)
train_simple_model(ds, NUM_EPOCHS)
%%time
# Add batching of different sizes
ds = create_preproc_dataset(
'gs://practical-ml-vision-book/flowers_tfr/train' + PATTERN_SUFFIX
).prefetch(AUTOTUNE).batch(16)
train_simple_model(ds, NUM_EPOCHS)
%%time
# Add batching of different sizes
ds = create_preproc_dataset(
'gs://practical-ml-vision-book/flowers_tfr/train' + PATTERN_SUFFIX
).prefetch(AUTOTUNE).batch(32)
train_simple_model(ds, NUM_EPOCHS)
%%time
# add caching: always do this optimization last.
ds = create_preproc_dataset(
'gs://practical-ml-vision-book/flowers_tfr/train' + PATTERN_SUFFIX
).cache().batch(32)
train_simple_model(ds, NUM_EPOCHS)
%%time
# add caching: always do this optimization last.
ds = create_preproc_dataset(
'gs://practical-ml-vision-book/flowers_tfr/train' + PATTERN_SUFFIX
).prefetch(AUTOTUNE).cache().batch(32)
train_simple_model(ds, NUM_EPOCHS)
%%time
# add caching: always do this optimization last.
ds = create_preproc_dataset(
'gs://practical-ml-vision-book/flowers_tfr/train' + PATTERN_SUFFIX
).cache().prefetch(AUTOTUNE).batch(32)
train_simple_model(ds, NUM_EPOCHS)
```
Adding to the previous table:
| Method | CPU time | Wall time |
| -----------------------| ----------- | ------------ |
| Plain | 9.91s | 9.39s |
| Parallel Map | 10.7s | 8.17s |
| Interleave | 10.5s | 7.54s |
| Interleave+Parallel Map| 10.3s | 7.17s |
| Interleave + Parallel, and then adding: | - | - |
| Prefetch | 11.4s | 8.09s |
| Batch size 8 | 9.56s | 6.90s |
| Batch size 16 | 9.90s | 6.70s |
| Batch size 32 | 9.68s | 6.37s |
| Interleave + Parallel + batchsize 32, and then adding: | - | - |
| Cache | 6.16s | 4.36s |
| Prefetch + Cache | 5.76s | 4.04s |
| Cache + Prefetch | 5.65s | 4.19s |
So, the best option is:
<pre>
ds = create_preproc_dataset_interleave(pattern, num_parallel=AUTOTUNE).prefetch(AUTOTUNE).cache().batch(32)
</pre>
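If you want to reuse this combination later, one way is to wrap it in a small helper built only from the functions defined above:
```
# Convenience wrapper around the fastest combination found above
def create_efficient_dataset(pattern, batch_size=32):
    return (create_preproc_dataset_interleave(pattern, num_parallel=AUTOTUNE)
            .prefetch(AUTOTUNE)
            .cache()
            .batch(batch_size))

# Usage (same pattern as the cells above):
# ds = create_efficient_dataset('gs://practical-ml-vision-book/flowers_tfr/train' + PATTERN_SUFFIX)
# train_simple_model(ds, NUM_EPOCHS)
```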
## License
Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
# Day 16-17: Urban/Rural - Land
I need to catch up...
This won't be the most artistic day, but I would find it useful in my life to have code that downloads GHSL datasets and plots them.
## Configuration
```
import os
import rioxarray
import matplotlib.pyplot as plt
import matplotlib.colors as colors
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
```
## GHSL-POP
Population dataset from [Global Human Settlement Layer, GHSL](https://ghsl.jrc.ec.europa.eu/)
```
# Choose tile of interest
tile = "28_9"
# Arrange URL
url = ("zip+https://cidportal.jrc.ec.europa.eu/"\
"ftp/jrc-opendata/GHSL/"\
"GHS_POP_MT_GLOBE_R2019A/"\
"GHS_POP_E2015_GLOBE_R2019A_54009_1K/"\
"V1-0/tiles/"\
f"GHS_POP_E2015_GLOBE_R2019A_54009_1K_V1_0_{tile}.zip"\
f"!GHS_POP_E2015_GLOBE_R2019A_54009_1K_V1_0_{tile}.tif")
# Read data
pop = rioxarray.open_rasterio(url, masked=True)
# Preview
pop
# Construct plot
fig, ax = plt.subplots(figsize=(8,6))
im = pop.squeeze().plot.imshow(
ax=ax,
vmin=0,
vmax=1000,
cmap='inferno',
cbar_kwargs={"label": "Population"}
)
ax.axis('off')
ax.set(title="GHSL-POP")
# Save
out_file = f"16-17_POP.png"
out_path = os.path.join("..", "contributions", out_file)
fig.savefig(out_path, dpi=300, facecolor="w", bbox_inches="tight")
# Preview
plt.show()
```
## GHSL-SMOD
Settlement dataset from [Global Human Settlement Layer, GHSL](https://ghsl.jrc.ec.europa.eu/)
```
# Arrange URL
url = ("zip+https://cidportal.jrc.ec.europa.eu/"\
"ftp/jrc-opendata/GHSL/"\
"GHS_SMOD_POP_GLOBE_R2019A/"\
"GHS_SMOD_POP2015_GLOBE_R2019A_54009_1K/"\
"V2-0/tiles/"\
f"GHS_SMOD_POP2015_GLOBE_R2019A_54009_1K_V2_0_{tile}.zip"\
f"!GHS_SMOD_POP2015_GLOBE_R2019A_54009_1K_V2_0_{tile}.tif")
# Read data
smod = rioxarray.open_rasterio(url, masked=True)
# Preview
smod
# Set colors
cmap_discrete = colors.ListedColormap(["#ffffff","#7ab6f5","#cdf57a","#abcd66","#375623","#ffff00","#a87000","#732600","#ff0000"])
cmap_labels = ["n/a","Water","Very low density rural","Low density rural","Rural","Suburban or peri-urban","Semi-dense urban cluster","Dense urban cluster","Urban centre"]
cmap_vals = [-200,10,11,12,13,21,22,23,30.1]
# Construct map
fig, ax = plt.subplots(figsize=(8,6))
im = smod.squeeze().plot.imshow(ax=ax,
cmap=cmap_discrete,
levels=cmap_vals,
add_colorbar=False,
)
cbar = ax.figure.colorbar(im, ax=ax, ticks=cmap_vals)
cbar.set_ticklabels(cmap_labels)
ax.set(title="GHSL-SMOD")
ax.axis('off')
# Save
out_file = f"16-17_SMOD.png"
out_path = os.path.join("..", "contributions", out_file)
fig.savefig(out_path, dpi=300, facecolor="w", bbox_inches="tight")
# Preview
plt.show()
```
# Parsing San Jose PD's firearm search reports
This example uses `pdfplumber`'s visual debugging and text-extraction features to parse a fixed-width table embedded in a PDF. Thanks to [Ron Campbell](https://twitter.com/campbellronaldw) for the sample PDF.
```
import pdfplumber
import re
print(pdfplumber.__version__)
```
## Load the PDF
```
pdf = pdfplumber.open("../pdfs/san-jose-pd-firearm-sample.pdf")
```
## Examine the first page
```
p0 = pdf.pages[0]
im = p0.to_image()
im
```
## See where the characters are on the page
Below, we draw rectangles around each of the `char` objects that `pdfplumber` detected. By doing so, we can see that every line of the main part of the report is the same width, and that there are space (`" "`) characters padding out each field. That means we can parse those lines a lot like we'd parse a standard fixed-width data file.
```
im.reset().draw_rects(p0.chars)
```
## Extract the text from the PDF
Using the `Page.extract_text(...)` method, we grab every character on the page, line by line:
```
text = p0.extract_text()
print(text)
```
## Stripping away the header and footer
In this step, we use a regular expression to focus on the core part of the page — the table.
```
core_pat = re.compile(r"LOCATION[\-\s]+(.*)\n\s+Flags = e", re.DOTALL)
core = re.search(core_pat, text).group(1)
print(core)
```
## Parse each group of two lines
In the report, each firearm takes up two lines. The code below splits the core table into two-line groups, and then parses out the fields, based on the number of characters in each field:
```
lines = core.split("\n")
line_groups = list(zip(lines[::2], lines[1::2]))
print(line_groups[0])
def parse_row(first_line, second_line):
return {
"type": first_line[:20].strip(),
"item": first_line[21:41].strip(),
"make": first_line[44:89].strip(),
"model": first_line[90:105].strip(),
"calibre": first_line[106:111].strip(),
"status": first_line[112:120].strip(),
"flags": first_line[124:129].strip(),
"serial_number": second_line[0:13].strip(),
"report_tag_number": second_line[21:41].strip(),
"case_file_number": second_line[44:64].strip(),
"storage_location": second_line[68:91].strip(),
}
parsed = [ parse_row(first_line, second_line)
for first_line, second_line in line_groups ]
```
## Result
Below, you can see the parsed data for the first two firearms in the report:
```
parsed[:2]
```
## Preview via `pandas.DataFrame`
To make it a little easier to read, here's the full table, parsed and represented as a `pandas.DataFrame` (for ease of viewing):
```
import pandas as pd
columns = list(parsed[0].keys())
pd.DataFrame(parsed)[columns]
```
---
---
---
## OOP
A programming paradigm that provides a means of structuring programs so that properties and behaviors are bundled into individual objects.
Pros:
* code modularisation thus ease in troubleshooting.
* reuse of code through inheritance.
* flexibility through polymorphism (multiple usage).
### 1. Class Definition
> Classes define functions called methods, which identify the behaviors and actions that an object created from the class can perform with its data.
```
# function definition
def fix_laptop(harddisk, money):
# check if laptop is ok
if 'not' in harddisk:
print('Laptop has a problem')
else:
print('Laptop is OK')
    # fix it if it has an issue
if money == 'yes':
return 'DJ can get his laptop fixed'
else:
return 'DJ is fucked'
# class definition
class Kisauni():
# attributes
# class attributes
security = 'mateja everywhere'
ethnicity = 'waswahili'
# instance attributes // dunder methods
def __init__(self, mtaa, drainage_system, housing_style):
self.mtaa = mtaa
self.drainage_system = drainage_system
self.housing_style = housing_style
def __str__(self):
return 'A class indicating conditions in Kisauni'
# instance methods (customised functions)
def students(self, status, name, age, campus):
if 'yes' in status.lower():
return f'{name} is a {age} year old at The {campus}'
else:
return f'{name} is a {age} year old non student'
def relationships(self,status, name, sex):
if 'YES'in status.upper():
return f'{name} is a {sex}'
else:
return f'{name} is a bi'
def rehabilitations(self,status,name,age):
if 'yes' in status.lower():
            return f'{name} is a {age} year old who must go to rehab.'
        else:
            return f'{name} is a {age} year old, no rehab needed.'
# inheritance ( - overriding; - extending)
class Birds():
def flight(self):
return 'ALmost all birds can fly'
def edibility(self):
return 'almost all birds are edible'
class Chicken(Birds):
def flight(self):
print('chicken cannot fly')
def food(self):
return 'chicken feed on mash'
class Student:
# class attributes (uniform for all class objects)
campus = 'Technical University of Munich'
## dunder methods
''' universal properties (instance attributes - not necessarily uniform for all objects)
- arguments must be supplied when calling the class '''
def __init__(self, name, age, level, academic_year):
self.name = name
self.age = age
self.level = level
self.academic_year = academic_year
''' Class descriptor'''
def __str__(self):
return f" This is a Student class with methods: course, year and location."
## Instance Methods
'''- begine with a self, and can only be called from an instance of the class '''
# course
def course(self, course_name):
return f"{self.name} is pursuing a {self.level} in {course_name} at the {self.campus}"
# year
def year(self, year, gender):
if 'f' in gender.lower():
return f" She is a {self.age} year old currently in her {year} year."
else:
return f" He is a {self.age} year old currently in his {year} year."
# location
def location(self, location):
return f" Residing in {location}"
#race
def race(self, race):
pass
ola = Student('ola', 25, 'PhD', 3)
ola.course('Machine Learning')
# creating a class object/instantiating the class
student = Student('Ada', 21, 'B.Sc', 4)
print('Object type/description:', student)
student.course('Mathematics and Computer Science'), student.year(4, 'female'), student.location('Kisauni')
```
### 2. Inheritance
> A class takes on the attributes/methods of another. Newly formed classes are called child classes, and the classes that child classes are derived from are called parent classes.
> **extending** - having additional attributes.
> **overriding** - overwriting inherited attributes.
```
''' Using the example of class Student, every time we pass a new student, we have to pass the gender
argument which determines the way the year function is returned. We can create child classes of
different genders that inherit attributes of the Student Class and override the year method'''
class Female(Student):
# overriding
def year(self, year, gender = 'female'):
return f" She is a {self.age} year old currently in her {year} year."
#extending
def under_18(self):
if self.age < 18:
return True
else:
return False
class Male(Student):
# overriding
def year(self, year, gender = 'male'):
return f" He is a {self.age} year old currently in his {year} year."
#extending
def under_18(self):
if self.age < 18:
return True
else:
return False
ada = Female('Ada', 21, 'B.Sc', 4)
ada.year(4)
f = Female('Denise', 17, 'B.Sc', 4)
f.course('Mathematics and Computer Science'), f.year(4), f.location('Berlin'), f.under_18()
m = Male('Denis', 20, 'B.Sc', 4)
m.course('Mathematics and Finance'), m.year(3), f.location('Munich'), m.under_18()
```
### 3. Polymorphism
> the same function name (but with different signatures) being used for different types.
```
print('ada is a lady')
print(456)
print([5,6])
''' Polymorphism with uniform class methods'''
class Kenya():
def capital(self):
print("Nairobi is the capital of Kenya.")
def president(self):
print("Kenyatta is the president of Kenya")
class USA():
def capital(self):
print("Washington D.C. is the capital of USA.")
def president(self):
print("Biden is their newly elected president.")
k = Kenya()
u = USA()
for country in [k, u]:
country.capital()
country.president()
'''Polymorphism with a function and object'''
# in the previous example. Instead of looping: creating a function.
def func(obj):
obj.capital()
obj.president()
k = Kenya()
u = USA()
func(k)
func(u)
'''Polymorphism with inheritance'''
# This is equal to overriding in inheritance.
class Bird:
def intro(self):
print("There are many types of birds.")
def flight(self):
print("Most of the birds can fly but some cannot.")
class sparrow(Bird):
def flight(self):
print("Sparrows can fly.")
```
## Procedural Programming
> Structures a program like a recipe in that it provides a set of steps, in the form of functions and code blocks, that flow sequentially in order to complete a task.
> Relies on procedure calls to create modularized code. This approach simplifies your application code by breaking it into small pieces that a developer can view easily.
```
## summing elements of a list
def sum_elements(my_list):
sum = 0
for x in my_list:
sum += x
return sum
print(sum_elements([4,5,6,7]))
```
#### Task:
Create a class Rectangle and define the following methods:
* create_rectangle
Input parameters: x, y, width, height
Return value: instance of Rectangle
Operation: create a new instance of Rectangle
* area_of_rectangle
* perimeter_of_rectangle
* product_of_the_diagonals
Create a class Square that inherits from class Rectangle with an additional function of:
* angle_between_diagonals
A minimal sketch of one possible solution is given below.
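The sketch below is only something to check your own attempt against; the method bodies (and the `math`-based diagonal calculation) are my assumptions, not a prescribed answer.
```
import math

class Rectangle:
    def __init__(self, x, y, width, height):
        self.x, self.y = x, y
        self.width, self.height = width, height

    @classmethod
    def create_rectangle(cls, x, y, width, height):
        # returns a new instance of Rectangle
        return cls(x, y, width, height)

    def area_of_rectangle(self):
        return self.width * self.height

    def perimeter_of_rectangle(self):
        return 2 * (self.width + self.height)

    def product_of_the_diagonals(self):
        diagonal = math.hypot(self.width, self.height)
        return diagonal * diagonal  # both diagonals of a rectangle are equal

class Square(Rectangle):
    # extending Rectangle: a square is a rectangle with equal sides
    def __init__(self, x, y, side):
        super().__init__(x, y, side, side)

    def angle_between_diagonals(self):
        # the diagonals of a square always cross at 90 degrees
        return 90.0

r = Rectangle.create_rectangle(0, 0, 3, 4)
print(r.area_of_rectangle(), r.perimeter_of_rectangle(), r.product_of_the_diagonals())
s = Square(0, 0, 2)
print(s.angle_between_diagonals())
```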
#### Copyright 2020 Google LLC.
```
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Regression Quality
So far in this course, we have spent some time building and testing regression models. But how can we measure how good these models are? In this Colab, we will examine a few of the ways that we can measure and graph the results of a regression model in order to better understand the quality of the model.
## Building a Dataset
In order to discuss regression quality, we should probably start by building a regression model.
We will start by creating an artificial dataset to model.
Start by importing [NumPy](http://numpy.org) and setting a random seed so that we get reproducible results.
Remember: **Do not set a random seed in production code!**
```
import numpy as np
np.random.seed(0xFACADE)
```
Recall that linear regression is about trying to fit a straight line through a set of data points. The equation for a straight line is:
> $y = m*x + b$
where:
- $x$ is the feature
- $y$ is the outcome
- $m$ is the slope of the line
- $b$ is the intercept of the line on the $y$-axis
But at this point we don't even have $x$-values!
We will use NumPy's [random.uniform](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.random.uniform.html) function to generate 50 random numbers between 0 and 200 as $x$-values.
```
X = np.random.uniform(low=0, high=200, size=50)
print(f'min: {np.min(X)}')
print(f'max: {np.max(X)}')
print(f'mean: {np.mean(X)}')
print(f'count: {np.size(X)}')
```
You should see a:
* minimum value near, but not below 0
* maximum value near, but not above 200
* mean value somewhere near 100
* count value of exactly 50
Let's visualize the $x$-values, just to get some idea of the distribution of the values in the range of 0-200.
*How do we plot a one-dimensional list of values in two-dimensional space?*
We can plot it against itself!
```
import matplotlib.pyplot as plt
plt.plot(X, X, 'g.')
plt.show()
```
As you can see, we have a straight line of $x$-values that span from roughly 0 to 200. Let's now create some $y$-values via the equation $y=4x+10$ (i.e. the slope is 4 and the intercept is 10).
We'll call the new variable `Y_PRED` since it is the predicted variable.
```
SLOPE = 4
INTERCEPT = 10
Y_PRED = (SLOPE * X) + INTERCEPT
plt.plot(X, Y_PRED, 'b.')
plt.plot(X, Y_PRED, 'r-')
plt.show()
```
This regression line fits amazingly well! If only we could have this perfect of a fit in the real world. Unfortunately, this is almost never the case. There is always some variability.
Let's add some randomness into our $y$-values to get a more realistic dataset. We will keep our original $y$-values in order to remember our base regression line.
We'll recreate our original $y$-values and store them in `Y_PRED`. Then, we'll create `Y` with the same equation but with added randomness.
```
Y_PRED = (SLOPE * X) + INTERCEPT
randomness = np.random.uniform(low=-200, high=200, size=50)
Y = SLOPE * X + randomness + INTERCEPT
plt.plot(X, Y, 'b.')
plt.plot(X, Y_PRED, 'r-')
plt.show()
```
We now have the line that was used to generate the data plotted in red, and the randomly displaced data points in blue. The dots, though definitely not close to the line, at least follow the linear trend in general. This seems like a reasonable dataset for a linear regression.
Let's remind ourselves of the key variables in the model:
* `X`: the $x$-values that we used to "train" the model
* `Y`: the $y$-values that represent the actual values that correlate to `X`
* `Y_PRED`: the $y$-values that the model would predict for each $x$-value
## Coefficient of Determination
The **coefficient of determination**, denoted $R^2$, is one of the most important metrics in regression. It tells us how much of the data is "explained" by the model.
Before we can define the metric itself, we need to define a few other key terms.
A **residual** is the difference between the target value $y_i$ and the predicted value $\hat{y_i}$. The **residual sum of squares** is the summation of the square of every residual in the prediction set.
> $$ SS_{res} = \sum_{i}(y_i - \hat{y_i})^2$$
```
ss_res = ((Y - Y_PRED) ** 2).sum(axis=0, dtype=np.float64)
print(ss_res)
```
The **total sum of squares** is the sum of the squares of the difference between each value $y_i$ and their mean.
> $$\bar{y} = \frac{1}{n}\sum_{i=1}^{n}y_{i}$$
> $$SS_{tot} = \sum_{i}(y_{i}-\bar{y})^2$$
```
y_mean = np.average(Y, axis=0)
print(y_mean)
ss_tot = ((Y - y_mean)**2).sum(axis=0, dtype=np.float64)
print(ss_tot)
```
Given the total sum of squares and the residual sum of squares, we can calculate the coefficient of determination $R^2$.
> $$R^{2} = 1 - \frac{SS_{res}}{SS_{tot}}$$
```
r2 = 1 - (ss_res/ss_tot)
print(r2)
```
If you just ran the cells in this Colab from top to bottom you probably got a score of `0.846`.
Is this good? Bad? Mediocre?
The $R^2$ score measures how well the actual variance from $x$-values to $y$-values is represented in the variance between the $x$-values and the predicted $y$-values.
Typically, this score ranges from 0 to 1, where 0 is bad and 1 is a perfect mapping. However, the score can also be negative. Can you guess why?
If a line drawn horizontally through the data points performs better than your regression, then the $R^2$ score would be negative. If you see this, try again. Your model *really* isn't working.
For values in the range 0-1, interpreting the $R^2$ is more subjective. The closer to 0, the worse your model is at fitting the data. And generally, the closer to 1, the better (but you also don't want to overfit). This is where testing, observation, and experience come into play.
It turns out that scikit-learn can calculate $R^2$ for us:
```
from sklearn.metrics import r2_score
print(r2_score(Y, Y_PRED))
```
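As a quick sanity check of the claim above, predicting the mean $y$-value for every point makes $SS_{res}$ equal to $SS_{tot}$, so $R^2$ comes out as exactly 0; anything worse than that constant prediction goes negative. A one-line check using the variables already defined in this notebook:
```
# A constant prediction of the mean gives an R^2 of exactly 0
print(r2_score(Y, np.full_like(Y, Y.mean())))
```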
Knowing that we don't have to manually do all of the math again, let's now see the perfect and a very imperfect case of a regression fitting a dataset.
To begin with, we'll show a perfect fit. What happens if our predictions and our actual values are identical?
```
print(r2_score(Y, Y))
print(r2_score(Y_PRED, Y_PRED))
```
1.0: just what we thought! A perfect fit.
Now let's see if we can make a regression so poor that $R^2$ is negative.
In this case, we need to make our predicted data look different than our actuals. To do this, we'll negate our predictions and save them into a new variable called `Y_PRED_BAD`.
```
Y_PRED_BAD = -Y_PRED
plt.plot(X, Y, 'y.')
plt.plot(X, Y_PRED_BAD, 'r-')
```
That prediction line looks horrible! Indeed, a horizontal line would fit this data better.
Let's check the $R^2$.
```
print(r2_score(Y, Y_PRED_BAD))
```
A negative $R^2$ is rare in practice. But if you do ever see one, it means the model has gone quite wrong.
## Predicted vs. Actual Plots
We have now seen a quantitative way to measure the goodness-of-fit of our regressions: the coefficient of determination. We know that if we see negative numbers that our model is very broken and if we see numbers approaching 1, the model is decent (or overfitted). But what about the in-between?
This is where qualitative observations based on expert opinion needs to come into play.
There are numerous ways to visualize regression predictions, but one of the most basic is the "predicted vs. actual" plot.
To create this plot, we scatter-plot the actual $y$-values used to train our model against the predicted $y$-values generated from the training features. We then draw a line from the lowest prediction to the largest.
```
plt.plot(Y_PRED, Y, 'b.')
plt.plot([Y_PRED.min(), Y_PRED.max()], [Y_PRED.min(), Y_PRED.max()], 'r-')
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.show()
```
In this case, the data points scatter pretty evenly around the prediction-to-actual line.
So what does a bad plot look like?
Let's first negate all of our predictions, making them exactly the opposite of what they should be. This creates the exact opposite of a good actual-vs-predicted line.
```
Y_BAD = -Y_PRED
plt.plot(Y, Y_BAD, 'b.')
plt.plot([Y_BAD.min(), Y_BAD.max()], [Y_BAD.min(), Y_BAD.max()], 'r-')
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.show()
```
In this case we made a very contrived example where the predictions are exact opposites of the actual values. When you see this case, you have a model predicting roughly the opposite of what it should be predicting.
Let's look at another case, where we add a large positive bias to every prediction.
```
Y_BAD = Y_PRED + 200
plt.plot(Y, Y_BAD, 'b.')
plt.plot([Y_BAD.min(), Y_BAD.max()], [Y_BAD.min(), Y_BAD.max()], 'r-')
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.show()
```
Now we have a situation where there is an obvious **bias**. All predictions are higher than the actual values, so the model needs to be adjusted to make smaller predictions.
Most cases aren't quite so obvious. In the chart below, you can see that the predictions are okay for low values, but tend to underpredict for larger target values.
```
Y_BAD = Y_PRED - Y_PRED / 4
plt.plot(Y, Y_BAD, 'b.')
plt.plot([Y_BAD.min(), Y_BAD.max()], [Y_BAD.min(), Y_BAD.max()], 'r-')
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.show()
```
Predicted vs. actual charts are a useful tool for giving you a visual indication of how your model is performing. While single measures like $R^2$ give you an aggregated metric, charts allow you to see if there is a trend or outlier where your model isn't performing well.
If you identify problem areas, you can work on retraining your model.
## Residual Plots
Another helpful visualization tool is to plot the regression residuals. As a reminder, residuals are the difference between the actual values and the predicted values.
We plot residuals on the $y$-axis against the predicted values on the $x$-axis, and draw a horizontal line through $y=0$.
Cases where our predictions were too low are above the line. Cases where our predictions were too high are below the line.
```
RESIDUALS = Y - Y_PRED
plt.plot(Y_PRED, RESIDUALS, 'b.')
plt.plot([0, Y_PRED.max()], [0, 0], 'r-')
plt.xlabel('Predicted')
plt.ylabel('Residual')
plt.show()
```
In the "Predicted vs. Actual" section above, we plotted a case where there was a large positive bias in our predictions. Plotting the same biased data on a residual plot shows all of the residuals below the zero line.
```
RESIDUALS = Y - (Y_PRED + 200)
plt.plot(Y_PRED, RESIDUALS, 'b.')
plt.plot([0, Y_PRED.max()], [0, 0], 'r-')
plt.xlabel('Predicted')
plt.ylabel('Residual')
plt.show()
```
The other example in the "Predicted vs. Actual" section reduced our predictions by an amount proportional to the scale of the predictions. Below is the residual plot for that scenario.
```
RESIDUALS = Y - (Y_PRED - Y_PRED / 4)
plt.plot(Y_PRED, RESIDUALS, 'b.')
plt.plot([0, Y_PRED.max()], [0, 0], 'r-')
plt.xlabel('Predicted')
plt.ylabel('Residual')
plt.show()
```
## Resources
* [Coefficient of Determination](https://en.wikipedia.org/wiki/Coefficient_of_determination)
* [Interpreting Residual Plots](http://docs.statwing.com/interpreting-residual-plots-to-improve-your-regression/#gallery)
# Exercises
The [Interpreting Residual Plots](http://docs.statwing.com/interpreting-residual-plots-to-improve-your-regression/#gallery) resource gives examples of patterns in different residual plots and what those patterns might mean for your model.
Each exercise below contains code that generates an image. Run the code to view the image, and then find the corresponding pattern name in [Interpreting Residual Plots](http://docs.statwing.com/interpreting-residual-plots-to-improve-your-regression/#gallery). Note the name of the pattern in the answer cell, and provide a one or two sentence explanation of what this could signal about your model's predictions.
## Exercise 1
Run the code below to generate an image. Identify the corresponding residual plot pattern, and write a sentence or two about what this could signal about the model.
```
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(42)
x = np.linspace(-10.0, 10.0, 100)
y = np.linspace(-10.0, 10.0, 100)
f = x**2 + y**2 + np.random.uniform(low=-14, high=14, size=100)
plt.plot(x, f, 'b.')
plt.plot([x.min(), x.max()], [0, 0], 'r-')
plt.xlabel('Predicted')
plt.ylabel('Residual')
plt.show()
```
### **Student Solution**
*Which [plot pattern](http://docs.statwing.com/interpreting-residual-plots-to-improve-your-regression/#gallery) does this residual plot follow? And what might it mean about the model?*
---
## Exercise 2
Run the code below to generate an image. Identify the corresponding residual plot pattern, and write a sentence or two about what this could signal about the model.
```
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(42)
x = np.linspace(0.0, 10.0, 100)
y = np.concatenate([
np.random.uniform(low=-5, high=5, size=90),
np.random.uniform(low=50, high=55, size=10)
])
plt.plot(x, y, 'b.')
plt.plot([x.min(), x.max()], [0, 0], 'r-')
plt.xlabel('Predicted')
plt.ylabel('Residual')
plt.show()
```
### **Student Solution**
*Which [plot pattern](http://docs.statwing.com/interpreting-residual-plots-to-improve-your-regression/#gallery) does this residual plot follow? And what might it mean about the model?*
---
## Exercise 3
Run the code below to generate an image. Identify the corresponding residual plot pattern, and write a sentence or two about what this could signal about the model.
```
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(42)
x = np.concatenate([
np.random.uniform(low=0, high=2, size=90),
np.random.uniform(low=4, high=10, size=10)
])
y = np.random.uniform(low=-5, high=5, size=100)
plt.plot(x, y, 'b.')
plt.plot([x.min(), x.max()], [0, 0], 'r-')
plt.xlabel('Predicted')
plt.ylabel('Residual')
plt.show()
```
### **Student Solution**
*Which [plot pattern](http://docs.statwing.com/interpreting-residual-plots-to-improve-your-regression/#gallery) does this residual plot follow? And what might it mean about the model?*
---
|
github_jupyter
|
# openvino2tensorflow
This tutorial explains the use case of openvino2tensorflow while using arachne.
`openvino2tensorflow` is developed in the following GitHub repository.
https://github.com/PINTO0309/openvino2tensorflow
When you convert an ONNX model to a TensorFlow model with `onnx-tf`, the converted model includes many unnecessary transpose layers. This is because ONNX uses the NCHW layout while TensorFlow uses NHWC.
These extra transpose layers cause performance degradation during inference.
By using openvino2tensorflow, you can avoid inserting unnecessary transpose layers when converting a model from OpenVINO to TensorFlow.
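As a small illustration of the layout difference (not part of the conversion pipeline itself), the sketch below moves a tensor from NCHW to NHWC with a transpose; it is this kind of operation that `onnx-tf` ends up inserting repeatedly.
```
import numpy as np

nchw = np.zeros((1, 3, 224, 224), dtype=np.float32)  # batch, channels, height, width
nhwc = np.transpose(nchw, (0, 2, 3, 1))              # batch, height, width, channels
print(nchw.shape, "->", nhwc.shape)                  # (1, 3, 224, 224) -> (1, 224, 224, 3)
```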
In this tutorial, we compare two convert methods and their converted models:
1. PyTorch -> (torch2onnx) -> ONNX -> (onnx-simplifier) -> ONNX -> (onnx-tf) -> Tensorflow -> (tflite_converter) -> TfLite
2. PyTorch -> (torch2onnx) -> ONNX -> (onnx-simplifier) -> ONNX -> (openvino_mo) -> OpenVino -> (openvino2tensorflow) -> Tensorflow -> (tflite_converter) -> TfLite
The developers of openvino2tensorflow provide a detailed article about the advantages of using openvino2tensorflow: [Converting PyTorch, ONNX, Caffe, and OpenVINO (NCHW) models to Tensorflow / TensorflowLite (NHWC) in a snap](https://qiita.com/PINTO/items/ed06e03eb5c007c2e102)
## Create Simple Model
Here we create and save a very simple PyTorch model to be converted.
```
import torch
from torch import nn
import torch.onnx
model = nn.Sequential(
nn.Conv2d(3, 16, 3, padding=1),
nn.Conv2d(16, 16, 3, padding=1),
)
torch.save(model.eval(), "./sample.pth")
```
Save the model input and output information in YAML format for `arachne`.
```
yml = """
inputs:
- dtype: float32
name: input
shape:
- 1
- 3
- 224
- 224
outputs:
- dtype: float32
name: output
shape:
- 1
- 16
- 224
- 224
"""
open("sample.yml", "w").write(yml)
```
## Convert using onnx-tf
You can apply multiple tools in sequence with `arachne.pipeline`.
Models are converted in the following order:
PyTorch -> (torch2onnx) -> ONNX -> (onnx-simplifier) -> ONNX -> (onnx-tf) -> Tensorflow -> (tflite_converter) -> TfLite
```
!python -m arachne.driver.pipeline \
+pipeline=[torch2onnx,onnx_simplifier,onnx_tf,tflite_converter] \
model_file=./sample.pth \
output_path=./pipeline1.tar \
model_spec_file=./sample.yml
```
Extract the tarfile and inspect the network structure of the converted TFLite model.
You can visualize model structure in netron: `netron ./pipeline1/model_0.tflite`.
```
!mkdir -p pipeline1 && tar xvf pipeline1.tar -C ./pipeline1
import tensorflow as tf
def list_layers(model_path):
interpreter = tf.lite.Interpreter(model_path)
layer_details = interpreter.get_tensor_details()
interpreter.allocate_tensors()
for layer in layer_details:
print("Layer Name: {}".format(layer['name']))
list_layers("./pipeline1/model_0.tflite")
```
We can confirm that unnecessary transpose layers are included.
## Convert using openvino2tensorflow
Next, try the second conversion method using openvino2tensorflow.
Models are converted in the following order:
PyTorch -> (torch2onnx) -> ONNX -> (onnx-simplifier) -> ONNX -> (openvino_mo) -> OpenVino -> (openvino2tensorflow) -> Tensorflow -> (tflite_converter) -> TfLite
```
!python -m arachne.driver.pipeline \
+pipeline=[torch2onnx,onnx_simplifier,openvino_mo,openvino2tf,tflite_converter] \
model_file=./sample.pth \
output_path=./pipeline2.tar \
model_spec_file=./sample.yml
```
Extract the tarfile and inspect the network structure of the converted TFLite model.
You can visualize model structure in netron: `netron ./pipeline2/model_0.tflite`.
```
!mkdir -p pipeline2 && tar xvf pipeline2.tar -C ./pipeline2
list_layers("./pipeline2/model_0.tflite")
```
We can confirm that no transpose layers are included.
|
github_jupyter
|
# Work with Data
Data is the foundation on which machine learning models are built. Managing data centrally in the cloud, and making it accessible to teams of data scientists running experiments and training models on multiple workstations and compute targets, is an important part of any professional data science solution.
In this notebook, you'll explore two Azure Machine Learning objects for working with data: *datastores* and *datasets*.
## Connect to Your Workspace
To get started, connect to your workspace.
> **Note**: If you haven't already established an authenticated session with your Azure subscription, you'll be prompted to authenticate by clicking a link, entering an authentication code, and signing into Azure.
```
import azureml.core
from azureml.core import Workspace
# Load the workspace from the saved config file
ws = Workspace.from_config()
print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name))
```
## Work with Datastores
In Azure ML, *datastores* are references to storage locations, such as Azure Storage blob containers. Every workspace has a default datastore, which is usually the Azure Storage blob container that was created with the workspace. If you need to work with data that is stored in different locations, you can add custom datastores to your workspace and set any of them to be the default.
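For reference, the sketch below shows how a custom datastore could be registered and made the default; the storage account, container, and datastore names are hypothetical, and the calls assume the v1 `azureml-core` SDK used in this notebook.
```
from azureml.core import Datastore

# Hypothetical names and credentials -- replace with your own storage details.
blob_ds = Datastore.register_azure_blob_container(workspace=ws,
                                                  datastore_name='blob_data',
                                                  container_name='data-container',
                                                  account_name='mystorageaccount',
                                                  account_key='<storage-account-key>')
ws.set_default_datastore('blob_data')
```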
### View Datastores
Run the following code to determine the datastores in your workspace:
```
# Get the default datastore
default_ds = ws.get_default_datastore()
# Enumerate all datastores, indicating which is the default
for ds_name in ws.datastores:
print(ds_name, "- Default =", ds_name == default_ds.name)
```
You can also view and manage datastores in your workspace on the **Datastores** page for your workspace in [Azure Machine Learning studio](https://ml.azure.com).
### Upload Data to a Datastore
Now that you have identified the available datastores, you can upload files from your local file system to a datastore so that they will be accessible to experiments running in the workspace, regardless of where the experiment script is actually being run.
```
default_ds.upload_files(files=['./data/diabetes.csv', './data/diabetes2.csv'], # Upload the diabetes csv files in /data
target_path='diabetes-data/', # Put it in a folder path in the datastore
overwrite=True, # Replace existing files of the same name
show_progress=True)
```
## Work with Datasets
Azure Machine Learning provides an abstraction for data in the form of *datasets*. A dataset is a versioned reference to a specific set of data that you may want to use in an experiment. Datasets can be *tabular* or *file*-based.
### Create a Tabular Dataset
Let's create a dataset from the diabetes data you uploaded to the datastore, and view the first 20 records. In this case, the data is in a structured CSV format, so we'll use a tabular dataset.
```
from azureml.core import Dataset
# Get the default datastore
default_ds = ws.get_default_datastore()
#Create a tabular dataset from the path on the datastore (this may take a short while)
tab_data_set = Dataset.Tabular.from_delimited_files(path=(default_ds, 'diabetes-data/*.csv'))
# Display the first 20 rows as a Pandas dataframe
tab_data_set.take(20).to_pandas_dataframe()
```
As you can see in the code above, it's easy to convert a tabular dataset to a Pandas dataframe, enabling you to work with the data using common Python techniques.
### Create a File Dataset
The dataset you created is a *tabular* dataset that can be read as a dataframe containing all of the data in the structured files that are included in the dataset definition. This works well for tabular data, but in some machine learning scenarios you might need to work with unstructured data, or you may simply want to handle reading the data from files in your own code. To accomplish this, you can use a *file* dataset, which creates a list of file paths in a virtual mount point that you can use to read the data in the files.
```
#Create a file dataset from the path on the datastore (this may take a short while)
file_data_set = Dataset.File.from_files(path=(default_ds, 'diabetes-data/*.csv'))
# Get the files in the dataset
for file_path in file_data_set.to_path():
print(file_path)
```
### Register Datasets
Now that you have created datasets that reference the diabetes data, you can register them to make them easily accessible to any experiment being run in the workspace.
We'll register the tabular dataset as **diabetes dataset**, and the file dataset as **diabetes files**.
```
# Register the tabular dataset
try:
tab_data_set = tab_data_set.register(workspace=ws,
name='diabetes dataset',
description='diabetes data',
tags = {'format':'CSV'},
create_new_version=True)
except Exception as ex:
print(ex)
# Register the file dataset
try:
file_data_set = file_data_set.register(workspace=ws,
name='diabetes file dataset',
description='diabetes files',
tags = {'format':'CSV'},
create_new_version=True)
except Exception as ex:
print(ex)
print('Datasets registered')
```
You can view and manage datasets on the **Datasets** page for your workspace in [Azure Machine Learning studio](https://ml.azure.com). You can also get a list of datasets from the workspace object:
```
print("Datasets:")
for dataset_name in list(ws.datasets.keys()):
dataset = Dataset.get_by_name(ws, dataset_name)
print("\t", dataset.name, 'version', dataset.version)
```
Versioning datasets enables you to redefine them without breaking existing experiments or pipelines that rely on previous definitions. By default, the latest version of a named dataset is returned, but you can retrieve a specific version of a dataset by specifying the version number, like this:
```python
dataset_v1 = Dataset.get_by_name(ws, 'diabetes dataset', version = 1)
```
### Train a Model from a Tabular Dataset
Now that you have datasets, you're ready to start training models from them. You can pass datasets to scripts as *inputs* in the estimator used to run the script.
Run the following two code cells to create:
1. A folder named **diabetes_training_from_tab_dataset**
2. A script that trains a classification model by using a tabular dataset that is passed to it as an argument.
```
import os
# Create a folder for the experiment files
experiment_folder = 'diabetes_training_from_tab_dataset'
os.makedirs(experiment_folder, exist_ok=True)
print(experiment_folder, 'folder created')
%%writefile $experiment_folder/diabetes_training.py
# Import libraries
import os
import argparse
from azureml.core import Run, Dataset
import pandas as pd
import numpy as np
import joblib
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
# Get the script arguments (regularization rate and training dataset ID)
parser = argparse.ArgumentParser()
parser.add_argument('--regularization', type=float, dest='reg_rate', default=0.01, help='regularization rate')
parser.add_argument("--input-data", type=str, dest='training_dataset_id', help='training dataset')
args = parser.parse_args()
# Set regularization hyperparameter (passed as an argument to the script)
reg = args.reg_rate
# Get the experiment run context
run = Run.get_context()
# Get the training dataset
print("Loading Data...")
diabetes = run.input_datasets['training_data'].to_pandas_dataframe()
# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values
# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
# Train a logistic regression model
print('Training a logistic regression model with regularization rate of', reg)
run.log('Regularization Rate', np.float(reg))
model = LogisticRegression(C=1/reg, solver="liblinear").fit(X_train, y_train)
# calculate accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)
run.log('Accuracy', np.float(acc))
# calculate AUC
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test,y_scores[:,1])
print('AUC: ' + str(auc))
run.log('AUC', np.float(auc))
os.makedirs('outputs', exist_ok=True)
# note file saved in the outputs folder is automatically uploaded into experiment record
joblib.dump(value=model, filename='outputs/diabetes_model.pkl')
run.complete()
```
> **Note**: In the script, the dataset is passed as a parameter (or argument). In the case of a tabular dataset, this argument will contain the ID of the registered dataset, so you could write code in the script to get the experiment's workspace from the run context and then get the dataset using its ID, like this:
>
> ```
> run = Run.get_context()
> ws = run.experiment.workspace
> dataset = Dataset.get_by_id(ws, id=args.training_dataset_id)
> diabetes = dataset.to_pandas_dataframe()
> ```
>
> However, Azure Machine Learning runs automatically identify arguments that reference named datasets and add them to the run's **input_datasets** collection, so you can also retrieve the dataset from this collection by specifying its "friendly name" (which, as you'll see shortly, is specified in the argument definition in the script run configuration for the experiment). This is the approach taken in the script above.
Now you can run the script as an experiment, defining an argument for the training dataset, which is read by the script.
> **Note**: The **Dataset** class depends on some components in the **azureml-dataprep** package, so you need to include this package in the environment where the training experiment will be run. The **azureml-dataprep** package is included in the **azureml-defaults** package.
```
from azureml.core import Experiment, ScriptRunConfig, Environment
from azureml.widgets import RunDetails
# Create a Python environment for the experiment (from a .yml file)
env = Environment.from_conda_specification("experiment_env", "environment.yml")
# Get the training dataset
diabetes_ds = ws.datasets.get("diabetes dataset")
# Create a script config
script_config = ScriptRunConfig(source_directory=experiment_folder,
script='diabetes_training.py',
arguments = ['--regularization', 0.1, # Regularizaton rate parameter
'--input-data', diabetes_ds.as_named_input('training_data')], # Reference to dataset
environment=env)
# submit the experiment
experiment_name = 'mslearn-train-diabetes'
experiment = Experiment(workspace=ws, name=experiment_name)
run = experiment.submit(config=script_config)
RunDetails(run).show()
run.wait_for_completion()
```
> **Note**: The **--input-data** argument passes the dataset as a *named input* that includes a *friendly name* for the dataset, which the script uses to read it from the **input_datasets** collection in the experiment run. The string value in the **--input-data** argument is actually the registered dataset's ID. As an alternative approach, you could simply pass `diabetes_ds.id`, in which case the script can access the dataset ID from the script arguments and use it to get the dataset from the workspace (but not from the **input_datasets** collection).
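For reference, a minimal sketch of that alternative; the training script would then need to resolve the ID itself, as in the code shown in the earlier note.
```
# Pass the plain dataset ID instead of a named input (sketch only).
script_config_alt = ScriptRunConfig(source_directory=experiment_folder,
                                    script='diabetes_training.py',
                                    arguments=['--regularization', 0.1,
                                               '--input-data', diabetes_ds.id],
                                    environment=env)
# Inside the script: ws = run.experiment.workspace
#                    dataset = Dataset.get_by_id(ws, id=args.training_dataset_id)
```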
The first time the experiment is run, it may take some time to set up the Python environment; subsequent runs will be quicker.
When the experiment has completed, use the widget to view the **azureml-logs/70_driver_log.txt** output log and the metrics generated by the run.
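If you prefer to inspect the completed run programmatically rather than in the widget, here is a small sketch using standard calls on the `run` object:
```
# List the logged metrics and the output files recorded for the run.
for metric_name, metric_value in run.get_metrics().items():
    print(metric_name, ':', metric_value)
print(run.get_file_names())
```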
### Register the Trained Model
As with any training experiment, you can retrieve the trained model and register it in your Azure Machine Learning workspace.
```
from azureml.core import Model
run.register_model(model_path='outputs/diabetes_model.pkl', model_name='diabetes_model',
tags={'Training context':'Tabular dataset'}, properties={'AUC': run.get_metrics()['AUC'], 'Accuracy': run.get_metrics()['Accuracy']})
for model in Model.list(ws):
print(model.name, 'version:', model.version)
for tag_name in model.tags:
tag = model.tags[tag_name]
print ('\t',tag_name, ':', tag)
for prop_name in model.properties:
prop = model.properties[prop_name]
print ('\t',prop_name, ':', prop)
print('\n')
```
### Train a Model from a File Dataset
You've seen how to train a model using training data in a tabular dataset, but what about a file dataset?
When you're using a file dataset, the dataset argument passed to the script represents a mount point containing file paths. How you read the data from these files depends on the kind of data in the files and what you want to do with it. In the case of the diabetes CSV files, you can use the Python **glob** module to create a list of files in the virtual mount point defined by the dataset, and read them all into Pandas dataframes that are concatenated into a single dataframe.
Run the following two code cells to create:
1. A folder named **diabetes_training_from_file_dataset**
2. A script that trains a classification model by using a file dataset that is passed to it as an *input*.
```
import os
# Create a folder for the experiment files
experiment_folder = 'diabetes_training_from_file_dataset'
os.makedirs(experiment_folder, exist_ok=True)
print(experiment_folder, 'folder created')
%%writefile $experiment_folder/diabetes_training.py
# Import libraries
import os
import argparse
from azureml.core import Dataset, Run
import pandas as pd
import numpy as np
import joblib
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
import glob
# Get script arguments (regularization rate and file dataset mount point)
parser = argparse.ArgumentParser()
parser.add_argument('--regularization', type=float, dest='reg_rate', default=0.01, help='regularization rate')
parser.add_argument('--input-data', type=str, dest='dataset_folder', help='data mount point')
args = parser.parse_args()
# Set regularization hyperparameter (passed as an argument to the script)
reg = args.reg_rate
# Get the experiment run context
run = Run.get_context()
# load the diabetes dataset
print("Loading Data...")
data_path = run.input_datasets['training_files'] # Get the training data path from the input
# (You could also just use args.dataset_folder if you don't want to rely on a hard-coded friendly name)
# Read the files
all_files = glob.glob(data_path + "/*.csv")
diabetes = pd.concat((pd.read_csv(f) for f in all_files), sort=False)
# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values
# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
# Train a logistic regression model
print('Training a logistic regression model with regularization rate of', reg)
run.log('Regularization Rate', np.float(reg))
model = LogisticRegression(C=1/reg, solver="liblinear").fit(X_train, y_train)
# calculate accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)
run.log('Accuracy', np.float(acc))
# calculate AUC
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test,y_scores[:,1])
print('AUC: ' + str(auc))
run.log('AUC', np.float(auc))
os.makedirs('outputs', exist_ok=True)
# note file saved in the outputs folder is automatically uploaded into experiment record
joblib.dump(value=model, filename='outputs/diabetes_model.pkl')
run.complete()
```
Just as with tabular datasets, you can retrieve a file dataset from the **input_datasets** collection by using its friendly name. You can also retrieve it from the script argument, which in the case of a file dataset contains a mount path to the files (rather than the dataset ID that is passed for a tabular dataset).
Next we need to change the way we pass the dataset to the script: it needs to define a path from which the script can read the files. You can use either the **as_download** or the **as_mount** method to do this. Using **as_download** causes the files in the file dataset to be downloaded to a temporary location on the compute where the script is being run, while **as_mount** creates a mount point from which the files can be streamed directly from the dataset.
You can combine the access method with the **as_named_input** method to include the dataset in the **input_datasets** collection in the experiment run (if you omit this, for example by setting the argument to `diabetes_ds.as_mount()`, the script will be able to access the dataset mount point from the script arguments, but not from the **input_datasets** collection).
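For reference, a minimal sketch of the **as_mount** variant; it assumes the same file dataset retrieved in the next cell (`ws.datasets.get("diabetes file dataset")`), and only the dataset argument changes.
```
# Stream the files from a mount point instead of downloading them first (sketch only).
mounted_input = diabetes_ds.as_named_input('training_files').as_mount()
# arguments = ['--regularization', 0.1, '--input-data', mounted_input]
```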
```
from azureml.core import Experiment
from azureml.widgets import RunDetails
# Get the training dataset
diabetes_ds = ws.datasets.get("diabetes file dataset")
# Create a script config
script_config = ScriptRunConfig(source_directory=experiment_folder,
script='diabetes_training.py',
arguments = ['--regularization', 0.1, # Regularizaton rate parameter
'--input-data', diabetes_ds.as_named_input('training_files').as_download()], # Reference to dataset location
environment=env) # Use the environment created previously
# submit the experiment
experiment_name = 'mslearn-train-diabetes'
experiment = Experiment(workspace=ws, name=experiment_name)
run = experiment.submit(config=script_config)
RunDetails(run).show()
run.wait_for_completion()
```
When the experiment has completed, use the widget to view the **azureml-logs/70_driver_log.txt** output log and verify that the files in the file dataset were downloaded to a temporary folder so that the script could read them.
### Register the Trained Model
Once again, you can register the model that was trained by the experiment.
```
from azureml.core import Model
run.register_model(model_path='outputs/diabetes_model.pkl', model_name='diabetes_model',
tags={'Training context':'File dataset'}, properties={'AUC': run.get_metrics()['AUC'], 'Accuracy': run.get_metrics()['Accuracy']})
for model in Model.list(ws):
print(model.name, 'version:', model.version)
for tag_name in model.tags:
tag = model.tags[tag_name]
print ('\t',tag_name, ':', tag)
for prop_name in model.properties:
prop = model.properties[prop_name]
print ('\t',prop_name, ':', prop)
print('\n')
```
> **More Information**: For more details about training with datasets, see [Train with datasets](https://docs.microsoft.com/azure/machine-learning/how-to-train-with-datasets) in the Azure ML documentation.
|
github_jupyter
|
```
!pip install git+https://github.com/zhy0/dmarket_rl
!pip install ray[rllib]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from dmarket.environments import MultiAgentTrainingEnv
from dmarket.info_settings import OfferInformationSetting, BlackBoxSetting
from dmarket.agents import GymRLAgent
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.rllib.agents import pg
from ray.tune import register_env
from gym.spaces import Box, Discrete, Tuple
class MultiWrapper(MultiAgentTrainingEnv, MultiAgentEnv):
def __init__(self, env_config):
super().__init__(**env_config)
rl_agents = [
GymRLAgent('seller', 90, 'S1', max_factor=0.25, discretization=20),
GymRLAgent('seller', 90, 'S2', max_factor=0.25, discretization=20),
GymRLAgent('seller', 90, 'S3', max_factor=0.25, discretization=20),
GymRLAgent('buyer', 110, 'B1', max_factor=0.25, discretization=20),
GymRLAgent('buyer', 110, 'B2', max_factor=0.25, discretization=20),
GymRLAgent('buyer', 110, 'B3', max_factor=0.25, discretization=20),
]
fixed_agents = []
setting = BlackBoxSetting()
env = MultiAgentTrainingEnv(rl_agents, fixed_agents, setting)
my_policy = (None, env.observation_space, Discrete(20), {})
my_group = (
None,
Tuple([env.observation_space for i in range(3)]),
Tuple([Discrete(20) for i in range(3)]),
{},
)
def select_policy(agent_id):
"""This function maps the agent id to the policy id"""
return agent_id
# We name our policies the same as our RL agents
policies = {
'S1': my_policy,
'S2': my_policy,
'S3': my_policy,
'B1': my_policy,
'B2': my_policy,
'B3': my_policy,
'Sellers': my_group
}
EXP_NAME = "multi_seller_group"
def run_experiment(iterations):
grouped_sellers = lambda config: MultiWrapper(config).with_agent_groups(groups={
"Sellers": ['S1', 'S2', 'S3']
})
register_env("grouped_sellers", grouped_sellers)
trainer = pg.PGTrainer(env="grouped_sellers", config={
"env_config": {"rl_agents": rl_agents, "fixed_agents": fixed_agents, "setting": setting},
"log_level": "ERROR",
"timesteps_per_iteration": 30,
"multiagent": {
"policies": policies,
"policy_mapping_fn": select_policy
}
})
rewards = []
episodes = []
episode_len = []
for i in range(iterations):
result = trainer.train()
rewards.append(result['policy_reward_mean'])
episodes.append(result['episodes_total'])
episode_len.append(result['episode_len_mean'])
df = pd.DataFrame(rewards, index=episodes)
df.index.name = 'Episodes'
df['episode_len'] = episode_len
return df
%%time
N_iter = 500
runs = 5
for i in range(runs):
result = run_experiment(N_iter)
plt.figure()
result.plot()
result.to_csv(f'{EXP_NAME}_iter{N_iter}_run{i}.csv')
```
|
github_jupyter
|
# Ingest Text Data
Labeled text data can be in a structured data format, such as reviews for sentiment analysis, news headlines for topic modeling, or documents for text classification. In these cases, you may have one column for the label, one column for the text, and sometimes other columns for attributes. You can treat this structured data like tabular data, and ingest it in one of the ways discussed in the previous notebook [011_Ingest_tabular_data.ipynb](011_Ingest_tabular_data_v1.ipynb). Sometimes text data, especially raw text data comes as unstructured data and is often in .json or .txt format, and we will discuss how to ingest these types of data files into a SageMaker Notebook in this section.
## Set Up Notebook
```
%pip install -qU 'sagemaker>=2.15.0' 's3fs==0.4.2'
import pandas as pd
import json
import glob
import s3fs
import sagemaker
# Get SageMaker session & default S3 bucket
sagemaker_session = sagemaker.Session()
bucket = sagemaker_session.default_bucket() # replace with your own bucket if you have one
s3 = sagemaker_session.boto_session.resource('s3')
prefix = 'text_spam/spam'
prefix_json = 'json_jeo'
filename = 'SMSSpamCollection.txt'
filename_json = 'JEOPARDY_QUESTIONS1.json'
```
## Downloading data from Online Sources
### Text data (in structured .csv format): Twitter -- sentiment140
**Sentiment140** This is the sentiment140 dataset. It contains 1.6M tweets extracted using the twitter API. The tweets have been annotated with sentiment (0 = negative, 4 = positive) and topics (hashtags used to retrieve tweets). The dataset contains the following columns:
* `target`: the polarity of the tweet (0 = negative, 4 = positive)
* `ids`: The id of the tweet ( 2087)
* `date`: the date of the tweet (Sat May 16 23:58:44 UTC 2009)
* `flag`: The query (lyx). If there is no query, then this value is NO_QUERY.
* `user`: the user that tweeted (robotickilldozr)
* `text`: the text of the tweet (Lyx is cool)
[Second Twitter data](https://github.com/guyz/twitter-sentiment-dataset) is a Twitter data set collected as an extension to Sanders Analytics Twitter sentiment corpus, originally designed for training and testing Twitter sentiment analysis algorithms. We will use this data to showcase how to aggregate two data sets if you want to enhance your current data set by adding more data to it.
```
#helper functions to upload data to s3
def write_to_s3(filename, bucket, prefix):
#put one file in a separate folder. This is helpful if you read and prepare data with Athena
filename_key = filename.split('.')[0]
key = "{}/{}/{}".format(prefix,filename_key,filename)
return s3.Bucket(bucket).upload_file(filename,key)
def upload_to_s3(bucket, prefix, filename):
url = 's3://{}/{}/{}'.format(bucket, prefix, filename)
print('Writing to {}'.format(url))
write_to_s3(filename, bucket, prefix)
#run this cell if you are in SageMaker Studio notebook
#!apt-get install unzip
#download first twitter dataset
!wget http://cs.stanford.edu/people/alecmgo/trainingandtestdata.zip -O sentimen140.zip
# Uncompressing
!unzip -o sentimen140.zip -d sentiment140
#upload the files to the S3 bucket
csv_files = glob.glob("sentiment140/*.csv")
for filename in csv_files:
upload_to_s3(bucket, 'text_sentiment140', filename)
#download second twitter dataset
!wget https://raw.githubusercontent.com/zfz/twitter_corpus/master/full-corpus.csv
filename = 'full-corpus.csv'
upload_to_s3(bucket, 'text_twitter_sentiment_2', filename)
```
### Text data (in .txt format): SMS Spam data
[SMS Spam Data](https://archive.ics.uci.edu/ml/datasets/sms+spam+collection) was manually extracted from the Grumbletext Web site. This is a UK forum in which cell phone users make public claims about SMS spam messages, most of them without reporting the very spam message received. Each line in the text file has the correct class followed by the raw message. We will use this data to showcase how to ingest text data in .txt format.
```
!wget http://www.dt.fee.unicamp.br/~tiago/smsspamcollection/smsspamcollection.zip -O spam.zip
!unzip -o spam.zip -d spam
txt_files = glob.glob("spam/*.txt")
for filename in txt_files:
upload_to_s3(bucket, 'text_spam', filename)
```
### Text Data (in .json format): Jeopardy Question data
[Jeopardy Question](https://j-archive.com/) was obtained by crawling the Jeopardy question archive website. It is an unordered list of questions where each question has the following key-value pairs:
* `category` : the question category, e.g. "HISTORY"
* `value`: dollar value of the question as string, e.g. "\$200"
* `question`: text of question
* `answer` : text of answer
* `round`: one of "Jeopardy!","Double Jeopardy!","Final Jeopardy!" or "Tiebreaker"
* `show_number` : string of show number, e.g '4680'
* `air_date` : the show air date in format YYYY-MM-DD
```
#json file format
!wget http://skeeto.s3.amazonaws.com/share/JEOPARDY_QUESTIONS1.json.gz
# Uncompressing
!gunzip -f JEOPARDY_QUESTIONS1.json.gz
filename = 'JEOPARDY_QUESTIONS1.json'
upload_to_s3(bucket, 'json_jeo', filename)
```
## Ingest Data into Sagemaker Notebook
## Method 1: Copying data to the Instance
You can use the AWS Command Line Interface (CLI) to copy your data from s3 to your SageMaker instance. This is a quick and easy approach when you are dealing with medium sized data files, or you are experimenting and doing exploratory analysis. The documentation can be found [here](https://docs.aws.amazon.com/cli/latest/reference/s3/cp.html).
```
#Specify file names
prefix = 'text_spam/spam'
prefix_json = 'json_jeo'
filename = 'SMSSpamCollection.txt'
filename_json = 'JEOPARDY_QUESTIONS1.json'
prefix_spam_2 = 'text_spam/spam_2'
#copy data to your sagemaker instance using AWS CLI
!aws s3 cp s3://$bucket/$prefix_json/ text/$prefix_json/ --recursive
data_location = "text/{}/{}".format(prefix_json, filename_json)
with open(data_location) as f:
data = json.load(f)
print(data[0])
```
## Method 2: Use AWS compatible Python Packages
When you are dealing with large data sets, or do not want to lose any data when you delete your Sagemaker Notebook Instance, you can use pre-built packages to access your files in S3 without copying files into your instance. These packages, such as `Pandas`, have implemented options to access data with a specified path string: while you will use `file://` on your local file system, you will use `s3://` instead to access the data through the AWS boto library. For `pandas`, any valid string path is acceptable. The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. You can find additional documentation [here](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html).
For text data, most of the time you can read it as line-by-line files or use Pandas to read it as a DataFrame by specifying a delimiter.
```
data_s3_location = "s3://{}/{}/{}".format(bucket, prefix, filename) # S3 URL
s3_tabular_data = pd.read_csv(data_s3_location, sep="\t", header=None)
s3_tabular_data.head()
```
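The other option mentioned above, reading the file line by line rather than as a dataframe, can be sketched with `s3fs` using the same `bucket`, `prefix`, and `filename` variables; each line of the SMS file holds a label, a tab, and the message.
```
fs_preview = s3fs.S3FileSystem()
with fs_preview.open("s3://{}/{}/{}".format(bucket, prefix, filename), 'rb') as f:
    for i, line in enumerate(f):
        label, _, message = line.decode('utf-8').partition('\t')
        print(label, '->', message.strip())
        if i >= 2:  # only preview the first few lines
            break
```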
For JSON files, depending on the structure, you can also use `Pandas` `read_json` function to read it if it's a flat json file.
```
data_json_location = "s3://{}/{}/{}".format(bucket, prefix_json, filename_json)
s3_tabular_data_json = pd.read_json(data_json_location, orient='records')
s3_tabular_data_json.head()
```
## Method 3: Use AWS Native methods
#### s3fs
[S3Fs](https://s3fs.readthedocs.io/en/latest/) is a Pythonic file interface to S3. It builds on top of botocore. The top-level class S3FileSystem holds connection information and allows typical file-system style operations like cp, mv, ls, du, glob, etc., as well as put/get of local files to/from S3.
```
fs = s3fs.S3FileSystem()
data_s3fs_location = "s3://{}/{}/".format(bucket, prefix)
# To List all files in your accessible bucket
fs.ls(data_s3fs_location)
# open it directly with s3fs
data_s3fs_location = "s3://{}/{}/{}".format(bucket, prefix, filename) # S3 URL
with fs.open(data_s3fs_location) as f:
print(pd.read_csv(f, sep = '\t', nrows = 2))
```
# Aggregating Data Set
If you would like to enhance your data with more data collected for your use cases, you can always aggregate the newly-collected data with your current data set. We will use two data sets -- Sentiment140 and the Sanders Twitter sentiment corpus -- to show how to aggregate data together.
```
prefix_tw1 = 'text_sentiment140/sentiment140'
filename_tw1 = 'training.1600000.processed.noemoticon.csv'
prefix_added = 'text_twitter_sentiment_2'
filename_added = 'full-corpus.csv'
```
Let's read in our original data and take a look at its format and schema:
```
data_s3_location_base = "s3://{}/{}/{}".format(bucket, prefix_tw1, filename_tw1) # S3 URL
# we will showcase with a smaller subset of data for demonstration purpose
text_data = pd.read_csv(data_s3_location_base, header = None,
encoding = "ISO-8859-1", low_memory=False,
nrows = 10000)
text_data.columns = ['target', 'tw_id', 'date', 'flag', 'user', 'text']
```
We have 6 columns: `date`, `text`, `flag` (the topic the tweet was queried with), `tw_id` (the tweet's id), `user` (the user account name), and `target` (0 = negative, 4 = positive).
```
text_data.head(1)
```
Let's read in and take a look at the data we want to add to our original data.
We will start by checking the columns of both data sets. The new data set has 5 columns: `TweetDate`, which maps to `date`; `TweetText`, which maps to `text`; `Topic`, which maps to `flag`; `TweetId`, which maps to `tw_id`; and `Sentiment`, which maps to `target`. The new data set does not have a user account name column, so when we aggregate the two data sets we can add this column to the data set being added and fill it with `NULL` values. You could also remove this column from the original data if it does not provide much valuable information for your use case.
```
data_s3_location_added = "s3://{}/{}/{}".format(bucket, prefix_added, filename_added) # S3 URL
# we will showcase with a smaller subset of data for demonstration purpose
text_data_added = pd.read_csv(data_s3_location_added,
encoding = "ISO-8859-1", low_memory=False,
nrows = 10000)
text_data_added.head(1)
```
#### Add the missing column to the new data set and fill it with `NULL`
```
text_data_added['user'] = ""
```
#### Renaming the new data set columns to combine two data sets
```
text_data_added.columns = ['flag', 'target', 'tw_id', 'date', 'text', 'user']
text_data_added.head(1)
```
#### Change the `target` column to the same format as the `target` in the original data set
Note that the `target` column in the new data set is labeled "positive", "negative", "neutral", and "irrelevant", whereas the `target` in the original data set is marked as "0" and "4". So let's map "positive" to 4, "neutral" to 2, and "negative" to 0 in the new data set so that the two are consistent. For "irrelevant" tweets, which are either not in English or spam, you can either remove them if they are not valuable for your use case (in our sentiment analysis use case we will remove them, since that text does not help predict sentiment) or map them to -1.
```
# remove tweets labeled as irrelevant
text_data_added = text_data_added[text_data_added['target'] != 'irrelevant']
# convert strings to number targets
target_map = {'positive': 4, 'negative': 0, 'neutral': 2}
text_data_added['target'] = text_data_added['target'].map(target_map)
```
#### Combine the two data sets and save as one new file
```
text_data_new = pd.concat([text_data, text_data_added])
filename = 'sentiment_full.csv'
text_data_new.to_csv(filename, index = False)
upload_to_s3(bucket, 'text_twitter_sentiment_full', filename)
```
### Citation
Twitter140 Data, Go, A., Bhayani, R. and Huang, L., 2009. Twitter sentiment classification using distant supervision. CS224N Project Report, Stanford, 1(2009), p.12.
SMS Spam data, Almeida, T.A., Gómez Hidalgo, J.M., Yamakami, A. Contributions to the Study of SMS Spam Filtering: New Collection and Results. Proceedings of the 2011 ACM Symposium on Document Engineering (DOCENG'11), Mountain View, CA, USA, 2011.
J! Archive, J! Archive is created by fans, for fans. The Jeopardy! game show and all elements thereof, including but not limited to copyright and trademark thereto, are the property of Jeopardy Productions, Inc. and are protected under law. This website is not affiliated with, sponsored by, or operated by Jeopardy Productions, Inc.
|
github_jupyter
|
# New Style HDMI input and Pixel Formatting
This notebook introduces the new features of PYNQ 2.0 for interacting with the video pipeline. The API has been completely
redesigned with high performance image processing applications in mind.
To start, download the base overlay and instantiate the HDMI input and output.
```
from pynq.overlays.base import BaseOverlay
from pynq.lib.video import *
base = BaseOverlay("base.bit")
hdmi_in = base.video.hdmi_in
hdmi_out = base.video.hdmi_out
```
## Getting started
First we'll use the default pixel format which is 24 bit-per-pixel BGR formatted data for ease of use with OpenCV.
```
hdmi_in.configure()
hdmi_out.configure(hdmi_in.mode)
hdmi_in.start()
hdmi_out.start()
```
The monitor should turn on and show a blank screen. To pass the image data through we can tie the output to the input. The tie will last until we send something else to be displayed.
```
hdmi_in.tie(hdmi_out)
```
While this provides a fast way of passing video data through the pipeline, there is no way to access or modify the frames. For that we need a loop calling `readframe` and `writeframe`.
```
import time
numframes = 600
start = time.time()
for _ in range(numframes):
f = hdmi_in.readframe()
hdmi_out.writeframe(f)
end = time.time()
print("Frames per second: " + str(numframes / (end - start)))
```
Next we can start adding some OpenCV processing into the mix. For all of these examples we are going to use a Laplacian gradient filter. The first loop is going to perform the grayscale conversion in software.
```
import cv2
import numpy as np
numframes = 10
grayscale = np.ndarray(shape=(hdmi_in.mode.height, hdmi_in.mode.width),
dtype=np.uint8)
result = np.ndarray(shape=(hdmi_in.mode.height, hdmi_in.mode.width),
dtype=np.uint8)
start = time.time()
for _ in range(numframes):
inframe = hdmi_in.readframe()
cv2.cvtColor(inframe,cv2.COLOR_BGR2GRAY,dst=grayscale)
inframe.freebuffer()
cv2.Laplacian(grayscale, cv2.CV_8U, dst=result)
outframe = hdmi_out.newframe()
cv2.cvtColor(result, cv2.COLOR_GRAY2BGR,dst=outframe)
hdmi_out.writeframe(outframe)
end = time.time()
print("Frames per second: " + str(numframes / (end - start)))
```
## Cleaning up
Finally you must always stop the interfaces when you are done with them. Otherwise bad things can happen when the bitstream is reprogrammed. You can also use the HDMI interfaces in a context manager to ensure that the cleanup is always performed.
```
hdmi_out.close()
hdmi_in.close()
```
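For reference, a minimal sketch of the context-manager usage mentioned above, assuming the interfaces support the `with` statement as described (behaviour may differ between PYNQ releases):
```
# Cleanup happens automatically when the block exits, even on an exception.
with hdmi_in, hdmi_out:
    hdmi_in.configure()
    hdmi_out.configure(hdmi_in.mode)
    hdmi_in.start()
    hdmi_out.start()
    hdmi_in.tie(hdmi_out)
```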
## Cacheable and non-cacheable frames
By default frames used by the HDMI subsystem are marked as cacheable, meaning that the CPU cache is available for speeding up software operation at the expense of needing to flush frames prior to handing them off to the video system. This flushing is handled by PYNQ but can still impose a significant performance penalty, particularly on 32-bit ARM architectures. To mitigate this, the `cacheable_frames` property can be set to `False` on the `hdmi_in` and `hdmi_out` subsystems. This will improve the performance of passing frames between accelerators at the expense of software libraries operating more slowly or in some cases not working at all.
```
base.download()
hdmi_in.configure()
hdmi_out.configure(hdmi_in.mode)
hdmi_out.cacheable_frames = False
hdmi_in.cacheable_frames = False
hdmi_out.start()
hdmi_in.start()
```
Re-running the plain read-write loop now shows 60 FPS
```
numframes = 600
start = time.time()
for _ in range(numframes):
f = hdmi_in.readframe()
hdmi_out.writeframe(f)
end = time.time()
print("Frames per second: " + str(numframes / (end - start)))
```
At the expense of much slower OpenCV performance
```
numframes = 10
start = time.time()
for _ in range(numframes):
inframe = hdmi_in.readframe()
cv2.cvtColor(inframe,cv2.COLOR_BGR2GRAY,dst=grayscale)
inframe.freebuffer()
cv2.Laplacian(grayscale, cv2.CV_8U, dst=result)
outframe = hdmi_out.newframe()
cv2.cvtColor(result, cv2.COLOR_GRAY2BGR,dst=outframe)
hdmi_out.writeframe(outframe)
end = time.time()
print("Frames per second: " + str(numframes / (end - start)))
hdmi_out.close()
hdmi_in.close()
```
## Gray-scale
Using the new infrastructure we can delegate the color conversion to the hardware, as well as pass only a single grayscale channel to and from the processor.
First reconfigure the pipelines in grayscale mode and tie the two together to make sure everything is working correctly.
```
base.download()
hdmi_in.configure(PIXEL_GRAY)
hdmi_out.configure(hdmi_in.mode)
hdmi_in.cacheable_frames = True
hdmi_out.cacheable_frames = True
hdmi_in.start()
hdmi_out.start()
hdmi_in.tie(hdmi_out)
```
Now we can rewrite the loop without the software colour conversion.
```
start = time.time()
numframes = 30
for _ in range(numframes):
inframe = hdmi_in.readframe()
outframe = hdmi_out.newframe()
cv2.Laplacian(inframe, cv2.CV_8U, dst=outframe)
inframe.freebuffer()
hdmi_out.writeframe(outframe)
end = time.time()
print("Frames per second: " + str(numframes / (end - start)))
hdmi_out.close()
hdmi_in.close()
```
## Other modes
There are two other 24 bit modes that are useful for interacting with PIL. The first is regular RGB mode.
```
base.download()
hdmi_in.configure(PIXEL_RGB)
hdmi_out.configure(hdmi_in.mode, PIXEL_RGB)
hdmi_in.start()
hdmi_out.start()
hdmi_in.tie(hdmi_out)
```
This is useful for easily creating and displaying frames with Pillow.
```
import PIL.Image
frame = hdmi_in.readframe()
image = PIL.Image.fromarray(frame)
image
```
An alternative mode is YCbCr which is useful for some image processing algorithms or exporting JPEG files. Because we are not changing the number of bits per pixel we can update the colorspace of the input dynamically.
```
hdmi_in.colorspace = COLOR_IN_YCBCR
```
It's probably worth updating the output colorspace as well to avoid the psychedelic effects
```
hdmi_out.colorspace = COLOR_OUT_YCBCR
```
Now we can use PIL to read in the frame and perform the conversion back for us.
```
import PIL.Image
frame = hdmi_in.readframe()
image = PIL.Image.fromarray(frame, "YCbCr")
frame.freebuffer()
image.convert("RGB")
hdmi_out.close()
hdmi_in.close()
```
## Next Steps
This notebook has only provided an overview of the base overlay video pipeline. One of the reasons for the changes was to make it easier to add hardware accelerated functions, by supporting a wider range of pixel formats without software conversion and by separating the HDMI front end from the video DMA. Explore the code in pynq/lib/video.py for more details.
|
github_jupyter
|
## Deep Learning Regularization
😓Be prepared: code that worked for me may not work for you any more. It took me so much time tonight to debug, upgrade/install packages, change deprecated functions or just ignore warnings... all because of the frequent changes in these open source packages. So, when it's your turn to try the code, who knows whether it still works...
💝However, when you read my code, you are lucky! At least I took notes on the things you need to care about, including the solutions.
❣️Also note, for the model evaluation here I didn't evaluate all the testing data, because labeling all those testing images takes a huge amount of time and I'm really busy. <b>However</b>, you can pay attention to val_acc and val_loss (higher val_acc and lower val_loss are better).
Reference: https://www.analyticsvidhya.com/blog/2018/04/fundamentals-deep-learning-regularization-techniques/?utm_source=feedburner&utm_medium=email&utm_campaign=Feed%3A+AnalyticsVidhya+%28Analytics+Vidhya%29
<b>Get data from here</b>: https://datahack.analyticsvidhya.com/contest/practice-problem-identify-the-digits/
```
%matplotlib inline
import os
import numpy as np
import pandas as pd
from imageio import imread
from sklearn.metrics import accuracy_score
import pylab
import tensorflow as tf
import keras
```
### NOTE
You may get an error saying it cannot import the module "weakref". This problem did not exist before but just appeared...
Here's my solution:
1. Find your tensorflow path by typing `pip show tensorflow`
2. Find tensorflow/python/util/tf_should_use.py, open it
3. Change `from backports import weakref` to `import weakref`
4. Then comment out the line that contains the `finalize()` function; this is for garbage collection, but the finalize function does not exist in weakref in my case.... 😓
5. Restart your ipython
```
seed = 10
rng = np.random.RandomState(seed)
train = pd.read_csv('digit_recognition/train.csv')
test = pd.read_csv('digit_recognition/test.csv')
train.head()
img_name = rng.choice(train.filename)
training_image_path = 'digit_recognition/Images/train/' + img_name
training_img = imread(training_image_path, as_gray=True)
pylab.imshow(training_img, cmap='gray')
pylab.axis('off')
pylab.show()
training_img[7:9]
# store all images as numpy arrays, to make data manipulation easier
temp = []
for img_name in train.filename:
training_image_path = 'digit_recognition/Images/train/' + img_name
training_img = imread(training_image_path, as_gray=True)
img = training_img.astype('float32')
temp.append(img)
train_x = np.stack(temp)
train_x /= 255.0
train_x = train_x.reshape(-1, 784).astype('float32')
temp = []
for img_name in test.filename:
testing_image_path = 'digit_recognition/Images/test/' + img_name
testing_img = imread(testing_image_path, as_gray=True)
img = testing_img.astype('float32')
temp.append(img)
test_x = np.stack(temp)
test_x /= 255.0
test_x = test_x.reshape(-1, 784).astype('float32')
train_x
train_y = keras.utils.np_utils.to_categorical(train.label.values)
# split into training and validation sets, 7:3
split_size = int(train_x.shape[0]*0.7)
train_x, val_x = train_x[:split_size], train_x[split_size:]
train_y, val_y = train_y[:split_size], train_y[split_size:]
train.label.iloc[split_size:split_size+2]
from keras.models import Sequential
from keras.layers import Dense
# define variables
input_num_units = 784
hidden1_num_units = 500
hidden2_num_units = 500
hidden3_num_units = 500
hidden4_num_units = 500
hidden5_num_units = 500
output_num_units = 10
epochs = 10
batch_size = 128
```
### NOTE
Keras has been updated to 2.0.
Without updating Keras, the way you use the `Dense()` function may keep giving warnings.
* Here's the Keras 2.0 documentation: https://keras.io/
* To update Keras, type `sudo pip install --upgrade keras==2.1.3`. It has to be Keras 2.1.3; if it's higher, softmax may get an error below.... (this is why I hate deep learning when you have to use open source!)
* Holy s**t, even after updating, you will get many warnings again, just ignore them..
```
# Method 1 - Without Regularization
import warnings
warnings.filterwarnings('ignore')
model = Sequential()
model.add(Dense(output_dim=hidden1_num_units, input_dim=input_num_units, activation='relu'))
model.add(Dense(output_dim=hidden1_num_units, input_dim=input_num_units, activation='relu'))
model.add(Dense(output_dim=hidden2_num_units, input_dim=hidden1_num_units, activation='relu'))
model.add(Dense(output_dim=hidden3_num_units, input_dim=hidden2_num_units, activation='relu'))
model.add(Dense(output_dim=hidden4_num_units, input_dim=hidden3_num_units, activation='relu'))
model.add(Dense(output_dim=hidden5_num_units, input_dim=hidden4_num_units, activation='relu'))
model.add(Dense(output_dim=output_num_units, input_dim=hidden5_num_units, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
trained_model_5d = model.fit(train_x, train_y, nb_epoch=epochs, batch_size=batch_size, validation_data=(val_x, val_y))
# one sample evaluation
pred = model.predict_classes(test_x)
img_name = rng.choice(test.filename)
testing_image_path = 'digit_recognition/Images/test/' + img_name
testing_img = imread(testing_image_path, as_gray=True)
test_index = int(img_name.split('.')[0]) - train.shape[0]
print "Prediction is: ", pred[test_index]
pylab.imshow(testing_img, cmap='gray')
pylab.axis('off')
pylab.show()
from keras import regularizers
# Method 2 - With L2 regularizer
model = Sequential([
Dense(output_dim=hidden1_num_units, input_dim=input_num_units, activation='relu',
kernel_regularizer=regularizers.l2(0.0001)), # lambda = 0.0001
Dense(output_dim=hidden2_num_units, input_dim=hidden1_num_units, activation='relu',
kernel_regularizer=regularizers.l2(0.0001)),
Dense(output_dim=hidden3_num_units, input_dim=hidden2_num_units, activation='relu',
kernel_regularizer=regularizers.l2(0.0001)),
Dense(output_dim=hidden4_num_units, input_dim=hidden3_num_units, activation='relu',
kernel_regularizer=regularizers.l2(0.0001)),
Dense(output_dim=hidden5_num_units, input_dim=hidden4_num_units, activation='relu',
kernel_regularizer=regularizers.l2(0.0001)),
Dense(output_dim=output_num_units, input_dim=hidden5_num_units, activation='softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
trained_model_5d = model.fit(train_x, train_y, nb_epoch=epochs, batch_size=batch_size, validation_data=(val_x, val_y))
# Method 3 - L1 Regularizer
model = Sequential([
Dense(output_dim=hidden1_num_units, input_dim=input_num_units, activation='relu',
kernel_regularizer=regularizers.l1(0.0001)),
Dense(output_dim=hidden2_num_units, input_dim=hidden1_num_units, activation='relu',
kernel_regularizer=regularizers.l1(0.0001)),
Dense(output_dim=hidden3_num_units, input_dim=hidden2_num_units, activation='relu',
kernel_regularizer=regularizers.l1(0.0001)),
Dense(output_dim=hidden4_num_units, input_dim=hidden3_num_units, activation='relu',
kernel_regularizer=regularizers.l1(0.0001)),
Dense(output_dim=hidden5_num_units, input_dim=hidden4_num_units, activation='relu',
kernel_regularizer=regularizers.l1(0.0001)),
Dense(output_dim=output_num_units, input_dim=hidden5_num_units, activation='softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
trained_model_5d = model.fit(train_x, train_y, nb_epoch=epochs, batch_size=batch_size, validation_data=(val_x, val_y))
# method 4 - Dropout
from keras.layers.core import Dropout
model = Sequential([
Dense(output_dim=hidden1_num_units, input_dim=input_num_units, activation='relu'),
Dropout(0.25), # the drop probability is 0.25
Dense(output_dim=hidden2_num_units, input_dim=hidden1_num_units, activation='relu'),
Dropout(0.25),
Dense(output_dim=hidden3_num_units, input_dim=hidden2_num_units, activation='relu'),
Dropout(0.25),
Dense(output_dim=hidden4_num_units, input_dim=hidden3_num_units, activation='relu'),
Dropout(0.25),
Dense(output_dim=hidden5_num_units, input_dim=hidden4_num_units, activation='relu'),
Dropout(0.25),
Dense(output_dim=output_num_units, input_dim=hidden5_num_units, activation='softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
trained_model_5d = model.fit(train_x, train_y, nb_epoch=epochs, batch_size=batch_size, validation_data=(val_x, val_y))
# method 5 - early stopping
from keras.callbacks import EarlyStopping
from keras.layers.core import Dropout
import warnings
warnings.filterwarnings('ignore')
model = Sequential([
Dense(output_dim=hidden1_num_units, input_dim=input_num_units, activation='relu'),
Dropout(0.25), # the drop probability is 0.25
Dense(output_dim=hidden2_num_units, input_dim=hidden1_num_units, activation='relu'),
Dropout(0.25),
Dense(output_dim=hidden3_num_units, input_dim=hidden2_num_units, activation='relu'),
Dropout(0.25),
Dense(output_dim=hidden4_num_units, input_dim=hidden3_num_units, activation='relu'),
Dropout(0.25),
Dense(output_dim=hidden5_num_units, input_dim=hidden4_num_units, activation='relu'),
Dropout(0.25),
Dense(output_dim=output_num_units, input_dim=hidden5_num_units, activation='softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
trained_model_5d = model.fit(train_x, train_y, nb_epoch=epochs, batch_size=batch_size, validation_data=(val_x, val_y),
callbacks = [EarlyStopping(monitor='val_acc', patience=2)])
# method 6 - Data Augmentation
from keras.preprocessing.image import ImageDataGenerator
datagen = ImageDataGenerator(zca_whitening=True)
# zca_whitening as the argument, will highlight the outline of each digit
train = pd.read_csv('digit_recognition/train.csv')
temp = []
for img_name in train.filename:
training_image_path = 'digit_recognition/Images/train/' + img_name
training_img = imread(training_image_path, as_gray=True)
img = training_img.astype('float32')
temp.append(img)
train_x = np.stack(temp)
# The difference with above starts from here:
train_x = train_x.reshape(train_x.shape[0], 1, 28, 28)
train_x = train_x.astype('float32')
# fit parameters from data
## fit the training data in order to augment
datagen.fit(train_x) # This will often cause the kernel to die on my machine
# data spliting
split_size = int(train_x.shape[0]*0.7)
train_x, val_x = train_x[:split_size], train_x[split_size:]
train_y, val_y = train_y[:split_size], train_y[split_size:]
# train the model with drop out
model = Sequential([
Dense(output_dim=hidden1_num_units, input_dim=input_num_units, activation='relu'),
Dropout(0.25), # the drop probability is 0.25
Dense(output_dim=hidden2_num_units, input_dim=hidden1_num_units, activation='relu'),
Dropout(0.25),
Dense(output_dim=hidden3_num_units, input_dim=hidden2_num_units, activation='relu'),
Dropout(0.25),
Dense(output_dim=hidden4_num_units, input_dim=hidden3_num_units, activation='relu'),
Dropout(0.25),
Dense(output_dim=hidden5_num_units, input_dim=hidden4_num_units, activation='relu'),
Dropout(0.25),
Dense(output_dim=output_num_units, input_dim=hidden5_num_units, activation='softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
trained_model_5d = model.fit(train_x, train_y, nb_epoch=epochs, batch_size=batch_size, validation_data=(val_x, val_y))
```
### Observations
1. Comparing the val_loss and val_acc of each regularizer with the first method, we can see that dropout works best; it is the only one with lower val_loss and higher val_acc.
2. In the experiments here, applying early stopping on top of dropout didn't give better results; maybe it needs more `patience`, because if we observe each epoch, the val_loss does not simply drop along the way: it can increase in the middle and then drop again. This is why we need to be careful about the number of epochs and the patience value (see the sketch after this list).
3. L1 and L2 tend to give higher val_loss, especially L1.
4. On my machine, with limited memory now, data augmentation failed; it simply kills the kernel every time. No wonder dropout is the most frequently used regularizer.....
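For example, a small sketch (not re-run here) of giving early stopping more room, using the same Keras callback as in method 5:
```
# Monitor val_loss and allow more epochs without improvement before stopping.
more_patient_stopping = EarlyStopping(monitor='val_loss', patience=5)
# pass callbacks=[more_patient_stopping] to model.fit(...)
```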
|
github_jupyter
|
### Maricopa Agricultural Center Season 6
### Citation for Input Trait Data
LeBauer, David et al. (2020), Data From: TERRA-REF, An open reference data set from high resolution genomics, phenomics, and imaging sensors, v6, Dryad, Dataset, https://doi.org/10.5061/dryad.4b8gtht99
##### Environmental weather data can be downloaded from the MAC weather station [website](https://cals.arizona.edu/azmet/06.htm)
Please email [email protected] or [email protected] with any questions or comments, or create an issue in this [repository](https://github.com/genophenoenvo/terraref-datasets)
```
import datetime
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import requests
def download_csv(url, folder_name, file_name):
response = requests.get(url)
with open(os.path.join(folder_name, file_name), 'wb') as f:
f.write(response.content)
def read_in_csv(folder_name, file_name):
df = pd.read_csv(folder_name + '/' + file_name, low_memory=False)
return df
def plot_hist(df, value_column, trait_column):
trait_name = df[trait_column].unique()[0]
return df[value_column].hist(color='navy').set_xlabel(trait_name);
def check_for_nulls_duplicates(df):
print(
f'Sum of null values:\n{df.isnull().sum()}\n-----\n'
f'Value counts for duplicates:\n{df.duplicated().value_counts()}'
)
def check_unique_values(df):
for col in df.columns:
if df[col].nunique() < 5:
print(f'{df[col].nunique()} unique value(s) for {col} column: {df[col].unique()}')
else:
print(f'{df[col].nunique()} values for {col} column')
def extract_range_column_values(working_df, plot_column):
new_df = working_df.copy()
new_df['range'] = new_df[plot_column].str.extract("Range (\d+)").astype(int)
new_df['column'] = new_df[plot_column].str.extract("Column (\d+)").astype(int)
return new_df
def convert_datetime_column(working_df, date_column):
new_datetimes = pd.to_datetime(working_df[date_column])
new_df_0 = working_df.drop(labels=date_column, axis=1)
new_df_1 = new_df_0.copy()
new_df_1['date'] = new_datetimes
return new_df_1
def rename_value_column(working_df, value_column, trait_column):
trait = working_df[trait_column].unique()[0]
new_df_0 = working_df.rename({value_column: trait}, axis=1)
new_df_1 = new_df_0.drop(labels=trait_column, axis=1)
return new_df_1
def reorder_columns(working_df, new_col_order_list):
working_df_1 = pd.DataFrame(data=working_df, columns=new_col_order_list)
return working_df_1
def save_to_csv_with_timestamp(df, name_of_dataset):
timestamp = datetime.datetime.now().replace(microsecond=0).isoformat()
output_filename = ('data/processed/' + f'{name_of_dataset}_' + f'{timestamp}.csv').replace(':', '')
df.to_csv(output_filename, index=False)
def save_to_csv_without_timestamp(list_of_dfs, list_of_output_filenames):
for i,j in zip(list_of_dfs, list_of_output_filenames):
i.to_csv(j, index=False)
```
#### A. Aboveground Dry Biomass
```
folder_name = 'data'
if not os.path.exists(folder_name):
os.makedirs(folder_name)
aboveground_dry_biomass_s6_url = 'https://de.cyverse.org/dl/d/1333BF0F-9462-4F0A-8D35-2B446F0CC989/season_6_aboveground_dry_biomass_manual.csv'
aboveground_dry_biomass_s6_input_filename = 'aboveground_dry_biomass_s6.csv'
download_csv(aboveground_dry_biomass_s6_url, folder_name=folder_name, file_name=aboveground_dry_biomass_s6_input_filename)
adb_0 = read_in_csv(folder_name=folder_name, file_name=aboveground_dry_biomass_s6_input_filename)
# print(adb_0.shape)
# adb_0.head()
# plot_hist(adb_0, 'mean', 'trait')
# check_for_nulls_duplicates(adb_0)
adb_1 = extract_range_column_values(adb_0, 'plot')
# print(adb_1.shape)
# adb_1.sample(n=3)
```
#### Add Blocking Heights
```
bh_s6_url = 'https://de.cyverse.org/dl/d/73900334-1A0F-4C56-8F96-FAC303671431/s6_blocks.csv.txt'
bh_s6_input_filename = 'blocking_heights_s6.csv'
download_csv(bh_s6_url, folder_name=folder_name, file_name=bh_s6_input_filename)
bh_df = read_in_csv(folder_name=folder_name, file_name=bh_s6_input_filename)
# print(bh_df.shape)
# bh_df.head()
# bh_df.height_block.value_counts()
# check_for_nulls_duplicates(bh_df)
bh_df_1 = bh_df.dropna(axis=0, how='all')
# bh_df_1.shape
```
#### Merge blocking heights with aboveground dry biomass dataframe
```
adb_2 = adb_1.merge(bh_df_1, how='left', left_on='plot', right_on='plot')
# print(adb_2.shape)
# adb_2.head(3)
adb_3 = convert_datetime_column(adb_2, 'date')
# print(adb_3.shape)
# adb_3.head(3)
adb_4 = rename_value_column(adb_3, 'mean', 'trait')
# print(adb_4.shape)
# adb_4.tail(3)
cols_to_drop = ['checked', 'author', 'season', 'treatment']
adb_5 = adb_4.drop(labels=cols_to_drop, axis=1)
# print(adb_5.shape)
# adb_5.head(3)
```
##### Add units (kg/ha) column to aboveground dry biomass dataset
```
adb_6 = adb_5.copy()
adb_6['units'] = 'kg/ha'
# print(adb_6.shape)
# adb_6.tail(3)
new_col_order = ['date', 'plot', 'range', 'column', 'scientificname', 'genotype', 'height_block', 'method',
'aboveground_dry_biomass', 'units', 'method_type']
adb_7 = reorder_columns(adb_6, new_col_order)
# print(adb_7.shape)
# adb_7.head(3)
```
#### B. Canopy Height - Sensor
```
canopy_height_s6_url = 'https://de.cyverse.org/dl/d/D069737A-76F3-4B69-A213-4B8811A357C0/season_6_canopy_height_sensor.csv'
canopy_height_s6_input_filename = 'canopy_height_s6.csv'
download_csv(canopy_height_s6_url, folder_name=folder_name, file_name=canopy_height_s6_input_filename)
ch_0 = read_in_csv(folder_name=folder_name, file_name=canopy_height_s6_input_filename)
# print(ch_0.shape)
# ch_0.head()
# check_for_nulls_duplicates(ch_0)
```
#### Drop duplicates
```
ch_1 = ch_0.drop_duplicates(ignore_index=True)
# print(ch_1.shape)
# check_for_nulls_duplicates(ch_1)
ch_2 = extract_range_column_values(ch_1, 'plot')
# print(ch_2.shape)
# ch_2.sample(n=3)
ch_3 = convert_datetime_column(ch_2, 'date')
# print(ch_3.shape)
# ch_3.dtypes
ch_4 = rename_value_column(ch_3, 'mean', 'trait')
# print(ch_4.shape)
# ch_4.tail(3)
# add units (cm) to column name
ch_5 = ch_4.rename({'canopy_height': 'canopy_height_cm'}, axis=1)
# ch_5.sample(n=3)
```
#### Add blocking heights
```
# bh_df_1.head(3)
print(bh_df_1['plot'].nunique())
print(ch_0['plot'].nunique())
```
There is not a height block provided for every plot, so the final canopy height dataframe will contain some nulls.
```
ch_6 = ch_5.merge(bh_df_1, how='left', left_on='plot', right_on='plot')
# print(ch_6.shape)
# ch_6.tail(3)
ch_7 = ch_6.drop(labels=['checked', 'author', 'season', 'treatment'], axis=1)
# print(ch_7.shape)
# ch_7.tail(3)
# ch_7.isnull().sum()
new_col_order = ['date', 'plot', 'range', 'column', 'scientificname', 'genotype', 'method', 'canopy_height_cm',
'height_block', 'method_type']
ch_8 = reorder_columns(ch_7, new_col_order)
# print(ch_8.shape)
# ch_8.head(3)
```
#### IV. Write derived data to csv files
```
list_of_dfs = [adb_7, ch_8]
list_of_file_output_names = ['mac_season_6_aboveground_dry_biomass.csv',
'mac_season_6_canopy_height_sensor.csv']
save_to_csv_without_timestamp(list_of_dfs, list_of_file_output_names)
```
|
github_jupyter
|
# Exploring Observation Data From TILDE, Application to DART Data
## Table of contents
### 1. Introduction
### 2. Building a Query for a specific sensor code/stream
### 3. Building a Query without sensor code/stream
### 4. Building a Query for the latest data
### 5. Building a Query for aggregated data
### 6. Getting the data using ObsPy
## 1. Introduction
In this tutorial we will learn how to use Python to access the TILDE API `data` endpoint. To highlight the different functionalities and the statistics available, we will use the DART (Deep-ocean Assessment and Reporting of Tsunamis) dataset. TILDE is the GeoNet API (Application Programming Interface) used here to access DART time series data. You do not need to know anything about APIs to use this tutorial. If you would like more information, see https://tilde.geonet.org.nz/.
This tutorial assumes you have basic knowledge of Python.
###### About GeoNet DART data
GeoNet uses the 12 DART tsunameters deployed offshore of New Zealand and around the Southwestern Pacific Ocean to monitor ocean height. When a change of a certain magnitude is detected, the buoy will "trigger" and go into a heightened detection mode. The DARTs have two operational reporting modes: standard and event. When in standard reporting mode, the BPR (bottom pressure recorder) and buoy system send four six-hour bundles of 15-minute water height values. When in event reporting mode, BPR data are sampled at 15-second intervals and are sent more regularly. The buoy's surface location (latitude and longitude) is also sent daily. <br>
TILDE provides access to the 15 minutes and 15 second sampled data.
For more DART information see the GeoNet page: https://www.geonet.org.nz/tsunami/dart
## 2. Building a Query for a specific sensor code/stream
###### Import required modules and set the source URL
```
import requests
import json
import pandas as pd
import matplotlib.pyplot as plt
from io import StringIO
source = 'https://tilde.geonet.org.nz'
```
### Request data for a specific sensor/stream with a date range, returning a CSV file
This query returns the observations of the specified data held in TILDE. <br>
The endpoint we are going to use is `https://tilde.geonet.org.nz/v1/data/`.
The minimum required parameters are:
- domain = `dart`
- key = `NZE/water-height/40/WTZ`, this is 15 minute sampled data for the station NZE
- startdate = '2021-03-05'
- enddate = '2021-03-05'
We will ask for data for 2021 March 05.
We begin by setting the URL with these new parameters.
```
url = source+'/v1/data/dart/NZE/water-height/40/WTZ/2021-03-05/2021-03-05'
```
We will now query the URL and ask for a CSV format to be returned
```
r = requests.get(url, headers={'Accept':'text/csv'})
print (r)
```
We use `requests.get` to retrieve the data from the URL. The response status code tells us whether the request was successful and, if not, why it failed (see the short sketch after this list):
<ul>
<li>200 -- everything went okay, and the result has been returned (if any)
<li>301 -- the server is redirecting you to a different endpoint. This can happen when a company switches domain names, or an endpoint name is changed.
<li>400 -- the server thinks you made a bad request. This can happen when you don't send along the right data, among other things.
<li>404 -- the resource you tried to access wasn't found on the server.
</ul>
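For example (a minimal sketch, reusing the `r` response object from the request above), we might guard against a failed request before trying to parse the data:
```
# Only proceed if the request succeeded; otherwise raise a descriptive error
if r.status_code == 200:
    print("OK, received", len(r.text), "characters of CSV")
else:
    r.raise_for_status()  # raises requests.exceptions.HTTPError with the status and reason
```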
To work with the observation data we will use Python's pandas module (https://pandas.pydata.org/). We will now store the response of our request in a pandas dataframe (`df`) using `pd.read_csv`. By using `parse_dates=['time']` we convert the 'time' column to a datetime, and with `index_col` we set the time as the index of the dataframe. More information on `pd.read_csv` can be found here: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html. We need to use the `StringIO` function with the text returned from our query. By printing the beginning of the result (`df.head()`) we can also see the effect of this new index column.
```
df = pd.read_csv(StringIO(r.text),parse_dates=['time'], index_col='time')
df.head()
```
#### Data Summary
By using `df.describe` we can summarise the returned data, as this feature generates descriptive statistics from dataframes. From the result we can see that there are 59 values (and errors), and that all of the qc values are currently undefined. By default, we also get to see the mean, standard deviation, minimum, maximum, and some percentile values.
```
df.describe()
```
#### Basic Data Plot
By using the `value` column from our dataframe we are able to plot the data against time. For this plot we will use dots and a line so that periods without data are obvious. As we are currently plotting low rate data (WTZ), where there is a gap in the data it is likely that high rate data (UTZ) exist for that period instead.
```
fig,ax = plt.subplots(figsize=(15,5))
df['value'].plot(ax=ax, color='#41b0f0', label='low rate (15 mins)')
df['value'].plot(ax=ax, color='blue', marker='.', linestyle='None')
#stop exponential format for y-axis label
ax.ticklabel_format(axis='y', useOffset=False, style='plain')
plt.legend(loc='best')
```
## 3. Building a Query without sensor code/stream
TILDE can also provide all of the available data for a date range without having to specify the sensor code and stream parameters.
The minimum required parameters are:
- domain = `dart`
- key = `NZE/water-height/-/-`
- startdate = '2021-03-05'
- enddate = '2021-03-05'
Using the above parameters will return data from all sensor codes (for NZE there is only sensor code 40 for this date) and all data types (WTZ and UTZ). These will be returned in CSV format.
The available location codes for each station are available in GeoNet's github repository (https://github.com/GeoNet/delta/blob/main/network/sites.csv). Sensor codes will be provided in the `Location` column (and for DART those will be either 40 or 41). <br>
We will begin by following similar steps to Section 2: changing the URL to use the new parameters, querying the URL and asking for the CSV format to be returned.
```
url = source+'/v1/data/dart/NZE/water-height/-/-/2021-03-05/2021-03-05'
r = requests.get(url, headers={'Accept':'text/csv'})
df = pd.read_csv(StringIO(r.text),parse_dates=['time'], index_col='time')
```
##### Sorted by type not date
By printing the top 2 rows of the dataframe (`df.head(2)`) and the bottom 2 rows (`df.tail(2)`), we can see that the data returned is sorted by type (UTZ/WTZ) and not by date.
```
df.head(2)
df.tail(2)
```
#### Separate UTZ and WTZ data into different dataframes
<br>
By separating into two distinct dataframes using the `type` column we can then separate out the UTZ and the WTZ values.
```
dfw = df[df['type']=='WTZ']
dfu = df[df['type']=='UTZ']
```
#### Basic visualization
<br>
As the two datatypes (UTZ and WTZ) have now been separated, we can plot them with different colours. This is similar to the plot above, but now it is possible to see the low rate and the high rate data and how they fit together.
```
fig,ax = plt.subplots(figsize=(15,5))
ax.plot(dfw['value'], color='#41b0f0', label='low rate (15 mins)')
ax.plot(dfw['value'], color='blue', marker='.', linestyle='None')  # dots only; legend label already set above
ax.plot(dfu['value'], color='#f07b41', marker='.', linestyle='None', label='high rate (15 secs)')
#stop exponential format for y-axis label
ax.ticklabel_format(axis='y', useOffset=False, style='plain')
plt.legend(loc='best')
```
## 4. Building a Query for the latest data
This query returns the observations of the specified data held in TILDE. The endpoint is `https://tilde.geonet.org.nz/v1/data/`.
The minimum required parameters are:
- domain = `dart`
- key = `NZE/water-height/40/WTZ`, this is 15 minute sampled data
- startdate = 'latest'
- enddate = '30d'
We will begin by following similar steps to those above: changing the URL to use the new parameters, querying the URL, and asking for the CSV format to be returned. This request will return data in CSV format for the last 30 days.
```
url = source+'/v1/data/dart/NZE/water-height/40/WTZ/latest/30d'
r = requests.get(url, headers={'Accept':'text/csv'})
df = pd.read_csv(StringIO(r.text),parse_dates=['time'], index_col='time')
```
We can see in the tail of the dataframe that we have the `latest` or most recent data.
```
df.tail()
```
#### Data volume
<br>
By using `len(df.index)` we can easily generate a row count of the dataframe. This can be useful to see how many values we received for a station in a certain period of time. As we are looking at the low rate data, this count is likely to be quite predictable, since this is the data we receive regularly; for the high rate (triggered) data we would expect far fewer values, unless there has been a lot of recent activity. In this case, for the last 30 days we have received nearly 3000 observations of low rate data.
```
len(df.index)
```
#### Basic visualization of the data
<br>
Plotting the low rate data over the 30 day period allows us to visualise the different tidal periods.
```
fig,ax = plt.subplots(figsize=(15,5))
df['value'].plot(ax=ax, color='#41b0f0', label='low rate (15 mins)')
#stop exponential format for y-axis label
ax.ticklabel_format(axis='y', useOffset=False, style='plain')
plt.legend(loc='best')
```
## 5. Building a query for aggregated data
<br>
When requesting a large amount of data over a long time period, the query can be optimized for quick visualisation using the optional `aggregationPeriod` and `aggregationFunction` parameters. We will use the same example as above, but for an 8-month range (2021-01-01 to 2021-08-31), and use an aggregation period of 1 day to return a daily average of the values. Notice that, due to the aggregation, our dataframe's index column has time values of 00:00:00.
```
url = source+'/v1/data/dart/NZE/water-height/-/-/2021-01-01/2021-08-31?aggregationPeriod=1d&aggregationFunction=avg'
r = requests.get(url, headers={'Accept':'text/csv'})
df = pd.read_csv(StringIO(r.text),parse_dates=['time'], index_col='time')
df.tail(5)
fig,ax = plt.subplots(figsize=(15,5))
df['value'].plot(ax=ax, color='#41b0f0', label='low rate (15 mins)')
ax.ticklabel_format(axis='y', useOffset=False, style='plain')
plt.legend(loc='best')
```
## 6. Getting the data using ObsPy
ObsPy (https://github.com/obspy/obspy/wiki) is a Python module used for the analysis of seismological data.
By getting the data into ObsPy we can use all of the functionality that comes with it. To enable us to do this, we will use the TSPAIR format. More information on this can be found here: https://docs.obspy.org/packages/autogen/obspy.io.ascii.core._write_tspair.html
To begin, we will create a dataframe column that is formatted as needed. A change of formatting is required because ObsPy modules can't read the time series as it is, so we will change the time format to look like this: YYYY-MM-DDTHH:MM:SS ('%Y-%m-%dT%H:%M:%S').
```
#importing the obspy read module
from obspy import read
url = source+'/v1/data/dart/NZE/water-height/40/WTZ/latest/30d'
r = requests.get(url, headers={'Accept':'text/csv'})
df = pd.read_csv(StringIO(r.text),parse_dates=['time'], index_col='time')
df['tseries'] = df.index.strftime('%Y-%m-%dT%H:%M:%S')
#print(df['tseries'])
```
Next we need to generate a header for the time-series file, where we specify a few parameters. This is required so that ObsPy can read the file, and it carries important metadata such as the sampling rate.
TSPAIR is a simple ASCII time series format. Each continuous time series segment (no gaps or overlaps) is represented by a header line followed by data samples in time-sample pairs. There are no restrictions on how the segments are organized into files; a file might contain a single segment or many concatenated segments, either for the same channel or for many different channels.
Header lines have the general form: TIMESERIES SourceName, # samples, # sps, Time, Format, Type, Units.
The sourcename should be of the format `Net_Sta_Loc_Chan_Qual`, so for NZE this is `NZ_NZE_40_WTZ_R`. For the number of samples, we can use `len(df.index)` as above. For the number of samples per second, as we are using low rate data (WTZ), one sample every 15 minutes corresponds to roughly 0.0011111 samples per second. For time, we use the time dataframe column that we generated above. The format is TSPAIR, the datatype is a float and the units are `mm`.
```
sourcename = 'NZ_NZE_40_WTZ_R'
samples = len(df.index)
sps = 0.0011111111111111111
time = df['tseries'][0]
dformat = 'TSPAIR'
dtype = 'FLOAT'
units = 'mm'
headerstr = 'TIMESERIES '+sourcename+', '+str(samples)+' samples, '+str(sps)+' sps, '+time+', TSPAIR, FLOAT, mm\n'
```
First we open a new file called tspair.dat, then we write the appropriate header string and then using `df.to_csv` we can write the time-series data to the same file. Finally, we close the file.
```
f = open('tspair.dat', 'w')
f.write(headerstr)
df.to_csv(f, columns=['tseries', 'value'], sep=' ', index=False, header=False, mode='a')
f.close()
```
We can now read the file `tspair.dat` as an ObsPy stream using `read()`, where the file format is TSPAIR, as specified when we generated the header string.
```
st = read('tspair.dat', format='TSPAIR')
```
From this stream we can then pull out the first trace (`tr`) and print its statistics. These statistics are generated from the header string that we made beforehand, which is why it is important that those details are correct.
```
tr=st[0]
tr.stats
```
As a final step, we can also plot this trace and see how it compares to the waveform that we generated at the end of Section 4.
```
tr.plot()
```
|
github_jupyter
|
## _*Quantum SVM (variational method)*_
The QSVMKernel notebook demonstrates a kernel-based approach; this notebook shows a variational method.
For further information please see: [https://arxiv.org/pdf/1804.11326.pdf](https://arxiv.org/pdf/1804.11326.pdf)
**This notebook shows the SVM implementation based on the variational method.**
In this notebook, we show two ways of using the quantum variational method: (1) the non-programming way and (2) the programming way.
### Part I: non-programming way.
In the non-programming way, we provide a JSON-like configuration, which defines how the SVM instance is constructed internally. After execution, it returns JSON-like output, which carries the important information (e.g., the details of the SVM instance) and the processed results.
```
from datasets import *
from qiskit_aqua.utils import split_dataset_to_data_and_labels, map_label_to_class_name
from qiskit import Aer
from qiskit_aqua.input import SVMInput
from qiskit_aqua import run_algorithm, QuantumInstance
from qiskit_aqua.algorithms import QSVMVariational
from qiskit_aqua.components.optimizers import SPSA
from qiskit_aqua.components.feature_maps import SecondOrderExpansion
from qiskit_aqua.components.variational_forms import RYRZ
```
First we prepare the dataset, which is used for training, testing, and finally prediction.
*Note: You can easily switch to a different dataset, such as the Breast Cancer dataset, by replacing 'ad_hoc_data' with 'Breast_cancer' below (see the sketch that follows).*
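For example, the swap might look like the sketch below. This is only an assumption about the `Breast_cancer` helper's signature (we assume it mirrors the sizing arguments of `ad_hoc_data`); check your local `datasets` module before using it.
```
# Hypothetical swap to the Breast Cancer dataset (signature assumed to mirror ad_hoc_data)
sample_Total, training_input, test_input, class_labels = Breast_cancer(
    training_size=training_dataset_size,
    test_size=testing_dataset_size,
    n=feature_dim,
    PLOT_DATA=True)
```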
```
feature_dim = 2 # dimension of each data point
training_dataset_size = 20
testing_dataset_size = 10
random_seed = 10598
shots = 1024
sample_Total, training_input, test_input, class_labels = ad_hoc_data(training_size=training_dataset_size,
test_size=testing_dataset_size,
n=feature_dim, gap=0.3, PLOT_DATA=True)
datapoints, class_to_label = split_dataset_to_data_and_labels(test_input)
print(class_to_label)
```
Now we create the SVM in the non-programming way.
In the following JSON-like dictionary, we configure:
- the algorithm name
- the variational form
- the feature map
- the optimizer
```
params = {
'problem': {'name': 'svm_classification', 'random_seed': 10598},
'algorithm': {'name': 'QSVM.Variational', 'override_SPSA_params': True},
'backend': {'shots': 1024},
'optimizer': {'name': 'SPSA', 'max_trials': 200, 'save_steps': 1},
'variational_form': {'name': 'RYRZ', 'depth': 3},
'feature_map': {'name': 'SecondOrderExpansion', 'depth': 2}
}
svm_input = SVMInput(training_input, test_input, datapoints[0])
backend = Aer.get_backend('qasm_simulator')
```
With everything set up, we can now run the algorithm.
For the testing, the result includes the details and the success ratio.
For the prediction, the result includes the predicted labels.
```
result = run_algorithm(params, svm_input, backend=backend)
print("testing success ratio: ", result['testing_accuracy'])
print("predicted classes:", result['predicted_classes'])
```
### Part II: programming way.
We construct the SVM instance directly from the classes. The programming way offers better accessibility; e.g., users can access the internal state of the SVM instance or invoke its methods.
Now we create the SVM in the programming way.
- We build the optimizer instance (required by the SVM instance) by instantiating the class SPSA.
- We build the feature map instance (required by the SVM instance) by instantiating the class SecondOrderExpansion.
- We build the variational form instance (required by the SVM instance) by instantiating the class RYRZ.
- We build the SVM instance by instantiating the class QSVMVariational.
```
backend = Aer.get_backend('qasm_simulator')
optimizer = SPSA(max_trials=100, c0=4.0, skip_calibration=True)
optimizer.set_options(save_steps=1)
feature_map = SecondOrderExpansion(num_qubits=feature_dim, depth=2)
var_form = RYRZ(num_qubits=feature_dim, depth=3)
svm = QSVMVariational(optimizer, feature_map, var_form, training_input, test_input)
quantum_instance = QuantumInstance(backend, shots=shots, seed=random_seed, seed_mapper=random_seed)
```
Now we run it.
```
result = svm.run(quantum_instance)
print("testing success ratio: ", result['testing_accuracy'])
```
Unlike the non-programming way, the programming way allows users to invoke APIs on the SVM instance directly. In the following, we invoke the `predict` API on the trained SVM instance to predict the labels for newly provided data input.
We use the trained model to evaluate data directly; the `label_to_class` and `class_to_label` mappings are stored to help convert between labels and class names.
```
predicted_probs, predicted_labels = svm.predict(datapoints[0])
predicted_classes = map_label_to_class_name(predicted_labels, svm.label_to_class)
print("prediction: {}".format(predicted_labels))
```
|
github_jupyter
|
```
import xai
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sn
from sklearn import preprocessing
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, confusion_matrix, precision_score, recall_score, roc_curve, auc, accuracy_score
from AutoAIAlgorithm.ParticleSwarmOptimization import PSO
df = pd.read_csv("data/covid-19.csv")
columns_to_delete = ['Patient ID', 'Patient addmited to semi-intensive unit (1=yes, 0=no)',
'Patient addmited to intensive care unit (1=yes, 0=no)',
'Patient addmited to regular ward (1=yes, 0=no)',
'Metapneumovirus', 'Respiratory Syncytial Virus', 'Influenza A',
'Influenza B', 'Parainfluenza 1', 'CoronavirusNL63', 'Rhinovirus/Enterovirus',
'Mycoplasma pneumoniae', 'Coronavirus HKU1', 'Parainfluenza 3', 'Chlamydophila pneumoniae',
'Adenovirus', 'Parainfluenza 4', 'Coronavirus229E', 'CoronavirusOC43', 'Inf A H1N1 2009',
'Bordetella pertussis', 'Metapneumovirus', 'Parainfluenza 2', 'Influenza B, rapid test',
'Influenza A, rapid test', 'Strepto A']
df = df.drop(columns_to_delete, axis=1)
df_no_nan = df.dropna(subset=['Hematocrit'])
df_clean = df_no_nan.loc[:, df_no_nan.isnull().mean() < 0.7]
df_clean
ims = xai.imbalance_plot(df_clean, "SARS-Cov-2 exam result")
bal_df = xai.balance(df_clean, "SARS-Cov-2 exam result", upsample=0.4)
y = np.asarray(bal_df['SARS-Cov-2 exam result'])
y = [1 if x == 'positive' else 0 for x in y]
X = bal_df.drop(['SARS-Cov-2 exam result'], axis=1)
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
idf = pd.DataFrame(imputer.fit_transform(X))
idf.columns=X.columns
idf.index=X.index
X = idf
X
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
num_particles=10
num_iterations=10
pso = PSO(particle_count=num_particles, distance_between_initial_particles=1.0, evaluation_metric=f1_score)
best_metric, best_model = pso.fit(X_train=x_train,
X_test=x_test,
Y_train=y_train,
Y_test=y_test,
maxiter=num_iterations,
verbose=True,
max_distance=0.05)
best_model
best_metric
y_pred = best_model.predict(x_test)
import seaborn as sn
df_cm = pd.DataFrame(confusion_matrix(y_test, y_pred), ["negative", "positive"], ["negative", "positive"])
# plt.figure(figsize=(10,7))
sn.set(font_scale=1.4) # for label size
sn.heatmap(df_cm, annot=True, annot_kws={"size": 16}, cmap="Blues", fmt='d') # font size
plt.show()
accuracy_score(y_test, y_pred)
precision_score(y_test, y_pred)
recall_score(y_test, y_pred)
f1_score(y_test, y_pred)
fpr, tpr, thresholds = roc_curve(y_test, y_pred)
auc(fpr, tpr)
def get_avg(x, y):
return f1_score(y, best_model.predict(x))
imp = xai.feature_importance(x_test, y_test, get_avg)
imp.head()
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/Eoli-an/Exam-topic-prediction/blob/main/Slides_vs_Transcribes_Frequency.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Plot for Dense Ranks of Word Usage in Slides and Transcribes of Relevant Words
For this plot we analyse the relationship between the word frequency of the slides and the word frequency of the lecture transcribes. We only analyse hand-picked words that are relevant for predicting exam topics or their difficulty.
```
!pip install scattertext
!pip install tika
!pip install textblob
import pandas as pd
import glob
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
import scattertext as st
from tika import parser
from textblob import TextBlob
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('brown')
```
The slides are expected to be in a folder called Slides. The transcribes are expected to be in a folder called Transcribes.
```
lectures_spoken = []
path = 'Transcribes/*.txt'
files=glob.glob(path)
for file in sorted(files):
with open(file, 'r') as f:
lectures_spoken.append(f.read())
lectures_spoken = " ".join(lectures_spoken)
lectures_pdf = []
path = 'Slides/*.pdf'
files=glob.glob(path)
for file in sorted(files):
lectures_pdf.append(parser.from_file(file)["content"])
lectures_pdf = " ".join(lectures_pdf)
```
Create a TextBlob of the text. This is used to extract the noun phrases.
```
blob_spoken = TextBlob(lectures_spoken)
freq_spoken = nltk.FreqDist(blob_spoken.noun_phrases)
blob_pdf = TextBlob(lectures_pdf)
freq_pdf = nltk.FreqDist(blob_pdf.noun_phrases)
```
This function checks whether a noun phrase is sufficiently similar to a relevant word (template). Sufficiently similar here means that the template is a substring of the noun phrase.
```
def convert_to_template(df_element, template):
for template_element in template:
if template_element in df_element:
return template_element
return "None"
```
We first create a pandas dataframe of all the noun phrases and their frequencies in both slides and transcribes. After that, we keep all noun phrases that are similar to a relevant word (as defined by the convert_to_template function). Then we group by the relevant words.
```
relevant_words = ['bayes', 'frequentist', 'fairness', 'divergence', 'reproduc', 'regulariz', 'pca', 'principal c', 'bootstrap', 'nonlinear function', 'linear function', 'entropy', 'maximum likelihood estimat', 'significa', 'iid', 'bayes theorem', 'visualization', 'score function', 'dimensionality reduction', 'estimat', 'bayes', 'consumption', 'fisher', 'independence', 'logistic regression', 'bias', 'standard deviation', 'linear discriminant analysis', 'information matrix', 'null hypothesis', 'log likelihood', 'linear regression', 'hypothesis test', 'confidence', 'variance', 'sustainability', 'gaussian', 'linear model', 'climate', 'laplace', ]
df_spoken = pd.DataFrame.from_dict({"word": list(freq_spoken.keys()), "freq_spoken" : list(freq_spoken.values())})
df_pdf = pd.DataFrame.from_dict({"word": list(freq_pdf.keys()), "freq_pdf" : list(freq_pdf.values())})
df = df_spoken.merge(df_pdf,how="outer",on="word")
df["word"] = df["word"].apply(lambda x: convert_to_template(x,relevant_words))
df = df.groupby(["word"]).sum().reset_index()
df = df[df["word"] != "None"].reset_index()
```
We use the dense_rank functionality of the scattertext library to convert the absolute number of occurrences of a word into a dense rank. This means that we only consider the relative order of the word frequencies and discard all information about how far apart two word frequencies are.
```
df["freq_spoken"] = st.Scalers.dense_rank(df["freq_spoken"])
df["freq_pdf"] = st.Scalers.dense_rank(df["freq_pdf"])
df
plt.figure(figsize=(20,12))
sns.set_theme(style="dark")
p1 = sns.scatterplot(x='freq_spoken', # Horizontal axis
y='freq_pdf', # Vertical axis
data=df, # Data source
s = 80,
legend=False,
color="orange",
#marker = "s"
)
for line in range(0,df.shape[0]):
if line == 6:#divergence
p1.text(df.freq_spoken[line]-0.12, df.freq_pdf[line]-0.007,
df.word[line], horizontalalignment='left',
size='xx-large', color='black', weight='normal')
elif line == 21:#linear regression
p1.text(df.freq_spoken[line]-0.18, df.freq_pdf[line]-0.007,
df.word[line], horizontalalignment='left',
size='xx-large', color='black', weight='normal')
elif line == 18:#linear discriminant analysis
p1.text(df.freq_spoken[line]-0.05, df.freq_pdf[line]-0.05,
df.word[line], horizontalalignment='left',
size='xx-large', color='black', weight='normal')
elif line == 19:#linear function
p1.text(df.freq_spoken[line]-0.02, df.freq_pdf[line]-0.04,
df.word[line], horizontalalignment='left',
size='xx-large', color='black', weight='normal')
elif line == 29:#reproduce
p1.text(df.freq_spoken[line]-0.03, df.freq_pdf[line]+0.03,
df.word[line], horizontalalignment='left',
size='xx-large', color='black', weight='normal')
elif line == 12:#gaussian:
p1.text(df.freq_spoken[line]-0.1, df.freq_pdf[line]-0.007,
df.word[line], horizontalalignment='left',
size='xx-large', color='black', weight='normal')
elif line == 16:#information matrix:
p1.text(df.freq_spoken[line]+0.01, df.freq_pdf[line]-0.025,
df.word[line], horizontalalignment='left',
size='xx-large', color='black', weight='normal')
elif line == 25:#nonlinear function:
p1.text(df.freq_spoken[line]+0.01, df.freq_pdf[line]-0.025,
df.word[line], horizontalalignment='left',
size='xx-large', color='black', weight='normal')
elif line == 24:#maximum likelihood estimat:
p1.text(df.freq_spoken[line]-0.07, df.freq_pdf[line]+0.02,
df.word[line], horizontalalignment='left',
size='xx-large', color='black', weight='normal')
elif line == 17:#laplace:
p1.text(df.freq_spoken[line]-0.08, df.freq_pdf[line]-0.007,
df.word[line], horizontalalignment='left',
size='xx-large', color='black', weight='normal')
else:
p1.text(df.freq_spoken[line]+0.01, df.freq_pdf[line]-0.007,
df.word[line], horizontalalignment='left',
size='xx-large', color='black', weight='normal')
#plt.title('Dense Ranks of Word Usage in Slides and Transcribes of Relevant Words',size = "xx-large")
# Set x-axis label
plt.xlabel('Transcribes Frequency',size = "xx-large")
# Set y-axis label
plt.ylabel('Slides Frequency',size = "xx-large")
p1.set_xticks([0,0.5,1]) # <--- set the ticks first
p1.set_xticklabels(["Infrequent", "Average", "Frequent"],size = "x-large")
p1.set_yticks([0,0.5,1]) # <--- set the ticks first
p1.set_yticklabels(["Infrequent", "Average", "Frequent"],size = "x-large")
plt.show()
```
|
github_jupyter
|
## Guest Lecture COMP7230
# Using Python packages for Linked Data & spatial data
#### by Dr Nicholas Car
This Notebook is the resource used to deliver a guest lecture for the [Australian National University](https://www.anu.edu.au)'s course [COMP7230](https://programsandcourses.anu.edu.au/2020/course/COMP7230): *Introduction to Programming for Data Scientists*
Click here to run this lecture in your web browser:
[Launch this Notebook on Binder](https://mybinder.org/v2/gh/nicholascar/comp7230-training/HEAD?filepath=lecture_01.ipynb)
## About the lecturer
**Nicholas Car**:
* PhD in informatics for irrigation
* A former CSIRO informatics researcher
* worked on integrating environmental data across government / industry
* developed data standards
* Has worked in operational IT in government
* Now in a private IT consulting company, [SURROUND Australia Pty Ltd](https://surroundaustralia.com) supplying Data Science solutions
Relevant current work:
* building data processing systems for government & industry
* mainly using Python
* due to its large number of web and data science packages
* maintains the [RDFlib](https://rdflib.net) Python toolkit
* for processing [RDF](https://en.wikipedia.org/wiki/Resource_Description_Framework)
* co-chairs the [Australian Government Linked Data Working Group](https://www.linked.data.gov.au) with Armin Haller
* plans for multi-agency data integration
* still developing data standards
* in particular GeoSPARQL 1.1 (https://opengeospatial.github.io/ogc-geosparql/geosparql11/spec.html)
* for graph representations of spatial information
## 0. Lecture Outline
1. Notes about this training material
2. Accessing RDF data
3. Parsing RDF data
4. Data 'mash up'
5. Data Conversions & Display
## 1. Notes about this training material
#### This tool
* This is a Jupyter Notebook - interactive Python scripting
* You will cover Jupyter Notebooks more, later in this course
* Access this material online at:
* GitHub: <https://github.com/nicholascar/comp7230-training>
[Launch on Binder](https://mybinder.org/v2/gh/nicholascar/comp7230-training/?filepath=lecture_01.ipynb)
#### Background data concepts - RDF
_Nick will talk RDF using these web pages:_
* [Semantic Web](https://www.w3.org/standards/semanticweb/) - the concept
* [RDF](https://en.wikipedia.org/wiki/Resource_Description_Framework) - the data model
* refer to the RDF image below
* [RDFlib](https://rdflib.net) - the (Python) toolkit
* [RDFlib training Notebooks are available](https://github.com/surroundaustralia/rdflib-training)
The LocI project:
* The Location Index project: <http://loci.cat>
RDF image, from [the RDF Primer](https://www.w3.org/TR/rdf11-primer/), for discussion:

Note that:
* _everything_ is "strongly" identified
* including all relationships
* unlike lots of related data
* many of the identifiers resolve
* to more info (on the web)
## 2. Accessing RDF data
* Here we use an online structured dataset, the Geocoded National Address File for Australia
* Dataset Persistent Identifier: <https://linked.data.gov.au/dataset/gnaf>
* The above link redirects to the API at <https://gnafld.net>
* GNAF-LD Data is presented according to *Linked Data* principles
* online
* in HTML & machine-readable form, RDF
* RDF is a Knowledge Graph: a graph containing data + model
* each resource is available via a URI
* e.g. <https://linked.data.gov.au/dataset/gnaf/address/GAACT714845933>

2.1. Get the Address GAACT714845933 using the *requests* package
```
import requests # NOTE: you must have installed requests first, it's not a standard package
r = requests.get(
"https://linked.data.gov.au/dataset/gnaf/address/GAACT714845933"
)
print(r.text)
```
2.2 Get machine-readable data, RDF triples
Use HTTP Content Negotiation
Same URI, different *format* of data
```
r = requests.get(
"https://linked.data.gov.au/dataset/gnaf/address/GAACT714845933",
headers={"Accept": "application/n-triples"}
)
print(r.text)
```
2.3 Get machine-readable data, Turtle
Easier to read
```
r = requests.get(
"https://linked.data.gov.au/dataset/gnaf/address/GAACT714845933",
headers={"Accept": "text/turtle"}
)
print(r.text)
```
## 3. Parsing RDF data
Import the RDFlib library for manipulating RDF data
Add some namespaces to shorten URIs
```
import rdflib
from rdflib.namespace import RDF, RDFS
GNAF = rdflib.Namespace("http://linked.data.gov.au/def/gnaf#")
ADDR = rdflib.Namespace("http://linked.data.gov.au/dataset/gnaf/address/")
GEO = rdflib.Namespace("http://www.opengis.net/ont/geosparql#")
print(GEO)
```
Create a graph and add the namespaces to it
```
g = rdflib.Graph()
g.bind("gnaf", GNAF)
g.bind("addr", ADDR)
g.bind("geo", GEO)
```
Parse in the machine-readable data from the GNAF-LD
```
r = requests.get(
"https://linked.data.gov.au/dataset/gnaf/address/GAACT714845933",
headers={"Accept": "text/turtle"}
)
g.parse(data=r.text, format="text/turtle")
```
Print graph length (no. of triples) to check
```
print(len(g))
```
Print graph content, in Turtle
```
print(g.serialize(format="text/turtle").decode())
```
### 3.1 Getting multi-address data:
3.1.1. Retrieve an index of 10 addresses, in RDF
3.1.2. For each address in the index, get each Address' data
* use paging URI: <https://linked.data.gov.au/dataset/gnaf/address/?page=1>
3.1.3. Get only the street address and map coordinates
#### 3.1.1. Retrieve index
```
# clear the graph
g = rdflib.Graph()
r = requests.get(
"https://linked.data.gov.au/dataset/gnaf/address/?page=1",
headers={"Accept": "text/turtle"}
)
g.parse(data=r.text, format="text/turtle")
print(len(g))
```
#### 3.1.2. Parse in each address' data
```
for s, p, o in g.triples((None, RDF.type, GNAF.Address)):
print(s.split("/")[-1])
r = requests.get(
str(s),
headers={"Accept": "text/turtle"}
)
g.parse(data=r.text, format="turtle")
print(len(g))
```
The graph model used by the GNAF-LD is based on [GeoSPARQL 1.1](https://opengeospatial.github.io/ogc-geosparql/geosparql11/spec.html) and looks like this:

#### 3.1.3. Extract (& print) street address text & coordinates
(TSV)
```
addresses_tsv = "GNAF ID\tAddress\tCoordinates\n"
for s, p, o in g.triples((None, RDF.type, GNAF.Address)):
for s2, p2, o2 in g.triples((s, RDFS.comment, None)):
txt = str(o2)
for s2, p2, o2 in g.triples((s, GEO.hasGeometry, None)):
for s3, p3, o3 in g.triples((o2, GEO.asWKT, None)):
coords = str(o3).replace("<http://www.opengis.net/def/crs/EPSG/0/4283> ", "")
addresses_tsv += "{}\t{}\t{}\n".format(str(s).split("/")[-1], txt, coords)
print(addresses_tsv)
```
#### 3.1.4. Convert TSV data to a pandas DataFrame
(TSV)
```
import pandas
from io import StringIO
s = StringIO(addresses_tsv)
df1 = pandas.read_csv(s, sep="\t")
print(df1)
```
#### 3.1.5. SPARQL querying RDF data
A graph query, similar to a database SQL query, can traverse the graph and retrieve the same details as the multiple
loops and Python code above in 3.1.3.
```
q = """
SELECT ?id ?addr ?coords
WHERE {
?uri a gnaf:Address ;
rdfs:comment ?addr .
?uri geo:hasGeometry/geo:asWKT ?coords_dirty .
BIND (STRAFTER(STR(?uri), "address/") AS ?id)
BIND (STRAFTER(STR(?coords_dirty), "4283> ") AS ?coords)
}
ORDER BY ?id
"""
for r in g.query(q):
print("{}, {}, {}".format(r["id"], r["addr"], r["coords"]))
```
## 4. Data 'mash up'
Add some fake data to the GNAF data - people count per address.
The GeoSPARQL model extension used is:

Note that for real Semantic Web work, the `xxx:` properties and classes would be "properly defined", removing any ambiguity of use.
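As an illustration only, a minimal RDFlib sketch of such a definition might look like the following; the `xxx` namespace and the `numberOfPeople` property name are hypothetical and are not part of the GNAF-LD or GeoSPARQL vocabularies.
```
# Hypothetical namespace and property, declared so their meaning is explicit
XXX = rdflib.Namespace("http://example.org/def/xxx#")
g.bind("xxx", XXX)

g.add((XXX.numberOfPeople, RDF.type, RDF.Property))
g.add((XXX.numberOfPeople, RDFS.label, rdflib.Literal("number of people")))
g.add((XXX.numberOfPeople, RDFS.comment, rdflib.Literal("The count of people recorded at an Address.")))
```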
```
import pandas
df2 = pandas.read_csv('fake_data.csv')
print(df2)
```
Merge DataFrames
```
df3 = pandas.merge(df1, df2)
print(df3.head())
```
## 5. Spatial Data Conversions & Display
Often you will want to display or export data.
#### 5.1 Display directly in Jupyter
Using standard Python plotting (matplotlib).
First, extract addresses, longitudes & latitudes into a dataframe using a SPARQL query to build a CSV string.
```
import re
addresses_csv = "Address,Longitude,Latitude\n"
q = """
SELECT ?addr ?coords
WHERE {
?uri a gnaf:Address ;
rdfs:comment ?addr .
?uri geo:hasGeometry/geo:asWKT ?coords_dirty .
BIND (STRAFTER(STR(?uri), "address/") AS ?id)
BIND (STRAFTER(STR(?coords_dirty), "4283> ") AS ?coords)
}
ORDER BY ?id
"""
for r in g.query(q):
match = re.search("POINT\((\d+\.\d+)\s(\-\d+\.\d+)\)", r["coords"])
long = float(match.group(1))
lat = float(match.group(2))
addresses_csv += f'\"{r["addr"]}\",{long},{lat}\n'
print(addresses_csv)
```
Read the CSV into a DataFrame.
```
import pandas as pd
from io import StringIO
addresses_df = pd.read_csv(StringIO(addresses_csv))
print(addresses_df["Longitude"])
```
Display the first 5 rows of the DataFrame directly using matplotlib.
```
from matplotlib import pyplot as plt
addresses_df[:5].plot(kind="scatter", x="Longitude", y="Latitude", s=50, figsize=(10,10))
for i in addresses_df[:5].index:  # annotate the first five points
plt.annotate(addresses_df["Address"][i], (addresses_df["Longitude"][i], addresses_df["Latitude"][i]))
plt.show()
```
#### 5.2 Convert to common format - GeoJSON
Import Python conversion tools (shapely).
```
import shapely.wkt
from shapely.geometry import MultiPoint
import json
```
Loop through the graph using ordinary Python loops, not a query.
```
points_list = []
for s, p, o in g.triples((None, RDF.type, GNAF.Address)):
for s2, p2, o2 in g.triples((s, GEO.hasGeometry, None)):
for s3, p3, o3 in g.triples((o2, GEO.asWKT, None)):
points_list.append(
shapely.wkt.loads(str(o3).replace("<http://www.opengis.net/def/crs/EPSG/0/4283> ", ""))
)
mp = MultiPoint(points=points_list)
geojson = shapely.geometry.mapping(mp)
print(json.dumps(geojson, indent=4))
```
Another, better, GeoJSON export - including Feature information.
First, build a Python dictionary matching the GeoJSON specification, then export it to JSON.
```
geo_json_features = []
# same query as above
for r in g.query(q):
match = re.search("POINT\((\d+\.\d+)\s(\-\d+\.\d+)\)", r["coords"])
long = float(match.group(1))
lat = float(match.group(2))
geo_json_features.append({
"type": "Feature",
"properties": { "name": r["addr"] },
"geometry": {
"type": "Point",
"coordinates": [ long, lat ]
}
})
geo_json_data = {
"type": "FeatureCollection",
"name": "test-points-short-named",
"crs": { "type": "name", "properties": { "name": "urn:ogc:def:crs:OGC:1.3:CRS84" } },
"features": geo_json_features
}
import json
geo_json = json.dumps(geo_json_data, indent=4)
print(geo_json)
```
Export the data and view it in a GeoJSON map viewer, such as http://geojsonviewer.nsspot.net/ or QGIS (desktop).
## Concluding remarks
* Semantic Web, realised through Linked Data, builds a global machine-readable data system
* the RDF data structure is used
* to link things
* to define things, and the links
* specialised parts of the Sem Web can represent a/any domain
* e.g. spatial
* e.g. Addresses
* powerful graph pattern matching queries, SPARQL, can be used to subset (federated) Sem Web data
* RDF manipulation libraries exist
* can convert to other, common forms, e.g. CSV GeoJSON
* _do as much data science work as you can with well-defined models!_
## License
All the content in this repository is licensed under the [CC BY 4.0 license](https://creativecommons.org/licenses/by/4.0/). Basically, you can:
* copy and redistribute the material in any medium or format
* remix, transform, and build upon the material for any purpose, even commercially
You just need to:
* give appropriate credit, provide a link to the license, and indicate if changes were made
* not apply legal terms or technological measures that legally restrict others from doing anything the license permits
## Contact Information
**Dr Nicholas J. Car**<br />
*Data Systems Architect*<br />
[SURROUND Australia Pty Ltd](https://surroundaustralia.com)<br />
<[email protected]><br />
GitHub: [nicholascar](https://github.com/nicholascar)<br />
ORCID: <https://orcid.org/0000-0002-8742-7730><br />
|
github_jupyter
|
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
```
# Text classification with movie reviews
<table class="tfo-notebook-buttons" align="left">
  <td>
    <a target="_blank" href="https://www.tensorflow.org/tutorials/keras/text_classification"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
  </td>
  <td>
    <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/pt-br/tutorials/keras/text_classification.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
  </td>
  <td>
    <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/pt-br/tutorials/keras/text_classification.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
  </td>
  <td>
    <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/pt-br/tutorials/keras/text_classification.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
  </td>
</table>
Note: Our TensorFlow community has translated these documents. Because community translations are *best effort*, there is no guarantee that they are an accurate and up-to-date reflection of the [official English documentation](https://www.tensorflow.org/?hl=en). If you have suggestions to improve this translation, please send a pull request to the [tensorflow/docs](https://github.com/tensorflow/docs) GitHub repository. To volunteer to write or review community translations, contact the [[email protected] list](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs).
This notebook classifies movie reviews as **positive** or **negative** using the text of the review. This is an example of *binary* (or two-class) classification, an important and widely applicable kind of machine learning problem.
We'll use the [IMDB](https://www.tensorflow.org/api_docs/python/tf/keras/datasets/imdb) dataset, which contains 50,000 movie reviews from the [Internet Movie Database](https://www.imdb.com/). The dataset is split into 25,000 reviews for training and 25,000 for testing. The training and testing sets are *balanced*, meaning they contain an equal number of positive and negative reviews.
This notebook uses [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API to build and train models in TensorFlow. For a more advanced text classification tutorial using `tf.keras`, see the [MLCC Text Classification Guide](https://developers.google.com/machine-learning/guides/text-classification/).
```
import tensorflow as tf
from tensorflow import keras
import numpy as np
print(tf.__version__)
```
## Download the IMDB dataset
The IMDB dataset comes packaged with TensorFlow. It has already been preprocessed such that the reviews (sequences of words) have been converted to sequences of integers, where each integer represents a specific word in a dictionary.
The following code downloads the IMDB dataset to your machine (or uses a cached copy if you have already downloaded it):
```
imdb = keras.datasets.imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
```
The argument `num_words=10000` keeps the 10,000 most frequently occurring words in the training data. The rarer words are discarded to keep the size of the data manageable.
## Explore the data
Let's take a moment to understand the format of the data. The dataset comes preprocessed: each example is an *array* of integers representing the words of the movie review. Each *label* is an integer value of either 0 or 1, where 0 is a negative review and 1 is a positive review.
```
print("Training entries: {}, labels: {}".format(len(train_data), len(train_labels)))
```
The text of the reviews has been converted to integers, where each integer represents a specific word in a dictionary. This is what the first review looks like:
```
print(train_data[0])
```
Movie reviews may have different lengths. The code below shows the number of words in the first and second reviews. Since inputs to a neural network must have the same length, we will need to resolve this later.
```
len(train_data[0]), len(train_data[1])
```
### Convert the integers back to words
It may be useful to know how to convert integers back to text. Here we will create a helper function to query a *dictionary* object that maps integers to strings:
```
# A dictionary mapping words to integer indices
word_index = imdb.get_word_index()
# The first indices are reserved
word_index = {k:(v+3) for k,v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2 # unknown
word_index["<UNUSED>"] = 3
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
def decode_review(text):
return ' '.join([reverse_word_index.get(i, '?') for i in text])
```
Now we can use the `decode_review` function to display the text of the first review:
```
decode_review(train_data[0])
```
## Prepare the data
The reviews (the *arrays* of integers) must be converted to tensors before being fed into the neural network. This conversion can be done in two ways:
* Convert the arrays into vectors of 0s and 1s indicating word occurrence, similar to one-hot encoding. For example, the sequence [3, 5] would become a 10,000-dimensional vector that is all zeros except for indices 3 and 5, which are ones. Then make this the first layer of our network, a Dense layer, which can handle floating-point vector data. This approach is memory intensive, though, requiring a matrix of size `num_words * num_reviews` (see the sketch after this list).
* Alternatively, we can pad the arrays so they all have the same length, then create an integer tensor of shape `max_length * num_reviews`. We can use an *embedding* layer capable of handling this shape as the first layer of our network.
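As a rough illustration of the first (multi-hot) approach, which this tutorial does not use, a minimal sketch could look like this:
```
import numpy as np

def multi_hot_encode(sequences, dimension=10000):
    # One row per review; set a 1.0 at every word index that appears in the review
    results = np.zeros((len(sequences), dimension))
    for i, word_indices in enumerate(sequences):
        results[i, word_indices] = 1.0
    return results

# e.g. multi_hot_encode(train_data[:3]) yields a (3, 10000) matrix of 0s and 1s
```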
In this tutorial, we will use the second approach.
Since the movie reviews must be the same length, we will use the [pad_sequences](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/pad_sequences) function to standardize the lengths:
```
train_data = keras.preprocessing.sequence.pad_sequences(train_data,
value=word_index["<PAD>"],
padding='post',
maxlen=256)
test_data = keras.preprocessing.sequence.pad_sequences(test_data,
value=word_index["<PAD>"],
padding='post',
maxlen=256)
```
Now, let's look at the length of the examples:
```
len(train_data[0]), len(train_data[1])
```
And inspect the (now padded) first review:
```
print(train_data[0])
```
## Build the model
The neural network is created by stacking layers; this requires two main architectural decisions:
* How many layers to use in the model?
* How many *hidden units* to use for each layer?
In this example, the input data consists of arrays of word indices. The *labels* to predict are either 0 or 1. Let's build a model for this problem:
```
# The input shape is the vocabulary count used for the movie reviews (10,000 words)
vocab_size = 10000
model = keras.Sequential()
model.add(keras.layers.Embedding(vocab_size, 16))
model.add(keras.layers.GlobalAveragePooling1D())
model.add(keras.layers.Dense(16, activation='relu'))
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.summary()
```
The layers are stacked sequentially to build the classifier:
1. The first layer is an `Embedding` layer. This layer takes the integer-encoded vocabulary and looks up the embedding vector for each word index. These vectors are learned as the model trains. The vectors add a dimension to the output array. The resulting dimensions are: `(batch, sequence, embedding)`.
2. Next, a `GlobalAveragePooling1D` layer returns a fixed-length output vector for each example by averaging over the sequence dimension. This allows the model to handle inputs of variable length in the simplest way possible.
3. This fixed-length output vector is piped through a fully-connected (`Dense`) layer with 16 *hidden units*.
4. The last layer is densely connected with a single output node. Using a `sigmoid` activation function, this value is a float between 0 and 1, representing a probability, or confidence level.
### Hidden units
The model above has two intermediate or _"hidden"_ layers between the input and output. The number of outputs (units, nodes, or neurons) is the dimension of the representational space for the layer. In other words, the amount of freedom the network is allowed when learning an internal representation.
If a model has more *hidden units* (a higher-dimensional representational space), and/or more layers, then the network can learn more complex representations. However, this makes the network more computationally expensive and may lead to learning unwanted patterns, i.e. patterns that improve performance on the training data but not on the test data. This is called *overfitting*, and we will explore it later.
### Loss function and optimizer
A model needs a loss function and an optimizer for training. Since this is a binary classification problem and the model outputs a probability (a single-unit layer with a sigmoid activation), we will use the `binary_crossentropy` loss function.
This isn't the only choice for a loss function; you could, for instance, choose `mean_squared_error`. But, generally, `binary_crossentropy` is better for dealing with probabilities: it measures the "distance" between probability distributions, or, in our case, between the true distribution and the predictions.
Later, when we explore regression problems (such as predicting the price of a house), we will see how to use another loss function called *mean squared error*.
Now, configure the model to use an optimizer and a loss function:
```
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
```
## Create a validation set
When training, we want to check the accuracy of the model on data it has not seen before. Create a *validation set* by setting apart 10,000 examples from the original training data. (Why not use the testing set now? Our goal is to develop and tune our model using only the training data, then use the test data just once to evaluate the accuracy.)
```
x_val = train_data[:10000]
partial_x_train = train_data[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
```
## Train the model
Train the model for 40 *epochs* in *mini-batches* of 512 examples. This is 40 iterations over all the examples in the `x_train` and `y_train` tensors. While training, monitor the model's loss and accuracy on the 10,000 examples of the validation set:
```
history = model.fit(partial_x_train,
partial_y_train,
epochs=40,
batch_size=512,
validation_data=(x_val, y_val),
verbose=1)
```
## Evaluate the model
And let's see how the model performs. Two values will be returned: loss (a number representing our error; lower values are better) and accuracy.
```
results = model.evaluate(test_data, test_labels, verbose=2)
print(results)
```
This fairly naive approach achieves an accuracy of about 87%. With more advanced approaches, the model should get closer to 95%.
## Create a graph of accuracy and loss over time
`model.fit()` returns a `History` object that contains a dictionary with everything that happened during training:
```
history_dict = history.history
history_dict.keys()
```
There are four entries: one for each metric monitored during training and validation. We can use them to plot the training and validation loss for comparison, as well as the training and validation accuracy:
```
import matplotlib.pyplot as plt
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf() # clear figure
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
```
In the plots, the dots represent the training loss and accuracy, while the solid lines are the validation loss and accuracy.
Notice that the training loss *decreases* with each epoch and the training accuracy *increases*. This is expected when using gradient descent optimization: it should minimize the target quantity on every iteration.
This is not the case for the validation loss and accuracy: they seem to peak after about twenty epochs. This is an example of *overfitting*: the model performs better on the training data than on data it has never seen before. After this point, the model over-optimizes and learns representations *specific* to the training data that do not *generalize* to the test data.
For this particular case, we could prevent overfitting simply by stopping the training after about twenty epochs. Later, you will see how to do this automatically with a *callback*.
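For reference, here is a minimal sketch of such a callback using `tf.keras.callbacks.EarlyStopping` (the patience value is illustrative, not from the original notebook):
```
import tensorflow as tf

early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                              patience=2,
                                              restore_best_weights=True)
# history = model.fit(partial_x_train, partial_y_train,
#                     epochs=40, batch_size=512,
#                     validation_data=(x_val, y_val),
#                     callbacks=[early_stop], verbose=1)
```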
|
github_jupyter
|
<a href="https://colab.research.google.com/github/R-aryan/Image_Classification_VGG16/blob/master/Classification_Cat_VS_Dogs_Transfer_Learning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import keras,os
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D , Flatten
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras import optimizers
from keras.models import load_model
import numpy as np
import shutil
from os import listdir
from os.path import splitext
from keras.preprocessing import image
import matplotlib.pyplot as plt
train_directory= "/content/drive/My Drive/classification_Dataset/cat_VS_dogs/train"
test_directory="/content/drive/My Drive/classification_Dataset/cat_VS_dogs/test1"
src= '/content/drive/My Drive/classification_Dataset/cat_VS_dogs/train'
dest_d='/content/drive/My Drive/classification_Dataset/cat_VS_dogs/train/Dogs'
dest_c='/content/drive/My Drive/classification_Dataset/cat_VS_dogs/train/Cats'
validation_set='/content/drive/My Drive/classification_Dataset/cat_VS_dogs/validation_data'
trdata = ImageDataGenerator()
traindata = trdata.flow_from_directory(directory=src,target_size=(224,224),batch_size=32)
tsdata = ImageDataGenerator()
testdata = tsdata.flow_from_directory(directory=validation_set, target_size=(224,224),batch_size=32)
```
Here, using the `ImageDataGenerator` class in Keras, I import all the cat and dog images into the model. `ImageDataGenerator` automatically labels the data based on the directory structure and maps each label to its corresponding images.
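As an optional sanity check (not in the original notebook), you can inspect the class-to-index mapping that `flow_from_directory` inferred from the folder names:
```
# The exact mapping depends on the folder names, e.g. {'Cats': 0, 'Dogs': 1}
print(traindata.class_indices)
print(testdata.class_indices)
```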
```
vggmodel = VGG16(weights='imagenet', include_top=True)
```
In this part I import VGG16 from Keras with pre-trained weights trained on ImageNet. Note that the `include_top` parameter is set to `True`, which means the weights of the whole model, including the dense layers, will be downloaded. If it is set to `False`, pre-trained weights are downloaded only for the convolutional layers, and no weights are downloaded for the dense layers.
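For contrast, here is a short sketch (not used in this notebook) of the `include_top=False` variant mentioned above, where only the convolutional base is downloaded and a custom classifier is attached:
```
# Hypothetical alternative: convolutional base only, with a custom 2-class head
base = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
x = Flatten()(base.output)
head = Dense(2, activation="softmax")(x)
alt_model = Model(inputs=base.input, outputs=head)
```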
```
vggmodel.summary()
```
Running `vggmodel.summary()` prints a summary of the entire downloaded VGG model.
After the model has been downloaded, I need to adapt it to my problem, which is to classify cats and dogs. I will not train the weights of the first 19 layers and will use them as they are, so I set the `trainable` attribute to `False` for those layers.
```
vggmodel.layers
for layers in (vggmodel.layers)[:19]:
print(layers)
layers.trainable = False
```
Since my problem is to classify cats and dogs, which means two classes, the last dense layer of the model should be a 2-unit softmax layer. Here I take the second-to-last layer of the model, the 4096-unit dense layer, and attach a 2-unit softmax dense layer to it. This effectively removes the last layer of VGG16, which was built to predict 1000 classes.
```
X= vggmodel.layers[-2].output
predictions = Dense(2, activation="softmax")(X)
model_final = Model(inputs=vggmodel.input, outputs=predictions)
```
Now I compile the new model. I set the learning rate of the SGD (Stochastic Gradient Descent) optimizer using the `lr` parameter, and since the model ends in a 2-unit dense layer producing a categorical output, I use `categorical_crossentropy` as the loss.
```
model_final.compile(loss = "categorical_crossentropy", optimizer = optimizers.SGD(lr=0.0001, momentum=0.9), metrics=["accuracy"])
model_final.summary()
from keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint = ModelCheckpoint("/content/drive/My Drive/classification_Dataset/vgg16_tl.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=40, verbose=1, mode='auto')
model_final.fit_generator(generator= traindata, steps_per_epoch= 2, epochs= 100, validation_data= testdata, validation_steps=1, callbacks=[checkpoint,early])
model_final.save_weights("/content/drive/My Drive/classification_Dataset/vgg16_tl.h5")
```
Predicting the output
```
# from keras.preprocessing import image
# import matplotlib.pyplot as plt
img = image.load_img("/content/drive/My Drive/classification_Dataset/cat_VS_dogs/test1/12500.jpg",target_size=(224,224))
img = np.asarray(img)
plt.imshow(img)
img = np.expand_dims(img, axis=0)
from keras.models import load_model
model_final.load_weights("/content/drive/My Drive/classification_Dataset/vgg16_tl.h5")
#saved_model.compile()
output = model_final.predict(img)
if output[0][0] > output[0][1]:
print("cat")
else:
print('dog')
def prediction(path_image):
img = image.load_img(path_image,target_size=(224,224))
img = np.asarray(img)
plt.imshow(img)
img = np.expand_dims(img, axis=0)
model_final.load_weights("/content/drive/My Drive/classification_Dataset/vgg16_tl.h5")
output = model_final.predict(img)
if output[0][0] > output[0][1]:
print("cat")
else:
print('dog')
prediction("/content/drive/My Drive/classification_Dataset/cat_VS_dogs/test1/12500.jpg")
prediction("/content/drive/My Drive/classification_Dataset/cat_VS_dogs/test1/12499.jpg")
```
|
github_jupyter
|
# In this notebook we simulate the interconnection of LTI systems
```
# import the required libraries
import numpy as np # arrays
import matplotlib.pyplot as plt # plots
plt.rcParams.update({'font.size': 14})
import IPython.display as ipd # to play signals
import sounddevice as sd
import soundfile as sf
# The following modules are used to create our LTI systems
from scipy.signal import butter, lfilter, freqz, chirp, impulse
```
# Let's create two LTI systems
First we'll create two LTI systems: a high-pass filter and a low-pass filter. You can later change the order of one of the filters and its cutoff frequency, then observe what happens to the FRF of the cascaded system.
```
# Filter parameters
order1 = 6
fs = 44100 # sample rate, Hz
cutoff1 = 1000 # desired cutoff frequency of the filter, Hz
# Low-pass
b, a = butter(order1, 2*cutoff1/fs, btype='low', analog=False)
w, H1 = freqz(b, a)
# High-pass
cutoff2 = 1000
order2 = 6
b, a = butter(order2, 2*cutoff2/fs, btype='high', analog=False)
w, H2 = freqz(b, a)
plt.figure(figsize=(15,5))
plt.subplot(1,2,1)
plt.semilogx(fs*w/(2*np.pi), 20 * np.log10(abs(H1)), 'b', linewidth = 2, label = 'Low-pass')
plt.semilogx(fs*w/(2*np.pi), 20 * np.log10(abs(H2)), 'r', linewidth = 2, label = 'High-pass')
plt.title('Magnitude')
plt.xlabel('Frequency [Hz]')
plt.ylabel('Amplitude [dB]')
plt.margins(0, 0.1)
plt.grid(which='both', axis='both')
plt.ylim((-100, 20))
plt.subplot(1,2,2)
plt.semilogx(fs*w/(2*np.pi), np.angle(H1), 'b', linewidth = 2, label = 'Low-pass')
plt.semilogx(fs*w/(2*np.pi), np.angle(H2), 'r', linewidth = 2, label = 'High-pass')
plt.legend(loc = 'upper right')
plt.title('Phase')
plt.xlabel('Frequency [Hz]')
plt.ylabel('Phase [rad]')
plt.margins(0, 0.1)
plt.grid(which='both', axis='both')
plt.show()
```
# Series interconnection
\begin{equation}
H(\mathrm{j}\omega) = H_1(\mathrm{j}\omega)H_2(\mathrm{j}\omega)
\end{equation}
```
Hs = H1*H2
plt.figure(figsize=(15,5))
plt.subplot(1,2,1)
plt.semilogx(fs*w/(2*np.pi), 20 * np.log10(abs(H1)), '--k', linewidth = 2)
plt.semilogx(fs*w/(2*np.pi), 20 * np.log10(abs(H2)), '--k', linewidth = 2)
plt.semilogx(fs*w/(2*np.pi), 20 * np.log10(abs(Hs)), 'b', linewidth = 2, label = 'R: Band pass')
plt.title('Magnitude')
plt.xlabel('Frequency [Hz]')
plt.ylabel('Amplitude [dB]')
plt.margins(0, 0.1)
plt.grid(which='both', axis='both')
plt.ylim((-100, 20))
plt.subplot(1,2,2)
plt.semilogx(fs*w/(2*np.pi), np.angle(H1), '--k', linewidth = 2)
plt.semilogx(fs*w/(2*np.pi), np.angle(H2), '--k', linewidth = 2)
plt.semilogx(fs*w/(2*np.pi), np.angle(Hs), 'b', linewidth = 2, label = 'R: Band pass')
plt.legend(loc = 'upper right')
plt.title('Phase')
plt.xlabel('Frequency [Hz]')
plt.ylabel('Phase [rad]')
plt.margins(0, 0.1)
plt.grid(which='both', axis='both')
plt.show()
```
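The same series interconnection can be checked in the time domain: cascading the two filters with `lfilter` and multiplying the two transfer functions (convolving their coefficient polynomials) give the same result. The following is a small sketch under the same filter settings as above; `b_lp/a_lp` and `b_hp/a_hp` are recomputed here because the notebook reuses `b, a`.
```
from scipy.signal import butter, lfilter, freqz
import numpy as np

fs = 44100
b_lp, a_lp = butter(6, 2*1000/fs, btype='low')
b_hp, a_hp = butter(6, 2*1000/fs, btype='high')
# Cascade in the time domain: filter with one system, then with the other
x = np.random.randn(fs)  # 1 s of white noise as a test input
y_series = lfilter(b_hp, a_hp, lfilter(b_lp, a_lp, x))
# Equivalent single system: numerators and denominators multiply as polynomials
w_c, H_c = freqz(np.convolve(b_lp, b_hp), np.convolve(a_lp, a_hp))
# H_c should match Hs = H1*H2 computed above
```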
# Parallel interconnection
\begin{equation}
H(\mathrm{j}\omega) = H_1(\mathrm{j}\omega)+H_2(\mathrm{j}\omega)
\end{equation}
```
Hs = H1+H2
plt.figure(figsize=(15,5))
plt.subplot(1,2,1)
plt.semilogx(fs*w/(2*np.pi), 20 * np.log10(abs(H1)), '--k', linewidth = 2)
plt.semilogx(fs*w/(2*np.pi), 20 * np.log10(abs(H2)), '--k', linewidth = 2)
plt.semilogx(fs*w/(2*np.pi), 20 * np.log10(abs(Hs)), 'b', linewidth = 2, label = 'R: All pass')
plt.title('Magnitude')
plt.xlabel('Frequency [Hz]')
plt.ylabel('Amplitude [dB]')
plt.margins(0, 0.1)
plt.grid(which='both', axis='both')
plt.ylim((-100, 20))
plt.subplot(1,2,2)
plt.semilogx(fs*w/(2*np.pi), np.angle(H1), '--k', linewidth = 2)
plt.semilogx(fs*w/(2*np.pi), np.angle(H2), '--k', linewidth = 2)
plt.semilogx(fs*w/(2*np.pi), np.angle(Hs), 'b', linewidth = 2, label = 'R: All pass')
plt.legend(loc = 'upper right')
plt.title('Phase')
plt.xlabel('Frequency [Hz]')
plt.ylabel('Phase [rad]')
plt.margins(0, 0.1)
plt.grid(which='both', axis='both')
plt.show()
```
|
github_jupyter
|
```
import pandas as pd
import numpy as np
import pyflux as pf
import matplotlib.pyplot as plt
from fbprophet import Prophet
%matplotlib inline
plt.rcParams['figure.figsize']=(20,10)
plt.style.use('ggplot')
```
### Load the data
For this work, we're going to use the same retail sales data that we've used before. It can be found in the examples directory of this repository.
```
sales_df = pd.read_csv('../examples/retail_sales.csv', index_col='date', parse_dates=True)
sales_df.head()
```
Like all good modeling projects, we need to take a look at the data to get an idea of what it looks like.
```
sales_df.plot()
```
It's pretty clear from this data that we are looking at a trending dataset with some seasonality. This is actually a pretty good dataset for prophet, since the additive model and prophet's implementation do well with this type of data.
With that in mind, let's take a look at what prophet does from a modeling standpoint, to compare with the dynamic linear regression model. For more details, you can take a look at my blog post titled **Forecasting Time Series data with Prophet – Part 4** (http://pythondata.com/forecasting-time-series-data-prophet-part-4/)
```
# Prep data for prophet and run prophet
df = sales_df.reset_index()
df=df.rename(columns={'date':'ds', 'sales':'y'})
model = Prophet(weekly_seasonality=True)
model.fit(df);
future = model.make_future_dataframe(periods=24, freq = 'm')
forecast = model.predict(future)
model.plot(forecast);
```
With our prophet model ready for comparison, let's build a model with pyflux's dynamic linear regresion model.
### More Data Viz
Now that we've run our prophet model and can see what it has done, it's time to walk through what I call the 'long form' of model building. This is more involved than throwing data at a library and accepting the results.
For this data, let's first look at the differenced log values of our sales data (to try to make it more stationary).
```
diff_log = pd.DataFrame(np.diff(np.log(sales_df['sales'].values)))
diff_log.index = sales_df.index.values[1:sales_df.index.values.shape[0]]
diff_log.columns = ["Sales DiffLog"]
sales_df['logged']=np.log(sales_df['sales'])
sales_df.tail()
sales_df.plot(subplots=True)
```
With our original data (top pane in orange), we can see a very pronounced trend. With the differenced log values (bottom pane in blue), we've removed that trend and made the data stationary (or hopefully we have).
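If you want something more formal than eyeballing the plot, an Augmented Dickey-Fuller test is one option; this is an optional sketch (not part of the original notebook) and assumes `statsmodels` is installed:
```
from statsmodels.tsa.stattools import adfuller

adf_stat, p_value, *_ = adfuller(diff_log['Sales DiffLog'].values)
print(adf_stat, p_value)  # a small p-value supports treating the differenced-log series as stationary
```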
Now, let's take a look at an autocorrelation plot, which will tell us whether future sales are correlated with past data. I won't go into detail on autocorrelation, but if you don't understand whether you have autocorrelation (and to what degree), you might be in for a hard time :)
Let's take a look at the autocorrelation plot (ACF) of the differenced log values, as well as the ACF of the square of the differenced log values.
```
pf.acf_plot(diff_log.values.T[0])
pf.acf_plot(np.square(diff_log.values.T[0]))
```
We can see that at a lag of 1 and 2 months, there are positive correlations for sales but as time goes on, that correlation drops quickly to a negative correlation that stays in place over time, which hints at the fact that there are some autoregressive effects within this data.
Because of this fact, we can start our modeling by using an ARMA model of some sort.
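As a starting point, here is a minimal pyflux ARIMA sketch on the differenced-log series; the lag orders are illustrative only, and the notebook itself proceeds with a local linear trend (LLT) model below.
```
model_arima = pf.ARIMA(data=diff_log, ar=2, ma=2, target='Sales DiffLog')
x_arima = model_arima.fit("MLE")
x_arima.summary()
```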
```
Logged = pd.DataFrame(np.log(sales_df['sales']))
Logged.index = pd.to_datetime(sales_df.index)
Logged.columns = ['Sales - Logged']
Logged.head()
modelLLT = pf.LLT(data=Logged)
x = modelLLT.fit()
x.summary()
modelLLT.plot_fit(figsize=(20,10))
modelLLT.plot_predict_is(h=len(Logged)-1, figsize=(20,10))
predicted = modelLLT.predict_is(h=len(Logged)-1)
predicted.columns = ['Predicted']
predicted.tail()
np.exp(predicted).plot()
sales_df_future=sales_df
sales_df
final_sales = sales_df.merge(np.exp(predicted), left_index=True, right_index=True)
final_sales.tail()
final_sales.plot()
```
|
github_jupyter
|
```
import urllib.request
import json
import pandas as pd
from datetime import datetime
import seaborn as sns
cm = sns.light_palette("red", as_cmap=True)
#https://www.trilhaseaventuras.com.br/siglas-dos-principais-aeroportos-do-mundo-iata/
#urlOneWay
#https://www.decolar.com/shop/flights-busquets/api/v1/web/search?adults=1&children=0&infants=0&limit=4&site=BR&channel=site&from=POA&to=MIA&departureDate=2020-03-04&groupBy=default&orderBy=total_price_ascending&viewMode=CLUSTER&language=pt_BR&airlineSummary=false&chargesDespegar=false&user=e1861e3a-3357-4a76-861e-3a3357ea76c0&h=38dc1f66dbf4f5c8df105321c3286b5c&flow=SEARCH&di=1-0&clientType=WEB&disambiguationApplied=true&newDisambiguationService=true&initialOrigins=POA&initialDestinations=MIA&pageViewId=62ef8aab-ab53-406c-8429-885702acecbd
import requests
url = "https://www.pontosmultiplus.com.br/service/facilities/handle-points"
payload = "logado=&select-name=1000&points=1000&action=calculate"
headers = {
'authority': 'www.pontosmultiplus.com.br',
'accept': 'application/json, text/javascript, */*; q=0.01',
'origin': 'https://www.pontosmultiplus.com.br',
'x-requested-with': 'XMLHttpRequest',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
'dnt': '1',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'referer': 'https://www.pontosmultiplus.com.br/facilidades/compradepontos',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7',
'cookie': 'userPrefLanguage=pt_BR; sback_client=573a40fecdbbbb66963e544d; sback_partner=false; sb_days=1549545557254; sback_browser=0-14236400-1548261174b4075e5fdbd390aa38772d39e7c7a352593b045121165093285c48973622c2c1-45877488-170246360, 20525122160-1549545560; sback_customer=$2QUxcVTzd0dOVENUtmd6dlTmp3RjlHVF90bxETQ1oWOad0dWF0QUN3T0hnYBlFVx0UO5BVWTRFVNZTblh2YqRkW2$12; chaordic_browserId=09b01e60-2300-11e9-8ced-6fbc9e419cda; chaordic_anonymousUserId=anon-09b01e60-2300-11e9-8ced-6fbc9e419cda; sback_total_sessions=2; _ducp=eyJfZHVjcCI6ImE4MzY0NWM2LTI3ZWYtNGUzZS1iMzNjLTI3YmY4ZTY4MDMwOCIsIl9kdWNwcHQiOiIifQ==; _fbp=fb.2.1550499169207.1066950068; cto_h2h=B; s_fid=2E4956A0C0C14E48-2CB286BB7EF81637; cto_lwid=01abc4e4-21f3-450f-9f35-57dee229928a; __utmz=196304045.1569964079.10.2.utmcsr=multiplus-emkt|utmccn=20190930_EMAIL_INSTITUCIONAL_BOAS_VINDAS_NOVA_MARCA_BRASIL-20191001|utmcmd=emkt|utmctr=14602|utmcct=cabecalho-ver_extrato_deslogado; s_vnum=1596641437499%26vn%3D2; s_lv=1569964112519; optionExchange=0; origin=[{%22city%22:{%22iataCode%22:%22POA%22%2C%22name%22:%22Porto%20Alegre%22}%2C%22type%22:%22airport%22%2C%22iataCode%22:%22POA%22%2C%22name%22:%22Salgado%20Filho%22%2C%22value%22:%22POA_airport%22%2C%22orderCodeNumber%22:%222%22%2C%22orderCode%22:%22Porto%20Alegre2%22%2C%22label%22:%22Porto%20Alegre%20(POA)%2C%20Salgado%20Filho%20(POA)%2C%20Brasil%22%2C%22position%22:%2200002Porto%20Alegre%20(POA)%2C%20Salgado%20Filho%20(POA)%2C%20Brasil%22}]; destiny=[{%22city%22:{%22iataCode%22:%22FRA%22%2C%22name%22:%22Frankfurt%22}%2C%22type%22:%22airport%22%2C%22iataCode%22:%22FRA%22%2C%22name%22:%22Frankfurt%20Intl.%22%2C%22value%22:%22FRA_airport%22%2C%22orderCodeNumber%22:%222%22%2C%22orderCode%22:%22Frankfurt2%22%2C%22label%22:%22Frankfurt%20(FRA)%2C%20Frankfurt%20Intl.%20(FRA)%2C%20Alemanha%22%2C%22position%22:%2200002Frankfurt%20(FRA)%2C%20Frankfurt%20Intl.%20(FRA)%2C%20Alemanha%22}]; cabinClass=Y; classesSuggestions=[{%22idCabin%22:1%2C%22cabinClass%22:%22Y%22%2C%22cabinName%22:%22Economy%22}%2C{%22idCabin%22:2%2C%22cabinClass%22:%22W%22%2C%22cabinName%22:%22Premium%20Economy%22}%2C{%22idCabin%22:3%2C%22cabinClass%22:%22J%22%2C%22cabinName%22:%22Premium%20Business%22}]; _gcl_au=1.1.278670892.1578924604; _hjid=59ae5b53-f6c8-48b1-bc67-fb8182856ead; chaordic_testGroup=%7B%22experiment%22%3Anull%2C%22group%22%3Anull%2C%22testCode%22%3Anull%2C%22code%22%3Anull%2C%22session%22%3Anull%7D; country_code=br; language_code=pt; __utmc=196304045; _esvan_ref.50060.=; language_country=pt_br; _ga=GA1.3.1171237216.1579530427; _gid=GA1.3.911523691.1579530427; _gaZ=GA1.3.1171237216.1579530427; _gaZ_gid=GA1.3.911523691.1579530427; return=Sat%20Apr%2011%202020%2012:00:00%20GMT-0300%20(Hor%C3%A1rio%20Padr%C3%A3o%20de%20Bras%C3%ADlia); trip=ida_vuelta; departure=Sat%20Apr%2004%202020%2012:00:00%20GMT-0300%20(Hor%C3%A1rio%20Padr%C3%A3o%20de%20Bras%C3%ADlia); SMSESSION=LOGGEDOFF; userIdZ=; __utma=196304045.1744687836.1549545551.1579547569.1579636257.15; analyticsHelper.cd38=ef144e288de8d22700e20cda9fce9ee5ee61b5d25b61bd0dab35f4ddc72e95ce; ATGSESSIONID=yiPNORqQ9P7PZ74G-Syy7CLAjB8uk3Tw0Wc4dHWdUyC7KjCIe4s0u0021-680739279; __zjc7749=4962761565; userTags=%7B%22id%22%3A%22Anonimo%22%2C%22age%22%3A0%2C%22gender%22%3Anull%2C%22email%22%3Anull%2C%22emailHash%22%3Anull%2C%22country%22%3Anull%2C%22city%22%3Anull%2C%22state%22%3Anull%2C%22zipCode%22%3Anull%2C%22typeOfParticipation%22%3Anull%2C%22balance%22%3Anull%2C%22status%22%3A%22deslogado%22%7D; 
_gac_UA-83192457-1=1.1579696070.CjwKCAiAgqDxBRBTEiwA59eEN-j8nGbsIpfJMIrCCHTfzUi4saF5CmN227pOPsXIuXAOZmOQs_DMSRoCBtMQAvD_BwE; _gcl_aw=GCL.1579696070.CjwKCAiAgqDxBRBTEiwA59eEN-j8nGbsIpfJMIrCCHTfzUi4saF5CmN227pOPsXIuXAOZmOQs_DMSRoCBtMQAvD_BwE; _dc_gtm_UA-83192457-1=1; _gac_UA-83192457-13=1.1579696070.CjwKCAiAgqDxBRBTEiwA59eEN-j8nGbsIpfJMIrCCHTfzUi4saF5CmN227pOPsXIuXAOZmOQs_DMSRoCBtMQAvD_BwE; _dc_gtm_UA-83192457-13=1; __z_a=3200530082274793935727479; JSESSIONID=_hHNOSuko30OZo1X7XyjT4_6rnAXanFcwA7M9PShrPBBjztzhMrIu0021-1010243761; SS_X_JSESSIONID=KoLNOSzOIq0SooUobVecEo7ju0GL-8Y2O_kOVlqjZsm5rKnmkG33u0021-183582721; akavpau_multiplusgeral=1579696676~id=48e1b4d4309a5f9f09664afd46406b0e; __zjc872=4962761577; _gat=1'
}
response = requests.request("POST", url, headers=headers, data = payload)
resultPontos = response.text.encode('utf8')
resPontos = json.loads(resultPontos.decode('utf-8'))
print(resPontos['data']['total'])
PONTOSMULTIPLUS = resPontos['data']['total']
dataInicial = '2020-07-03'
dataFinal = '2020-07-19'
idaEvolta=True
#tripType=''
#dataInicial = '2020-04-08'
#dataFinal = '2020-04-22'
#if idaEvolta:
# tripType = 'roundtrip'
#else:
# tripType = 'oneway'
specificDate = False
origens = ['POA','GRU','GIG']
destinos = ['ATL','MIA','MDZ','BRC','LIM','CTG','ADZ','FRA']
#dfDict.append({'de':origem,'para':destino,'Ida': p['departureDate'],'Volta':arr['arrivalDate'],'preco':arr['price']["amount"]})
resumo = []
dfDict =[]
for origem in origens:
for destino in destinos:
minValue = 999999999
fraseFinal= ''
print(origem + ' -> '+ destino)
urlDecolar = '''https://www.decolar.com/shop/flights-busquets/api/v1/web/calendar-prices/matrix?adults=1&children=0&infants=0&limit=4&site=BR&channel=site&from={origem}&to={destino}&departureDate={dataInicial}&returnDate={dataFinal}&orderBy=total_price_ascending&viewMode=CLUSTER&language=pt_BR&clientType=WEB&initialOrigins={origem}&initialDestinations={destino}&pageViewId=b35e67df-abc9-4308-875f-c3810b3729e4&mustIncludeDates=NA_NA¤cy=BRL&breakdownType=TOTAL_FARE_ONLY'''.format(dataInicial=dataInicial,dataFinal=dataFinal,origem=origem,destino=destino)
#print(urlDecolar)
with urllib.request.urlopen(urlDecolar) as url:
s = url.read()
data = json.loads(s.decode('utf-8'))
#print(data)
for p in data['departures']:
for arr in p['arrivals']:
if 'price' in arr:
dfDict.append({'DataPesquisa':datetime.now().strftime("%d/%m/%Y %H:%M:%S"),'de':origem,'para':destino,'Ida': p['departureDate'],'Volta':arr['arrivalDate'],'preco':arr['price']["amount"]})
if specificDate:
if p['departureDate'] == dataInicial and arr['arrivalDate'] == dataFinal:
if minValue > arr['price']["amount"]:
minValue = arr['price']["amount"]
fraseFinal = 'Voo mais barato '+origem + ' -> '+ destino+' de:' + p['departureDate'], ' ate ',arr['arrivalDate'],'- valor: ' + str(arr['price']["amount"])
resumo.append(fraseFinal)
print('de:' + p['departureDate'], ' ate ',arr['arrivalDate'],'- valor: ' + str(arr['price']["amount"]))
else:
if minValue > arr['price']["amount"]:
minValue = arr['price']["amount"]
fraseFinal = 'Voo mais barato '+origem + ' -> '+ destino+' de:' + p['departureDate'], ' ate ',arr['arrivalDate'],'- valor: ' + str(arr['price']["amount"])
resumo.append(fraseFinal)
print('de:' + p['departureDate'], ' ate ',arr['arrivalDate'],'- valor: ' + str(arr['price']["amount"]))
print('')
print(fraseFinal)
print(minValue)
print('')
for r in resumo:
print(r)
df = pd.DataFrame.from_dict(dfDict)
if specificDate:
df = df[df['Ida']==dataInicial]
df = df[df['Volta']==dataFinal]
display(df.describe())
df.sort_values(by='preco',ascending=True).head(5).style.background_gradient(cmap='OrRd')
with open('historicoPesquisaPrecos.csv', 'a') as f:
df.to_csv(f, mode='a',header=f.tell()==0)
dfGrafico = pd.read_csv("historicoPesquisaPrecos.csv")
dfGrafico = dfGrafico[dfGrafico['Ida']>='2020-07-03']
dfGrafico = dfGrafico[dfGrafico['Ida']<='2020-07-07']
dfGrafico = dfGrafico[dfGrafico['Volta']>='2020-07-17']
dfGrafico = dfGrafico[dfGrafico['Volta']<='2020-07-20']
dfGrafico['DataPesquisa'] = dfGrafico['DataPesquisa'].apply(lambda x:x[0:13])
dfGrafico['DataPesquisaDATA']=dfGrafico['DataPesquisa'].apply(lambda x:pd.to_datetime(x[0:10]))
dfGrafico['Dias'] = dfGrafico.apply(lambda x: int(str(pd.to_datetime(x['Volta'])- pd.to_datetime(x['Ida']))[0:2]),axis=1)
dfGrafico['OrigemDestino'] = dfGrafico.apply(lambda x: x['de'] + x['para'],axis=1)
dfGrafico['EspecificoIda'] = dfGrafico.apply(lambda x: x['de'] + x['para']+'-'+x['Ida'],axis=1)
dfGrafico['EspecificoVolta'] = dfGrafico.apply(lambda x: x['de'] + x['para']+'-'+x['Volta'],axis=1)
dfGrafico['EspecificoTodos'] = dfGrafico.apply(lambda x: x['de'] + x['para']+'-'+x['Ida']+'-'+x['Volta'],axis=1)
display(dfGrafico)
#dfGraficoPOA_ATL = dfGrafico.query('de == "POA" & para == "ATL"')
#dfGraficoPOA_MIA = dfGrafico.query('de == "POA" & para == "MIA"')
#dfGraficoGRU_MIA = dfGrafico.query('de == "GRU" & para == "MIA"')
#dfGraficoGRU_ATL = dfGrafico.query('de == "GRU" & para == "ATL"')
#dfGraficoGRU_MDZ = dfGrafico.query('de == "GRU" & para == "MDZ"')
#dfGraficoPOA_MDZ = dfGrafico.query('de == "POA" & para == "MDZ"')
#datasets = [dfGrafico,dfGraficoPOA_ATL,dfGraficoPOA_MIA,dfGraficoGRU_MIA,dfGraficoGRU_ATL,dfGraficoGRU_MDZ,dfGraficoPOA_MDZ]
#print(dfGraficoPOA_ATL['Ida'].count())
#print(dfGraficoPOA_MIA['Ida'].count())
#print(dfGraficoGRU_MIA['Ida'].count())
#print(dfGraficoGRU_ATL['Ida'].count())
#print(dfGraficoGRU_MDZ['Ida'].count())
#print(dfGraficoPOA_MDZ['Ida'].count())
#import plotly.express as px
#for graph in datasets:
# #graph = graph.query('Ida =="2020-07-05" & Volta =="2020-07-20"')
# graph = graph.query('de =="POA" & Dias >=14 & Dias <=17')# | de =="GRU"')
# fig = px.line(graph.drop_duplicates(), x="DataPesquisa", y="preco", color="EspecificoTodos",hover_data=['de','para','Ida', 'Volta','preco'])
# fig.show()
import pandas_profiling
#print(dfGraficoPOA_MIA.columns)  # dfGraficoPOA_MIA is defined only in the commented-out block above
#pandasDf=dfGraficoPOA_MIA[['Ida', 'Volta', 'de', 'para', 'preco','DataPesquisaDATA', 'Dias']]
#display(pandasDf.head(3))
#pandas_profiling.ProfileReport(pandasDf)
dfPivot = dfGrafico.query('de == "POA" or de=="GRU"')
#display(dfPivot.head(3))
dfPivot = pd.pivot_table(dfPivot,values='preco',index=['de','para','Dias','Ida'],columns='DataPesquisa')
```
## Highest values in the price history
```
#display(dfPivot)
#dfPivot.style.apply(highlight_max)
```
## Lowest values in the price history
```
#dfPivot.style.apply(highlight_min)
#dfLastSearch = dfGrafico.query('de == "POA" or de=="GRU"')
#print(dfLastSearch.groupby(['de','para']).count())
#dfLastSearch = dfLastSearch[dfLastSearch['DataPesquisaDATA']>='21/01/2020']
#dfLastSearchPivot = pd.pivot_table(dfLastSearch,values='preco',index=['de','para','Dias','Ida','Volta'],columns='DataPesquisa')
#dfLastSearchPivot.style.apply(highlight_min)
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import colors
def background_gradient(s, m, M, cmap='PuBu', low=0, high=0):
rng = M - m
norm = colors.Normalize(m - (rng * low),
M + (rng * high))
normed = norm(s.values)
c = [colors.rgb2hex(x) for x in plt.cm.get_cmap(cmap)(normed)]
return ['background-color: %s' % color for color in c]
#df = pd.DataFrame([[3,2,10,4],[20,1,3,2],[5,4,6,1]])
#dfLastSearchPivot.fillna(0,inplace=True)
#dfLastSearchPivot.query('para == "MIA"').style.background_gradient(cmap='OrRd')
#display(dfLastSearchPivot.style.background_gradient(cmap='OrRd'))
#print(dfLastSearchPivot.groupby(['de','para']).count())
#dfLastSearchPivot.style.apply(background_gradient,cmap='OrRd',m=dfLastSearchPivot.min().min(),M=dfLastSearchPivot.max().max(),low=0,high=7000)
urlPontoLatam = 'https://bff.latam.com/ws/proxy/booking-webapp-bff/v1/public/redemption/recommendations/outbound?departure={dataInicial}&origin={origem}&destination={destino}&cabin=Y&country=BR&language=PT&home=pt_br&return={dataFinal}&adult=1&tierCode=LTAM&tierType=low'
origensPontos = ['POA','GRU','GIG']
destinosPontos = ['ATL','MIA','MDZ','BRC','LIM','CTG','ADZ','FRA']
dataPesquisa = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
dfPontosListIda =[]
dfPontosListVolta =[]
meuSaldoAtual = 22000
for origem in origensPontos:
for destino in destinosPontos:
minValue = 999999999
fraseFinal= ''
print(origem + ' -> '+ destino)
urlPontos = urlPontoLatam.format(dataInicial=dataInicial,dataFinal=dataFinal,origem=origem,destino=destino)
#print(urlDecolar)
with urllib.request.urlopen(urlPontos) as url:
s = url.read()
data = json.loads(s.decode('utf-8'))
try:
for flight in data['data']:
for cabins in flight['flights']:
paradas = cabins['stops']
dataChegada=cabins['arrival']['date']
horaChegada = cabins['arrival']['time']['hours']
minutoChegada = cabins['arrival']['time']['minutes']
overnight = cabins['arrival']['overnights']
#partida
dataPartida=cabins['departure']['date']
horaPartida = cabins['departure']['time']['hours']
minutoPartida = cabins['departure']['time']['minutes']
for price in cabins['cabins']:
dfPontosListIda.append({'DataPesquisa':dataPesquisa,'De':origem,'Para':destino,'PartidaData':dataPartida,'PartidaHora':horaPartida,'PartidaMinuto':minutoPartida,'ChegadaData':dataChegada,'ChegadaHora':horaChegada,'ChegadaMinuto':minutoChegada,'overnight':overnight,'Paradas':paradas,'pontos':price['displayPrice'],'preco':(PONTOSMULTIPLUS *price['displayPrice'])/1000,'precoMenosSaldo':(PONTOSMULTIPLUS *(price['displayPrice']-meuSaldoAtual))/1000})
dfPontosIda = pd.DataFrame.from_dict(dfPontosListIda)
except:
print('erro')
print(destino + ' -> '+ origem)
urlPontos = urlPontoLatam.format(dataInicial=dataFinal,dataFinal=dataFinal,origem=destino,destino=origem)
with urllib.request.urlopen(urlPontos) as url:
s = url.read()
data = json.loads(s.decode('utf-8'))
try:
for flight in data['data']:
for cabins in flight['flights']:
paradas = cabins['stops']
dataChegada=cabins['arrival']['date']
horaChegada = cabins['arrival']['time']['hours']
minutoChegada = cabins['arrival']['time']['minutes']
overnight = cabins['arrival']['overnights']
#partida
dataPartida=cabins['departure']['date']
horaPartida = cabins['departure']['time']['hours']
minutoPartida = cabins['departure']['time']['minutes']
for price in cabins['cabins']:
dfPontosListVolta.append({'DataPesquisa':dataPesquisa,'De':destino,'Para':origem,'PartidaData':dataPartida,'PartidaHora':horaPartida,'PartidaMinuto':minutoPartida,'ChegadaData':dataChegada,'ChegadaHora':horaChegada,'ChegadaMinuto':minutoChegada,'overnight':overnight,'Paradas':paradas,'pontos':price['displayPrice'],'valorPontos':PONTOSMULTIPLUS,'preco':(PONTOSMULTIPLUS *price['displayPrice'])/1000,'precoMenosSaldo':(PONTOSMULTIPLUS *(price['displayPrice']-meuSaldoAtual))/1000})
dfPontosVolta = pd.DataFrame.from_dict(dfPontosListVolta)
except:
print('erro')
with open('historicoPesquisaPontosIda.csv', 'a') as f:
dfPontosIda.to_csv(f, mode='a',header=f.tell()==0)
with open('historicoPesquisaPontosVolta.csv', 'a') as f:
dfPontosVolta.to_csv(f, mode='a',header=f.tell()==0)
#dfLoadPontosIda = pd.read_csv("historicoPesquisaPontosIda.csv")
#dfLoadPontosVolta = pd.read_csv("historicoPesquisaPontosVolta.csv")
#dfPontosC = dfLoadPontosVolta[['DataPesquisa','De','Para','PartidaData','PartidaHora', 'PartidaMinuto','ChegadaData', 'ChegadaHora', 'ChegadaMinuto','Paradas','overnight', 'pontos', 'preco','precoMenosSaldo']]
#display(dfPontosC.sort_values(by='preco',ascending=True).style.background_gradient(cmap='OrRd'))
uriPontos = 'https://www.pontosmultiplus.com.br/service/facilities/handle-points'
#dfT = dfLastSearch
#dfTeste = dfT[dfT['DataPesquisaDATA']=='24/01/2020']
#dfTeste = pd.pivot_table(dfLastSearch,values='preco',index=['de','para','Ida'],columns='Volta')
#dfTeste.fillna(0,inplace=True)
#display(dfTeste.style.background_gradient(cmap='OrRd'))
#POSTMAN ONE WAY
import requests
dataInicial = '2020-04-08'
dataFinal = '2020-04-22'
origens = ['POA','GRU','GIG','BSB','FOR']
destinos = ['ATL','MIA']
url = "https://www.decolar.com/shop/flights-busquets/api/v1/web/search"
for origem in origens:
for destino in destinos:
querystring = {"adults":"1","limit":"4","site":"BR","channel":"site","from":"{origem}".format(origem=origem),"to":"{destino}".format(destino=destino),"departureDate":"2020-03-04","orderBy":"total_price_ascending","viewMode":"CLUSTER","language":"pt_BR","h":"38dc1f66dbf4f5c8df105321c3286b5c","flow":"SEARCH","clientType":"WEB","initialOrigins":"{origem}".format(origem=origem),"initialDestinations":"{destino}".format(destino=destino)}
headers = {
'Connection': "keep-alive",
'DNT': "1",
'X-UOW': "results-13-1579106681089",
'X-RequestId': "xzTTJ6fDfw",
'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36",
'Accept': "application/json, text/javascript, */*; q=0.01",
'X-Requested-With': "XMLHttpRequest",
'XDESP-REFERRER': "https://www.decolar.com/shop/flights/search/oneway/{origem}/{destino}/2020-03-04/2/0/0/NA/NA/NA/NA/?from=SB&di=2-0".format(origem=origem,destino=destino),
'Sec-Fetch-Site': "same-origin",
'Sec-Fetch-Mode': "cors",
'Referer': "https://www.decolar.com/shop/flights/search/oneway/{origem}/{destino}/2020-03-04/1/0/0/NA/NA/NA/NA/?from=SB&di=1-0".format(origem=origem,destino=destino),
'Accept-Encoding': "gzip, deflate, br",
'Accept-Language': "pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7",
'Cookie': 'trackerid=e1861e3a-3357-4a76-861e-3a3357ea76c0; xdesp-rand-usr=292; xdsid=C632CEAAF251AE2A72F165ECA9A4A2CA; xduid=1727A02D2FAA249C654A094113369154; _ga=GA1.2.772144563.1579011917; _gid=GA1.2.317154519.1579011917; trackeame_cookie=%7B%22id%22%3A%22UPA_e1861e3a-3357-4a76-861e-3a3357ea76c0%22%2C%22version%22%3A%225.0%22%2C%22upa_id%22%3A%22e1861e3a-3357-4a76-861e-3a3357ea76c0%22%2C%22creation_date%22%3A%222020-01-14T14%3A25%3A17Z%22%7D; __ssid=41de76d348be0e334af8e657f6801b8; _gcl_au=1.1.1367791908.1579011932; _fbp=fb.1.1579011933564.1470255143; __gads=ID=9139db3a836078f5:T=1579011933:S=ALNI_MawboBo55i9nPvoDvzaF396HudEKg; abzTestingId="{\"flightsFisherAB\":90,\"pkgImbatibleBrand_ctrl\":76,\"s_flights_s_violet_sbox_v1\":21,\"upsellingConfig\":58,\"twoOneWayForceMX\":0,\"filterLandingFlights\":41,\"s_loyalty_v2_ctrl\":5,\"s_flights_l_violet_sbox_v1\":0,\"s_flights_l_loyalty_v2\":58,\"mostProfitablePromotion\":0,\"despechecks\":72,\"s_loyalty_v2_review\":33,\"platform\":55,\"selected_radio_button\":0,\"fisher_2ow\":0,\"loyalty_non_adherents\":63,\"paymentMethod\":55,\"shifuMobileProductLabels\":0,\"obFee\":40,\"twoOneWay\":0,\"s_violet_sbox_v1\":17,\"s_flights_s_loyalty_v2\":14,\"flights_loyalty_non_adherents\":63,\"pkgImbatibleBrand-ctrl\":60,\"crossBorderTicketing\":0}; chktkn=ask3r5kj6ed0ksqrs7eio4cebk; searchId=243920d8-49cc-4271-972a-60d05221ef20; _gat_UA-36944350-2=1,trackerid=e1861e3a-3357-4a76-861e-3a3357ea76c0; xdesp-rand-usr=292; xdsid=C632CEAAF251AE2A72F165ECA9A4A2CA; xduid=1727A02D2FAA249C654A094113369154; _ga=GA1.2.772144563.1579011917; _gid=GA1.2.317154519.1579011917; trackeame_cookie=%7B%22id%22%3A%22UPA_e1861e3a-3357-4a76-861e-3a3357ea76c0%22%2C%22version%22%3A%225.0%22%2C%22upa_id%22%3A%22e1861e3a-3357-4a76-861e-3a3357ea76c0%22%2C%22creation_date%22%3A%222020-01-14T14%3A25%3A17Z%22%7D; __ssid=41de76d348be0e334af8e657f6801b8; _gcl_au=1.1.1367791908.1579011932; _fbp=fb.1.1579011933564.1470255143; __gads=ID=9139db3a836078f5:T=1579011933:S=ALNI_MawboBo55i9nPvoDvzaF396HudEKg; abzTestingId="{\"flightsFisherAB\":90,\"pkgImbatibleBrand_ctrl\":76,\"s_flights_s_violet_sbox_v1\":21,\"upsellingConfig\":58,\"twoOneWayForceMX\":0,\"filterLandingFlights\":41,\"s_loyalty_v2_ctrl\":5,\"s_flights_l_violet_sbox_v1\":0,\"s_flights_l_loyalty_v2\":58,\"mostProfitablePromotion\":0,\"despechecks\":72,\"s_loyalty_v2_review\":33,\"platform\":55,\"selected_radio_button\":0,\"fisher_2ow\":0,\"loyalty_non_adherents\":63,\"paymentMethod\":55,\"shifuMobileProductLabels\":0,\"obFee\":40,\"twoOneWay\":0,\"s_violet_sbox_v1\":17,\"s_flights_s_loyalty_v2\":14,\"flights_loyalty_non_adherents\":63,\"pkgImbatibleBrand-ctrl\":60,\"crossBorderTicketing\":0}"; chktkn=ask3r5kj6ed0ksqrs7eio4cebk; searchId=243920d8-49cc-4271-972a-60d05221ef20; _gat_UA-36944350-2=1; xdsid=DCF9EDC0035E07BEDBFEE30E55F725C5; xduid=55D857BEFC5E27A8B84A7407D4A86B38; xdesp-rand-usr=292; abzTestingId="{\"flightsFisherAB\":90,\"pkgImbatibleBrand_ctrl\":76,\"s_flights_s_violet_sbox_v1\":21,\"upsellingConfig\":58,\"twoOneWayForceMX\":0,\"filterLandingFlights\":41,\"s_loyalty_v2_ctrl\":5,\"s_flights_l_violet_sbox_v1\":0,\"s_flights_l_loyalty_v2\":58,\"mostProfitablePromotion\":0,\"despechecks\":72,\"s_loyalty_v2_review\":33,\"platform\":55,\"selected_radio_button\":0,\"fisher_2ow\":0,\"loyalty_non_adherents\":63,\"paymentMethod\":55,\"shifuMobileProductLabels\":0,\"obFee\":40,\"twoOneWay\":0,\"s_violet_sbox_v1\":17,\"s_flights_s_loyalty_v2\":14,\"flights_loyalty_non_adherents\":63,\"pkgImbatibleBrand-ctrl\":60,\"crossBorderTicketing\":0}',
'Cache-Control': "no-cache",
'Postman-Token': "4c6c6b9f-ed0a-477f-a787-c8cde039475b,4e35a9da-93ed-4602-825a-283f619d543b",
'Host': "www.decolar.com",
'cache-control': "no-cache"
}
response = requests.request("GET", url, headers=headers, params=querystring)
dataOneWay = json.loads(response.text)
print(origem, '->' , destino)
print(querystring)
print(dataOneWay)
if 'clusters' in dataOneWay:
for i in dataOneWay['clusters']:
print(i['priceDetail']['mainFare']['amount'])
```
|
github_jupyter
|
Copyright 2020 The Google Research Authors.
Licensed under the Apache License, Version 2.0 (the "License"); You may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
```
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from pathlib import Path
# dictionary of metrics to plot (each metric is shown in an individual plot)
# dictionary key is name of metric in logs.csv, dict value is label in the final plot
plot_metrics = {'ens_acc': 'Test accuracy', # Ensemble accuracy
'ens_ce': 'Test cross entropy'} # Ensemble cross entropy
# directory of results
# should include 'run_sweeps.csv', generated by run_resnet_experiments.sh
results_dir = '/tmp/google_research/cold_posterior_bnn/results_resnet/'
# load csv with results of all runs
sweeps_df = pd.read_csv(results_dir+'run_sweeps.csv').set_index('id')
# add final performance of run as columns to sweep_df
for metric in plot_metrics.keys():
sweeps_df[metric] = [0.] * len(sweeps_df)
for i in range(len(sweeps_df)):
# get logs of run
log_dir = sweeps_df.loc[i, 'dir']
logs_df = pd.read_csv('{}{}/logs.csv'.format(results_dir, log_dir))
for metric in plot_metrics:
# get final performace of run and add to df
idx = 0
final_metric = float('nan')
while np.isnan(final_metric):
idx += 1
final_metric = logs_df.tail(idx)[metric].values[0] # indexing starts with 1
sweeps_df.at[i, metric] = final_metric
# save/update csv file
sweeps_df.to_csv(results_dir+'run_sweeps.csv')
# plot
font_scale = 1.1
line_width = 3
marker_size = 7
cm_lines = sns.color_palette('deep')
cm_points = sns.color_palette('bright')
# style settings
sns.reset_defaults()
sns.set_context("notebook", font_scale=font_scale,
rc={"lines.linewidth": line_width,
"lines.markersize" :marker_size}
)
sns.set_style("whitegrid")
for metric, metric_label in plot_metrics.items():
# plot SG-MCMC
fig, ax = plt.subplots(figsize=(7.0, 2.85))
g = sns.lineplot(x='temperature', y=metric, data=sweeps_df, marker='o', label='SG-MCMC', color=cm_lines[0], zorder=2, ci='sd')
# finalize plot
plt.legend(loc=3, fontsize=14)
g.set_xscale('log')
#g.set_ylim(bottom=0.88, top=0.94)
g.set_xlim(left=1e-4, right=1)
fig.tight_layout()
ax.set_frame_on(False)
ax.set_xlabel('Temperature $T$')
ax.set_ylabel(metric_label)
ax.margins(0,0)
plt.savefig('{}resnet_{}.pdf'.format(results_dir, metric_label), format="pdf", dpi=300, bbox_inches="tight", pad_inches=0)
```
|
github_jupyter
|
```
# from numba import jit
# from tqdm import trange
# import pandas as pd
# eo_df = pd.read_csv("/mnt/sda1/cvpr21/Classification/Aerial-View-Object-Classification/data/train_EO.csv")
# eo_df = eo_df.sort_values(by='img_name')
# sar_df = pd.read_csv("/mnt/sda1/cvpr21/Classification/Aerial-View-Object-Classification/data/train_SAR.csv")
# sar_df = sar_df.sort_values(by='img_name')
# @jit()
# def equal():
# notsame_image = 0
# notsame_label = 0
# t = trange(len(sar_df))
# for i in t:
# t.set_postfix({'nums of not same label:': notsame_label})
# eo_label = next(eo_df.iterrows())[1].class_id
# sar_label = next(sar_df.iterrows())[1].class_id
# # if not eo_image == sar_image:
# # notsame_image += 1
# if not eo_label == sar_label:
# notsame_label += 1
# # notsame_label += 1
# # print("nums of not same imageid:", notsame_image)
# #print("nums of not same label:", notsame_label)
# equal()
from __future__ import print_function, division
import torch
import math
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
from torch.autograd import Variable
import random
import torch.nn.functional as F
exp_num = "45_kd_sar-teacher_eo-student_pretrain-on-sar"
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
# tf.set_random_seed(seed)
seed = 2019
seed_everything(seed)
#https://github.com/4uiiurz1/pytorch-auto-augment
import random
import numpy as np
import scipy
from scipy import ndimage
from PIL import Image, ImageEnhance, ImageOps
class AutoAugment(object):
def __init__(self):
self.policies = [
['Invert', 0.1, 7, 'Contrast', 0.2, 6],
['Rotate', 0.7, 2, 'TranslateX', 0.3, 9],
['Sharpness', 0.8, 1, 'Sharpness', 0.9, 3],
['ShearY', 0.5, 8, 'TranslateY', 0.7, 9],
['AutoContrast', 0.5, 8, 'Equalize', 0.9, 2],
['ShearY', 0.2, 7, 'Posterize', 0.3, 7],
['Color', 0.4, 3, 'Brightness', 0.6, 7],
['Sharpness', 0.3, 9, 'Brightness', 0.7, 9],
['Equalize', 0.6, 5, 'Equalize', 0.5, 1],
['Contrast', 0.6, 7, 'Sharpness', 0.6, 5],
['Color', 0.7, 7, 'TranslateX', 0.5, 8],
['Equalize', 0.3, 7, 'AutoContrast', 0.4, 8],
['TranslateY', 0.4, 3, 'Sharpness', 0.2, 6],
['Brightness', 0.9, 6, 'Color', 0.2, 8],
['Solarize', 0.5, 2, 'Invert', 0.0, 3],
['Equalize', 0.2, 0, 'AutoContrast', 0.6, 0],
['Equalize', 0.2, 8, 'Equalize', 0.6, 4],
['Color', 0.9, 9, 'Equalize', 0.6, 6],
['AutoContrast', 0.8, 4, 'Solarize', 0.2, 8],
['Brightness', 0.1, 3, 'Color', 0.7, 0],
['Solarize', 0.4, 5, 'AutoContrast', 0.9, 3],
['TranslateY', 0.9, 9, 'TranslateY', 0.7, 9],
['AutoContrast', 0.9, 2, 'Solarize', 0.8, 3],
['Equalize', 0.8, 8, 'Invert', 0.1, 3],
['TranslateY', 0.7, 9, 'AutoContrast', 0.9, 1],
]
def __call__(self, img):
img = apply_policy(img, self.policies[random.randrange(len(self.policies))])
return img
operations = {
'ShearX': lambda img, magnitude: shear_x(img, magnitude),
'ShearY': lambda img, magnitude: shear_y(img, magnitude),
'TranslateX': lambda img, magnitude: translate_x(img, magnitude),
'TranslateY': lambda img, magnitude: translate_y(img, magnitude),
'Rotate': lambda img, magnitude: rotate(img, magnitude),
'AutoContrast': lambda img, magnitude: auto_contrast(img, magnitude),
'Invert': lambda img, magnitude: invert(img, magnitude),
'Equalize': lambda img, magnitude: equalize(img, magnitude),
'Solarize': lambda img, magnitude: solarize(img, magnitude),
'Posterize': lambda img, magnitude: posterize(img, magnitude),
'Contrast': lambda img, magnitude: contrast(img, magnitude),
'Color': lambda img, magnitude: color(img, magnitude),
'Brightness': lambda img, magnitude: brightness(img, magnitude),
'Sharpness': lambda img, magnitude: sharpness(img, magnitude),
'Cutout': lambda img, magnitude: cutout(img, magnitude),
}
def apply_policy(img, policy):
if random.random() < policy[1]:
img = operations[policy[0]](img, policy[2])
if random.random() < policy[4]:
img = operations[policy[3]](img, policy[5])
return img
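# --- Added usage sketch (commented out; the PIL image below is synthetic, not from the dataset) ---
# _probe = Image.fromarray(np.uint8(np.random.rand(32, 32, 3) * 255))
# _aug = AutoAugment()
# _probe_aug = _aug(_probe)  # applies one randomly chosen (op1, p1, mag1, op2, p2, mag2) policy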
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = offset_matrix @ matrix @ reset_matrix
return transform_matrix
def shear_x(img, magnitude):
img = np.array(img)
magnitudes = np.linspace(-0.3, 0.3, 11)
transform_matrix = np.array([[1, random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]), 0],
[0, 1, 0],
[0, 0, 1]])
transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1])
affine_matrix = transform_matrix[:2, :2]
offset = transform_matrix[:2, 2]
img = np.stack([ndimage.interpolation.affine_transform(
img[:, :, c],
affine_matrix,
offset) for c in range(img.shape[2])], axis=2)
img = Image.fromarray(img)
return img
def shear_y(img, magnitude):
img = np.array(img)
magnitudes = np.linspace(-0.3, 0.3, 11)
transform_matrix = np.array([[1, 0, 0],
[random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]), 1, 0],
[0, 0, 1]])
transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1])
affine_matrix = transform_matrix[:2, :2]
offset = transform_matrix[:2, 2]
img = np.stack([ndimage.interpolation.affine_transform(
img[:, :, c],
affine_matrix,
offset) for c in range(img.shape[2])], axis=2)
img = Image.fromarray(img)
return img
def translate_x(img, magnitude):
img = np.array(img)
magnitudes = np.linspace(-150/331, 150/331, 11)
transform_matrix = np.array([[1, 0, 0],
[0, 1, img.shape[1]*random.uniform(magnitudes[magnitude], magnitudes[magnitude+1])],
[0, 0, 1]])
transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1])
affine_matrix = transform_matrix[:2, :2]
offset = transform_matrix[:2, 2]
img = np.stack([ndimage.interpolation.affine_transform(
img[:, :, c],
affine_matrix,
offset) for c in range(img.shape[2])], axis=2)
img = Image.fromarray(img)
return img
def translate_y(img, magnitude):
img = np.array(img)
magnitudes = np.linspace(-150/331, 150/331, 11)
transform_matrix = np.array([[1, 0, img.shape[0]*random.uniform(magnitudes[magnitude], magnitudes[magnitude+1])],
[0, 1, 0],
[0, 0, 1]])
transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1])
affine_matrix = transform_matrix[:2, :2]
offset = transform_matrix[:2, 2]
img = np.stack([ndimage.interpolation.affine_transform(
img[:, :, c],
affine_matrix,
offset) for c in range(img.shape[2])], axis=2)
img = Image.fromarray(img)
return img
def rotate(img, magnitude):
img = np.array(img)
magnitudes = np.linspace(-30, 30, 11)
theta = np.deg2rad(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))
transform_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1])
affine_matrix = transform_matrix[:2, :2]
offset = transform_matrix[:2, 2]
img = np.stack([ndimage.interpolation.affine_transform(
img[:, :, c],
affine_matrix,
offset) for c in range(img.shape[2])], axis=2)
img = Image.fromarray(img)
return img
def auto_contrast(img, magnitude):
img = ImageOps.autocontrast(img)
return img
def invert(img, magnitude):
img = ImageOps.invert(img)
return img
def equalize(img, magnitude):
img = ImageOps.equalize(img)
return img
def solarize(img, magnitude):
magnitudes = np.linspace(0, 256, 11)
img = ImageOps.solarize(img, random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))
return img
def posterize(img, magnitude):
magnitudes = np.linspace(4, 8, 11)
img = ImageOps.posterize(img, int(round(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))))
return img
def contrast(img, magnitude):
magnitudes = np.linspace(0.1, 1.9, 11)
img = ImageEnhance.Contrast(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))
return img
def color(img, magnitude):
magnitudes = np.linspace(0.1, 1.9, 11)
img = ImageEnhance.Color(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))
return img
def brightness(img, magnitude):
magnitudes = np.linspace(0.1, 1.9, 11)
img = ImageEnhance.Brightness(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))
return img
def sharpness(img, magnitude):
magnitudes = np.linspace(0.1, 1.9, 11)
img = ImageEnhance.Sharpness(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))
return img
def cutout(org_img, magnitude=None):
org_img = np.array(org_img)  # convert the input PIL image to an array (original line referenced an undefined name)
magnitudes = np.linspace(0, 60/331, 11)
img = np.copy(org_img)
mask_val = img.mean()
if magnitude is None:
mask_size = 16
else:
mask_size = int(round(img.shape[0]*random.uniform(magnitudes[magnitude], magnitudes[magnitude+1])))
top = np.random.randint(0 - mask_size//2, img.shape[0] - mask_size)
left = np.random.randint(0 - mask_size//2, img.shape[1] - mask_size)
bottom = top + mask_size
right = left + mask_size
if top < 0:
top = 0
if left < 0:
left = 0
img[top:bottom, left:right, :].fill(mask_val)
img = Image.fromarray(img)
return img
class Cutout(object):
def __init__(self, length=16):
self.length = length
def __call__(self, img):
img = np.array(img)
mask_val = img.mean()
top = np.random.randint(0 - self.length//2, img.shape[0] - self.length)
left = np.random.randint(0 - self.length//2, img.shape[1] - self.length)
bottom = top + self.length
right = left + self.length
top = 0 if top < 0 else top
left = 0 if left < 0 else left
img[top:bottom, left:right, :] = mask_val
img = Image.fromarray(img)
return img
```
### MIXUP
```
alpha_ = 0.4
# def mixup_data(x, y, alpha=alpha_, use_cuda=True):
# if alpha > 0:
# lam = np.random.beta(alpha, alpha)
# else:
# lam = 1
# batch_size = x.size()[0]
# if use_cuda:
# index = torch.randperm(batch_size).cuda()
# else:
# index = torch.randperm(batch_size)
# mixed_x = lam * x + (1 - lam) * x[index, :]
# y_a, y_b = y, y[index]
# return mixed_x, y_a, y_b, lam
# def mixup_criterion(criterion, pred, y_a, y_b, lam):
# return lam * criterion(pred.float().cuda(), y_a.float().cuda()) + (1 - lam) * criterion(pred.float().cuda(), y_b.float().cuda())
def mixup_data(x, y, alpha=1.0, use_cuda=True):
'''Returns mixed inputs, pairs of targets, and lambda'''
if alpha > 0:
lam = np.random.beta(alpha, alpha)
else:
lam = 1
batch_size = x.size()[0]
if use_cuda:
index = torch.randperm(batch_size).cuda()
else:
index = torch.randperm(batch_size)
mixed_x = lam * x + (1 - lam) * x[index, :]
# print(y)
y_a, y_b = y, y[index]
return mixed_x, y_a, y_b, lam
def mixup_criterion(criterion, pred, y_a, y_b, lam):
return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
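# --- Added usage sketch for mixup (commented out; tensors and `some_model` are placeholders) ---
# _xb = torch.randn(4, 1, 30, 30)      # fake batch of 4 single-channel images
# _yb = torch.tensor([0, 1, 2, 3])     # fake integer class labels
# _mx, _ya, _yb2, _lam = mixup_data(_xb, _yb, alpha=alpha_, use_cuda=False)
# loss = mixup_criterion(nn.CrossEntropyLoss(), some_model(_mx), _ya, _yb2, _lam)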
class ConcatDataset(torch.utils.data.Dataset):
def __init__(self, *datasets):
self.datasets = datasets
def __getitem__(self, i):
return tuple(d[i] for d in self.datasets)
def __len__(self):
return min(len(d) for d in self.datasets)
plt.ion() # interactive mode
EO_data_transforms = {
'Training': transforms.Compose([
transforms.Grayscale(num_output_channels=3),
transforms.Resize((30,30)),
AutoAugment(),
Cutout(),
# transforms.RandomRotation(15,),
# transforms.RandomResizedCrop(30),
# transforms.RandomHorizontalFlip(),
# transforms.RandomVerticalFlip(),
transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
transforms.Normalize([0.2913437], [0.12694514])
#transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
# transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
#transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'Test': transforms.Compose([
transforms.Grayscale(num_output_channels=1),
transforms.Resize(30),
transforms.ToTensor(),
transforms.Normalize([0.2913437], [0.12694514])
#transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
# transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
# transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'valid_EO': transforms.Compose([
transforms.Grayscale(num_output_channels=1),
transforms.Resize((30,30)),
# AutoAugment(),
# transforms.RandomRotation(15,),
# transforms.RandomResizedCrop(48),
# transforms.RandomHorizontalFlip(),
# transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.2913437], [0.12694514])
# transforms.Grayscale(num_output_channels=1),
# transforms.Resize(48),
# transforms.ToTensor(),
# transforms.Normalize([0.5], [0.5])
# transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
# transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
}
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'Training': transforms.Compose([
transforms.Grayscale(num_output_channels=1),
transforms.Resize((52,52)),
transforms.RandomRotation(15,),
transforms.RandomResizedCrop(48),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.4062625], [0.12694514])
#transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
# transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
#transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'Test': transforms.Compose([
transforms.Grayscale(num_output_channels=1),
transforms.Resize(48),
transforms.ToTensor(),
transforms.Normalize([0.4062625], [0.12694514])
#transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
# transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
# transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
'valid': transforms.Compose([
transforms.Grayscale(num_output_channels=1),
transforms.Resize((52,52)),
transforms.RandomRotation(15,),
transforms.RandomResizedCrop(48),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.4062625], [0.12694514])
# transforms.Grayscale(num_output_channels=1),
# transforms.Resize(48),
# transforms.ToTensor(),
# transforms.Normalize([0.5], [0.5])
# transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
# transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
]),
}
# data_dir = '/mnt/sda1/cvpr21/Classification/ram'
# EO_image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
# EO_data_transforms[x])
# for x in ['Training', 'Test']}
# EO_dataloaders = {x: torch.utils.data.DataLoader(EO_image_datasets[x], batch_size=256,
# shuffle=True, num_workers=64, pin_memory=True)
# for x in ['Training', 'Test']}
# EO_dataset_sizes = {x: len(EO_image_datasets[x]) for x in ['Training', 'Test']}
# EO_class_names = EO_image_datasets['Training'].classes
# image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
# data_transforms[x])
# for x in ['Training', 'Test']}
# combine_dataset = ConcatDataset(EO_image_datasets, image_datasets)
# dataloaders = {x: torch.utils.data.DataLoader(combine_dataset[x], batch_size=256,
# shuffle=True, num_workers=64, pin_memory=True)
# for x in ['Training', 'Test']}
# dataset_sizes = {x: len(image_datasets[x]) for x in ['Training', 'Test']}
# class_names = image_datasets['Training'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# def imshow(inp, title=None):
# """Imshow for Tensor."""
# inp = inp.numpy().transpose((1, 2, 0))
# # mean = np.array([0.1786, 0.4739, 0.5329])
# # std = np.array([[0.0632, 0.1361, 0.0606]])
# # inp = std * inp + mean
# inp = np.clip(inp, 0, 1)
# plt.imshow(inp)
# if title is not None:
# plt.title(title)
# plt.pause(0.001) # pause a bit so that plots are updated
# # Get a batch of training data
# EO_inputs, EO_classes = next(iter(EO_dataloaders['Training']))
# inputs, classes, k ,_= next(iter(dataloaders))
# # Make a grid from batch
# EO_out = torchvision.utils.make_grid(EO_inputs)
# out = torchvision.utils.make_grid(inputs)
# imshow(EO_out, title=[EO_class_names[x] for x in classes])
# imshow(out, title=[class_names[x] for x in classes])
from torch.utils import data
from tqdm import tqdm
from PIL import Image
output_dim = 10
class SAR_EO_Combine_Dataset(data.Dataset):
def __init__(self,df_sar,dirpath_sar,transform_sar,df_eo=None,dirpath_eo=None,transform_eo=None,test = False):
self.df_sar = df_sar
self.test = test
self.dirpath_sar = dirpath_sar
self.transform_sar = transform_sar
self.df_eo = df_eo
# self.test = test
self.dirpath_eo = dirpath_eo
self.transform_eo = transform_eo
#image data
# if not self.test:
# self.image_arr = np.asarray(str(self.dirpath)+'/'+self.df.iloc[:, 0]+'.png')
# else:
# self.image_arr = np.asarray(str(self.dirpath)+'/'+self.df.iloc[:, 0])
# #labels data
# if not self.test:
# self.label_df = self.df.iloc[:,1]
# Calculate length of df
self.data_len = len(self.df_sar.index)
def __len__(self):
return self.data_len
def __getitem__(self, idx):
image_name_sar = self.df_sar.img_name[idx]
image_name_sar = os.path.join(self.dirpath_sar, image_name_sar)
img_sar = Image.open(image_name_sar)#.convert('RGB')
img_tensor_sar = self.transform_sar(img_sar)
image_name_eo = self.df_eo.img_name[idx]
image_name_eo = os.path.join(self.dirpath_eo, image_name_eo)
img_eo = Image.open(image_name_eo)#.convert('RGB')
img_tensor_eo = self.transform_eo(img_eo)
# image_name = self.df.img_name[idx]
# img = Image.open(image_name)#.convert('RGB')
# img_tensor = self.transform(img)
if not self.test:
image_labels = int(self.df_sar.class_id[idx])
# label_tensor = torch.zeros((1, output_dim))
# for label in image_labels.split():
# label_tensor[0, int(label)] = 1
image_label = torch.tensor(image_labels,dtype= torch.long)
image_label = image_label.squeeze()
image_labels_eo = int(self.df_eo.class_id[idx])
# label_tensor_eo = torch.zeros((1, output_dim))
# for label_eo in image_labels_eo.split():
# label_tensor_eo[0, int(label_eo)] = 1
image_label_eo = torch.tensor(image_labels_eo,dtype= torch.long)
image_label_eo = image_label_eo.squeeze()
# print(image_label_eo)
return (img_tensor_sar,image_label), (img_tensor_eo, image_label_eo)
return (img_tensor_sar)
class SAR_EO_Combine_Dataset2(data.Dataset):
def __init__(self,df_sar,dirpath_sar,transform_sar,df_eo=None,dirpath_eo=None,transform_eo=None,test = False):
self.df_sar = df_sar
self.test = test
self.dirpath_sar = dirpath_sar
self.transform_sar = transform_sar
self.df_eo = df_eo
# self.test = test
self.dirpath_eo = dirpath_eo
self.transform_eo = transform_eo
#image data
# if not self.test:
# self.image_arr = np.asarray(str(self.dirpath)+'/'+self.df.iloc[:, 0]+'.png')
# else:
# self.image_arr = np.asarray(str(self.dirpath)+'/'+self.df.iloc[:, 0])
# #labels data
# if not self.test:
# self.label_df = self.df.iloc[:,1]
# Calculate length of df
self.data_len = len(self.df_sar.index)
def __len__(self):
return self.data_len
def __getitem__(self, idx):
image_name_sar = self.df_sar.img_name[idx]
image_name_sar = os.path.join(self.dirpath_sar, image_name_sar)
img_sar = Image.open(image_name_sar)#.convert('RGB')
img_tensor_sar = self.transform_sar(img_sar)
image_name_eo = self.df_eo.img_name[idx]
image_name_eo = os.path.join(self.dirpath_eo, image_name_eo)
img_eo = Image.open(image_name_eo)#.convert('RGB')
img_tensor_eo = self.transform_eo(img_eo)
# image_name = self.df.img_name[idx]
# img = Image.open(image_name)#.convert('RGB')
# img_tensor = self.transform(img)
if not self.test:
image_labels = int(self.df_sar.class_id[idx])
# label_tensor = torch.zeros((1, output_dim))
# for label in image_labels.split():
# label_tensor[0, int(label)] = 1
image_label = torch.tensor(image_labels,dtype= torch.long)
image_label = image_label.squeeze()
image_labels_eo = int(self.df_eo.class_id[idx])
# label_tensor_eo = torch.zeros((1, output_dim))
# for label_eo in image_labels_eo.split():
# label_tensor_eo[0, int(label_eo)] = 1
image_label_eo = torch.tensor(image_labels_eo,dtype= torch.long)
image_label_eo = image_label_eo.squeeze()
# print(image_label_eo)
return (img_tensor_sar,image_label), (img_tensor_eo, image_label_eo)
return (img_tensor_sar)
import pandas as pd
eo_df = pd.read_csv("/home/hans/sandisk/dataset_mover/kd_train_EO.csv")
eo_df = eo_df.sort_values(by='img_name')
sar_df = pd.read_csv("/home/hans/sandisk/dataset_mover/kd_train_SAR.csv")
sar_df = sar_df.sort_values(by='img_name')
eo_test_df = pd.read_csv("/home/hans/sandisk/dataset_mover/kd_test_EO.csv")
eo_test_df = eo_test_df.sort_values(by='img_name')
sar_test_df = pd.read_csv("/home/hans/sandisk/dataset_mover/kd_test_SAR.csv")
sar_test_df = sar_test_df.sort_values(by='img_name')
BATCH_SIZE = 512
dirpath_sar = "/home/hans/sandisk/dataset_mover/kd_train_SAR"
dirpath_eo = "/home/hans/sandisk/dataset_mover/kd_train_EO"
SAR_EO_Combine = SAR_EO_Combine_Dataset(sar_df,dirpath_sar,data_transforms["Test"],eo_df,dirpath_eo,EO_data_transforms["Training"],test = False)
testpath_sar = "/home/hans/sandisk/dataset_mover/kd_val_SAR"
testpath_eo = "/home/hans/sandisk/dataset_mover/kd_val_EO"
test_set = SAR_EO_Combine_Dataset(sar_test_df,testpath_sar,data_transforms["Test"],eo_test_df,testpath_eo,EO_data_transforms["Test"],test = False)
# test_loader = data.DataLoader(dataset=test_dataset,batch_size=BATCH_SIZE,shuffle=False)
train_size = len(SAR_EO_Combine)
test_size = len(test_set)
# from sklearn.model_selection import train_test_split
# train_dataset, test_dataset = train_test_split(SAR_EO_Combine[0], SAR_EO_Combine[2], test_size=0.2, random_state=2017, stratify = SAR_EO_Combine[2])
# train_dataset, test_dataset = torch.utils.data.random_split(SAR_EO_Combine, [train_size, test_size])
data_loader = data.DataLoader(dataset=SAR_EO_Combine,batch_size=BATCH_SIZE,shuffle=True,pin_memory = True)
test_loader = data.DataLoader(dataset=test_set,batch_size=BATCH_SIZE,shuffle=True,pin_memory = True)
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
# mean = np.array([0.1786, 0.4739, 0.5329])
# std = np.array([[0.0632, 0.1361, 0.0606]])
# inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
SAR, EO = next(iter(data_loader))
# Get a batch of training data
EO_inputs, EO_classes = EO[0],EO[1]
inputs, classes = SAR[0],SAR[1]
# EO_class_names = SAR_EO_Combine.image_label
# Make a grid from batch
EO_out = torchvision.utils.make_grid(EO_inputs)
out = torchvision.utils.make_grid(inputs)
imshow(EO_out)#, title=[EO_class_names[x] for x in classes])
imshow(out)#, title=[class_names[x] for x in classes])
print(len(EO_classes))
print(classes)
# a = 0
# for i in range(500):
# SAR, EO = next(iter(data_loader))
# # Get a batch of training data
# EO_inputs, EO_classes = EO[0],EO[1]
# inputs, classes = SAR[0],SAR[1]
# if 9 in classes:
# a+=1
# print(a)
```
### check if pairing succeeded
```
# from tqdm import trange
# def equal():
# notsame_image = 0
# notsame_label = 0
# t = trange(len(sar_df))
# for i in t:
# t.set_postfix({'nums of not same label:': notsame_label})
# sar, eo = next(iter(data_loader))
# eo_label = eo[1][0].tolist()
# sar_label = sar[1][0].tolist()
# # print(eo_label)
# # print(sar_label)
# # if not eo_image == sar_image:
# # notsame_image += 1
# # eoval = next(eo_label)
# # sarval = next(sar_label)
# if not eo_label==sar_label:
# notsame_label += 1
# # notsame_label += 1
# # print("nums of not same imageid:", notsame_image)
# #print("nums of not same label:", notsame_label)
# equal()
# #next(iter(data_loader))
len(sar_df) == len(eo_df)
next(eo_df.iterrows())[1]
Num_class=10
num_classes = Num_class
num_channel = 1
# model_ft = models.resnet34(pretrained=False)
model_ft = torch.load("10/pre_resnet34_model_epoch99.pt") ## Attention: change this to the path of the pre_EO.pt file, which is located in the repo folder pre-train
# model_ft.conv1 = nn.Conv2d(num_channel, 64, kernel_size=7, stride=2, padding=3,bias=False)
# # model_ft.avgpool = SpatialPyramidPooling((3,3))
# model_ft.fc = nn.Linear(512, Num_class)
# model_ft.conv0 = nn.Conv2d(
# model_ft.features[0] = nn.Conv2d(num_channel, 16, kernel_size=3, stride=2, padding=1,bias=False)
# model_ft.classifier[3] = nn.Linear(1024, Num_class, bias=True)
model_ft.eval()
data_dir = '/mnt/sda1/cvpr21/Classification/ram'
weights = []
for i in range(len(os.listdir(os.path.join(data_dir, "Training")))):
img_num = len([lists for lists in os.listdir(os.path.join(data_dir, "Training",str(i)))])
print('filenum:',len([lists for lists in os.listdir(os.path.join(data_dir, "Training",str(i)))]))# if os.path.isfile(os.path.join(data_dir, lists))]))
weights.append(img_num)
print(weights)
weights = torch.tensor(weights, dtype=torch.float32).cuda()
weights = weights / weights.sum()
print(weights)
weights = 1.0 / weights
weights = weights / weights.sum()
print(weights)
```
### Teacher model (SAR)
```
netT = torch.load("10/resnet34_model_epoch119.pt") ## Attention: change this to the path of the pre_SAR.pt file, which is located in the repo folder pre-train
# netT = torch.load('29_auto_aug_eo_sar_noimagenet/pre_resnet34_eo_epoch99.pt')
criterion2 = nn.KLDivLoss()
netT.eval()
from tqdm.notebook import trange
from tqdm import tqdm_notebook as tqdm
import warnings
warnings.filterwarnings('ignore')
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
print("---------------Start KD FIT( TEACHER AND STUDENT )-----------------")
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
best_train_acc = 0.0
kd_alpha = 0.2
Loss_list = []
Accuracy_list = []
T_Loss_list = []
T_Accuracy_list = []
for epoch in trange(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
# Each epoch has a training and validation phase
for phase in ['Training', 'Test']:#['Test','Training']: #:
if phase == 'Training':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
if phase == 'Training':
for SAR, EO in tqdm(data_loader):
inputs, labels = EO[0], EO[1]
inputs = inputs.to(device)
labels = labels.to(device)
T_input, T_labels = SAR[0], SAR[1]
T_input = T_input.to(device)
T_labels = T_labels.to(device)
# print(T_labels, labels)
# labels = torch.argmax(labels, 0)
# T_labels = torch.argmax(T_labels, 0)
# confusion_matrix = torch.zeros(10, 10)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.no_grad():
soft_target = netT(T_input)
with torch.set_grad_enabled(phase == 'Training'):
outputs = model(inputs) # _,
#print(outputs.dim())
_, preds = torch.max(outputs, 1)
# for t, p in zip(labels.view(-1), preds.view(-1)):
# confusion_matrix[t.long(), p.long()] += 1
# print(confusion_matrix.diag()/confusion_matrix.sum(1))
# _, T_preds = torch.max(soft_target, 1)
T = 2
outputs_S = F.log_softmax(outputs/T, dim=1)
outputs_T = F.softmax(soft_target/T, dim=1)
# print(outputs_S.size())
# print(outputs_T.size())
loss2 = criterion2(outputs_S, outputs_T) * T * T
#print(preds)
if phase == 'Training':
inputs, y_a, y_b, lam = mixup_data(inputs, labels)
inputs, y_a, y_b = map(Variable, (inputs, y_a, y_b))
# print(y_a)
# print(y_b)
loss = mixup_criterion(criterion, outputs, y_a, y_b, lam)
loss = loss*(1-kd_alpha) + loss2*kd_alpha
else:
loss = criterion(outputs, labels)
loss = loss*(1-kd_alpha) + loss2*kd_alpha
# backward + optimize only if in training phase
if phase == 'Training':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'Training':
scheduler.step()
epoch_loss = running_loss / train_size
epoch_acc = running_corrects.double() / train_size
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
if phase == "Training":
Loss_list.append(epoch_loss)
Accuracy_list.append(100 * epoch_acc)
else:
T_Loss_list.append(epoch_loss)
T_Accuracy_list.append(100 * epoch_acc)
# deep copy the model
if phase == 'Test' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
PATH = os.path.join(str(exp_num), "resnet34_kd_best.pt")#"resnet18_model_epoch{}.pt".format(epoch)
if not os.path.exists(str(exp_num)):
os.makedirs(str(exp_num))
torch.save(model, PATH)
time_elapsed = time.time() - since
print('Time from Start {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
if phase == 'Training' and epoch_acc > best_train_acc:
best_train_acc = epoch_acc
# PATH = os.path.join(str(exp_num), "resnet34_kd_best.pt")#"resnet18_model_epoch{}.pt".format(epoch)
# if not os.path.exists(str(exp_num)):
# os.makedirs(str(exp_num))
# torch.save(model, PATH)
#############################################################################
elif phase == 'Test':
acc_matrix_sum = torch.zeros(10)
for SAR, EO in tqdm(test_loader):
inputs, labels = EO[0], EO[1]
inputs = inputs.to(device)
# print(inputs)
labels = labels.to(device)
T_input, T_labels = SAR[0], SAR[1]
T_input = T_input.to(device)
T_labels = T_labels.to(device)
# print(T_labels, labels)
# labels = torch.argmax(labels, 0)
# T_labels = torch.argmax(T_labels, 0)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.no_grad():
soft_target = netT(T_input)
with torch.set_grad_enabled(phase == 'Training'):
outputs = model(inputs) # _,
#print(outputs.dim())
_, preds = torch.max(outputs, 1)
confusion_matrix = torch.zeros(10, 10)
for t, p in zip(labels.view(-1), preds.view(-1)):
confusion_matrix[t.long(), p.long()] += 1
acc_matrix_batch = (confusion_matrix.diag()/confusion_matrix.sum(1))
# _, T_preds = torch.max(soft_target, 1)
T = 2
outputs_S = F.log_softmax(outputs/T, dim=1)
outputs_T = F.softmax(soft_target/T, dim=1)
# print(outputs_S.size())
# print(outputs_T.size())
loss2 = criterion2(outputs_S, outputs_T) * T * T
#print(preds)
if phase == 'Training':
inputs, y_a, y_b, lam = mixup_data(inputs, labels)
inputs, y_a, y_b = map(Variable, (inputs, y_a, y_b))
# print(y_a)
# print(y_b)
loss = mixup_criterion(criterion, outputs, y_a, y_b, lam)
loss = loss*(1-kd_alpha) + loss2*kd_alpha
else:
loss = criterion(outputs, labels)
loss = loss*(1-kd_alpha) + loss2*kd_alpha
# backward + optimize only if in training phase
if phase == 'Training':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
acc_matrix_sum += acc_matrix_batch
acc_matrix = acc_matrix_sum / test_size
print("acc for each class: {}".format(acc_matrix))
#################
if phase == 'Training':
scheduler.step()
epoch_loss = running_loss / test_size
epoch_acc = running_corrects.double() / test_size
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
if phase == "Training":
Loss_list.append(epoch_loss)
Accuracy_list.append(100 * epoch_acc)
else:
T_Loss_list.append(epoch_loss)
T_Accuracy_list.append(100 * epoch_acc)
# deep copy the model
if phase == 'Test' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
PATH = os.path.join(str(exp_num), "resnet34_kd_best.pt")#"resnet18_model_epoch{}.pt".format(epoch)
if not os.path.exists(str(exp_num)):
os.makedirs(str(exp_num))
torch.save(model, PATH)
time_elapsed = time.time() - since
print('Time from Start {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
if phase == 'Training' and epoch_acc > best_train_acc:
best_train_acc = epoch_acc
# PATH = os.path.join(str(exp_num), "resnet34_kd_best.pt")#"resnet18_model_epoch{}.pt".format(epoch)
# if not os.path.exists(str(exp_num)):
# os.makedirs(str(exp_num))
# torch.save(model, PATH)
print()
PATH = os.path.join(str(exp_num), "resnet34_kd{}.pt".format(epoch))#"resnet18_model_epoch{}.pt".format(epoch)
if not os.path.exists(str(exp_num)):
os.makedirs(str(exp_num))
torch.save(model, PATH)
# torch.save(best_model_wts, "best.pt")
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best train Acc: {:4f}'.format(best_train_acc))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
##### PLOT
x1 = range(0, num_epochs)
x2 = range(0, num_epochs)
y1 = Accuracy_list
y2 = Loss_list
plt.subplot(2, 1, 1)
plt.plot(x1, y1, 'o-')
plt.title('Train accuracy vs. epochs')
plt.ylabel('Train accuracy')
plt.subplot(2, 1, 2)
plt.plot(x2, y2, '.-')
plt.xlabel('Train loss vs. epochs')
plt.ylabel('Train loss')
plt.savefig("Train_accuracy_loss.jpg")  # save before show(), otherwise an empty figure is written
plt.show()
x1 = range(0, num_epochs)
x2 = range(0, num_epochs)
y1 = T_Accuracy_list
y2 = T_Loss_list
plt.subplot(2, 1, 1)
plt.plot(x1, y1, 'o-')
plt.title('Test accuracy vs. epochs')
plt.ylabel('Test accuracy')
plt.subplot(2, 1, 2)
plt.plot(x2, y2, '.-')
plt.xlabel('Test loss vs. epochs')
plt.ylabel('Test loss')
plt.savefig("Test_accuracy_loss.jpg")  # save before show(), otherwise an empty figure is written
plt.show()
return model
model_ft = model_ft.to(device)
# #criterion = nn.CrossEntropyLoss()
criterion = nn.CrossEntropyLoss(weight=weights) #weight=weights,
# def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
# since = time.time()
# print("---------------Start KD FIT( TEACHER AND STUDENT )-----------------")
# best_model_wts = copy.deepcopy(model.state_dict())
# best_acc = 0.0
# best_train_acc = 0.0
# kd_alpha = 0.8
# Loss_list = []
# Accuracy_list = []
# T_Loss_list = []
# T_Accuracy_list = []
# for epoch in trange(num_epochs):
# print('Epoch {}/{}'.format(epoch, num_epochs - 1))
# # Each epoch has a training and validation phase
# for phase in ['Training', 'Test']:
# ##################################### train#############################
# if phase == 'Training':
# model.train() # Set model to training mode
# else:
# model.eval() # Set model to evaluate mode
# running_loss = 0.0
# running_corrects = 0
# # Iterate over data.
# if phase == "Training":
# for SAR, EO in tqdm(data_loader):
# inputs, labels = SAR[0], SAR[1]
# inputs = inputs.to(device)
# labels = labels.to(device)
# T_input, T_labels = EO[0], EO[1]
# T_input = T_input.to(device)
# T_labels = T_labels.to(device)
# # print(T_labels, labels)
# # labels = torch.argmax(labels, 0)
# # T_labels = torch.argmax(T_labels, 0)
# # zero the parameter gradients
# optimizer.zero_grad()
# # forward
# # track history if only in train
# with torch.no_grad():
# soft_target = netT(T_input)
# with torch.set_grad_enabled():
# outputs = model(inputs) # _,
# #print(outputs.dim())
# _, preds = torch.max(outputs, 1)
# # _, T_preds = torch.max(soft_target, 1)
# T = 2
# outputs_S = F.log_softmax(outputs/T, dim=1)
# outputs_T = F.softmax(soft_target/T, dim=1)
# # print(outputs_S.size())
# # print(outputs_T.size())
# loss2 = criterion2(outputs_S, outputs_T) * T * T
# #print(preds)
# inputs, y_a, y_b, lam = mixup_data(inputs, labels)
# inputs, y_a, y_b = map(Variable, (inputs, y_a, y_b))
# # print(y_a)
# # print(y_b)
# loss = mixup_criterion(criterion, outputs, y_a, y_b, lam)
# loss = loss*(1-kd_alpha) + loss2*kd_alpha
# running_loss += loss.item() * inputs.size(0)
# running_corrects += torch.sum(preds == labels.data)
# loss.backward()
# optimizer.step()
# scheduler.step()
# ##############################test#############################
# else:
# for SAR, EO in tqdm(test_data_loader):
# inputs, labels = SAR[0], SAR[1]
# inputs = inputs.to(device)
# labels = labels.to(device)
# T_input, T_labels = EO[0], EO[1]
# T_input = T_input.to(device)
# T_labels = T_labels.to(device)
# optimizer.zero_grad()
# # forward
# # track history if only in train
# with torch.no_grad():
# soft_target = netT(T_input)
# outputs = model(inputs) # _,
# #print(outputs.dim())
# _, preds = torch.max(outputs, 1)
# # _, T_preds = torch.max(soft_target, 1)
# T = 2
# outputs_S = F.log_softmax(outputs/T, dim=1)
# outputs_T = F.softmax(soft_target/T, dim=1)
# # print(outputs_S.size())
# # print(outputs_T.size())
# loss2 = criterion2(outputs_S, outputs_T) * T * T
# loss = criterion(outputs, labels)
# loss = loss*(1-kd_alpha) + loss2*kd_alpha
# ################################
# running_loss += loss.item() * inputs.size(0)
# running_corrects += torch.sum(preds == labels.data)
# epoch_loss = running_loss / dataset_sizes[phase]
# epoch_acc = running_corrects.double() / dataset_sizes[phase]
# print('{} Loss: {:.4f} Acc: {:.4f}'.format(
# phase, epoch_loss, epoch_acc))
# if phase == "Training":
# Loss_list.append(epoch_loss)
# Accuracy_list.append(100 * epoch_acc)
# else:
# T_Loss_list.append(epoch_loss)
# T_Accuracy_list.append(100 * epoch_acc)
# # deep copy the model
# if phase == 'Test' and epoch_acc > best_acc:
# best_acc = epoch_acc
# best_model_wts = copy.deepcopy(model.state_dict())
# PATH = os.path.join(str(exp_num), "resnet34_kd_best.pt")#"resnet18_model_epoch{}.pt".format(epoch)
# if not os.path.exists(str(exp_num)):
# os.makedirs(str(exp_num))
# torch.save(model, PATH)
# time_elapsed = time.time() - since
# print('Time from Start {:.0f}m {:.0f}s'.format(
# time_elapsed // 60, time_elapsed % 60))
# if phase == 'Training' and epoch_acc > best_train_acc:
# best_train_acc = epoch_acc
# # PATH = os.path.join(str(exp_num), "resnet34_kd_best.pt")#"resnet18_model_epoch{}.pt".format(epoch)
# # if not os.path.exists(str(exp_num)):
# # os.makedirs(str(exp_num))
# # torch.save(model, PATH)
# print()
# PATH = os.path.join(str(exp_num), "resnet34_kd{}.pt".format(epoch))#"resnet18_model_epoch{}.pt".format(epoch)
# if not os.path.exists(str(exp_num)):
# os.makedirs(str(exp_num))
# torch.save(model, PATH)
# time_elapsed = time.time() - since
# print('Training complete in {:.0f}m {:.0f}s'.format(
# time_elapsed // 60, time_elapsed % 60))
# print('Best train Acc: {:4f}'.format(best_train_acc))
# print('Best val Acc: {:4f}'.format(best_acc))
# # load best model weights
# model.load_state_dict(best_model_wts)
# ##### PLOT
# x1 = range(0, num_epochs)
# x2 = range(0, num_epochs)
# y1 = Accuracy_list
# y2 = Loss_list
# plt.subplot(2, 1, 1)
# plt.plot(x1, y1, 'o-')
# plt.title('Train accuracy vs. epoches')
# plt.ylabel('Train accuracy')
# plt.subplot(2, 1, 2)
# plt.plot(x2, y2, '.-')
# plt.xlabel('Train loss vs. epoches')
# plt.ylabel('Train loss')
# plt.show()
# plt.savefig("Train_accuracy_loss.jpg")
# x1 = range(0, num_epochs)
# x2 = range(0, num_epochs)
# y1 = T_Accuracy_list
# y2 = T_Loss_list
# plt.subplot(2, 1, 1)
# plt.plot(x1, y1, 'o-')
# plt.title('Test accuracy vs. epoches')
# plt.ylabel('Test accuracy')
# plt.subplot(2, 1, 2)
# plt.plot(x2, y2, '.-')
# plt.xlabel('Test loss vs. epoches')
# plt.ylabel('Test loss')
# plt.show()
# plt.savefig("Test_accuracy_loss.jpg")
# return model
# model_ft = model_ft.to(device)
# # #criterion = nn.CrossEntropyLoss()
# criterion = nn.CrossEntropyLoss(weight=weights) #weight=weights,
# os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.01, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=20, gamma=0.5)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
num_epochs=120)
!mkdir test
!unzip NTIRE2021_Class_test_images_EO.zip -d ./test
model = torch.load("45_kd_sar-teacher_eo-student_pretrain-on-sar/resnet34_kd114.pt")
import pandas as pd
from torch.utils import data
from tqdm import tqdm
from PIL import Image
class ImageData(data.Dataset):
def __init__(self,df,dirpath,transform,test = False):
self.df = df
self.test = test
self.dirpath = dirpath
self.conv_to_tensor = transform
#image data
if not self.test:
self.image_arr = np.asarray(str(self.dirpath)+'/'+self.df.iloc[:, 0]+'.png')
else:
self.image_arr = np.asarray(str(self.dirpath)+'/'+self.df.iloc[:, 0])
#labels data
if not self.test:
self.label_df = self.df.iloc[:,1]
# Calculate length of df
self.data_len = len(self.df.index)
def __len__(self):
return self.data_len
def __getitem__(self, idx):
image_name = self.image_arr[idx]
img = Image.open(image_name)#.convert('RGB')
img_tensor = self.conv_to_tensor(img)
if not self.test:
image_labels = self.label_df[idx]
label_tensor = torch.zeros((1, output_dim))
for label in image_labels.split():
label_tensor[0, int(label)] = 1
image_label = torch.tensor(label_tensor,dtype= torch.float32)
return (img_tensor,image_label.squeeze())
return (img_tensor)
BATCH_SIZE = 1
test_dir = "./test"
test_dir_ls = os.listdir(test_dir)
test_dir_ls.sort()
test_df = pd.DataFrame(test_dir_ls)
test_dataset = ImageData(test_df,test_dir,EO_data_transforms["valid_EO"],test = True)
test_loader = data.DataLoader(dataset=test_dataset,batch_size=BATCH_SIZE,shuffle=False)
output_dim = 10
DISABLE_TQDM = False
predictions = np.zeros((len(test_dataset), output_dim))
i = 0
for test_batch in tqdm(test_loader,disable = DISABLE_TQDM):
test_batch = test_batch.to(device)
batch_prediction = model(test_batch).detach().cpu().numpy()
predictions[i * BATCH_SIZE:(i+1) * BATCH_SIZE, :] = batch_prediction
i+=1
predictions[170]
```
### submission balance for class 0
```
m = nn.Softmax(dim=1)
predictions_tensor = torch.from_numpy(predictions)
output_softmax = m(predictions_tensor)
# output_softmax = output_softmax/output_softmax.sum()
pred = np.argmax(predictions,axis = 1)
plot_ls = []
idx = 0
for each_pred in pred:
if each_pred == 0:
plot_ls.append(output_softmax[idx][0].item())
idx+=1
# plot_ls
# idx = 0
# # print(output_softmax)
# for i in pred:
# # print(predictions_tensor[idx])
# each_output_softmax = output_softmax[idx]/output_softmax[idx].sum()
# print(each_output_softmax)
# if i == 0:
# new_list = set(predictions[idx])
# new_list.remove(max(new_list))
# index = predictions[idx].tolist().index(max(new_list))
# # index = predictions[idx].index()
# # print(index)
# idx+=1
import matplotlib.pyplot as plt
plt.hist(plot_ls, bins=80, histtype="stepfilled", alpha=.8)
plot_ls.sort()
val = plot_ls[-85]
print(val)
plt.vlines(val, ymin = 0, ymax = 22, colors = 'r')
# print(output_softmax)
idx = 0
counter = 0
for i in pred:
# print(predictions_tensor[idx])
# each_output_softmax = output_softmax[idx]/output_softmax[idx].sum()
# print(each_output_softmax)
if i == 0 and output_softmax[idx][0] < val:
new_list = set(predictions[idx])
new_list.remove(max(new_list))
index = predictions[idx].tolist().index(max(new_list))
# index = predictions[idx].index()
# print(index)
pred[idx] = index
output_softmax[idx][0] = -100.0
counter += 1
idx+=1
print(counter)
```
### submission balance for class 1
```
plot_ls = []
idx = 0
for each_pred in pred:
if each_pred == 1:
plot_ls.append(output_softmax[idx][1].item())
idx+=1
# plot_ls
# idx = 0
# # print(output_softmax)
# for i in pred:
# # print(predictions_tensor[idx])
# each_output_softmax = output_softmax[idx]/output_softmax[idx].sum()
# print(each_output_softmax)
# if i == 0:
# new_list = set(predictions[idx])
# new_list.remove(max(new_list))
# index = predictions[idx].tolist().index(max(new_list))
# # index = predictions[idx].index()
# # print(index)
# idx+=1
import matplotlib.pyplot as plt
plt.hist(plot_ls, bins=80, histtype="stepfilled", alpha=.8)
plot_ls.sort()
val = plot_ls[-85]
print(val)
plt.vlines(val, ymin = 0, ymax = 22, colors = 'r')
# print(output_softmax)
idx = 0
counter = 0
for i in pred:
# print(predictions_tensor[idx])
# each_output_softmax = output_softmax[idx]/output_softmax[idx].sum()
# print(each_output_softmax)
if i == 1 and output_softmax[idx][1] < val:
new_list = set(output_softmax[idx])
new_list.remove(max(new_list))
index = output_softmax[idx].tolist().index(max(new_list))
# index = predictions[idx].index()
# print(index)
pred[idx] = index
output_softmax[idx][1] = -100.0
counter += 1
idx+=1
print(counter)
```
### submission balance for class 2
```
plot_ls = []
idx = 0
for each_pred in pred:
if each_pred == 2:
plot_ls.append(output_softmax[idx][2].item())
idx+=1
# plot_ls
# idx = 0
# # print(output_softmax)
# for i in pred:
# # print(predictions_tensor[idx])
# each_output_softmax = output_softmax[idx]/output_softmax[idx].sum()
# print(each_output_softmax)
# if i == 0:
# new_list = set(predictions[idx])
# new_list.remove(max(new_list))
# index = predictions[idx].tolist().index(max(new_list))
# # index = predictions[idx].index()
# # print(index)
# idx+=1
import matplotlib.pyplot as plt
plt.hist(plot_ls, bins=80, histtype="stepfilled", alpha=.8)
plot_ls.sort()
val = plot_ls[-85]
print(val)
plt.vlines(val, ymin = 0, ymax = 22, colors = 'r')
# print(output_softmax)
idx = 0
counter = 0
for i in pred:
# print(predictions_tensor[idx])
# each_output_softmax = output_softmax[idx]/output_softmax[idx].sum()
# print(each_output_softmax)
if i == 2 and output_softmax[idx][2] < val:
new_list = set(output_softmax[idx])
new_list.remove(max(new_list))
index = output_softmax[idx].tolist().index(max(new_list))
# index = predictions[idx].index()
# print(index)
pred[idx] = index
output_softmax[idx][2] = -100.0
counter += 1
idx+=1
print(counter)
```
### submission balance for class 3
```
plot_ls = []
idx = 0
for each_pred in pred:
if each_pred == 3:
plot_ls.append(output_softmax[idx][3].item())
idx+=1
# plot_ls
# idx = 0
# # print(output_softmax)
# for i in pred:
# # print(predictions_tensor[idx])
# each_output_softmax = output_softmax[idx]/output_softmax[idx].sum()
# print(each_output_softmax)
# if i == 0:
# new_list = set(predictions[idx])
# new_list.remove(max(new_list))
# index = predictions[idx].tolist().index(max(new_list))
# # index = predictions[idx].index()
# # print(index)
# idx+=1
import matplotlib.pyplot as plt
plt.hist(plot_ls, bins=80, histtype="stepfilled", alpha=.8)
plot_ls.sort()
val = plot_ls[-85]
print(val)
plt.vlines(val, ymin = 0, ymax = 22, colors = 'r')
# print(output_softmax)
idx = 0
counter = 0
for i in pred:
# print(predictions_tensor[idx])
# each_output_softmax = output_softmax[idx]/output_softmax[idx].sum()
# print(each_output_softmax)
if i == 3 and output_softmax[idx][3] < val:
new_list = set(output_softmax[idx])
new_list.remove(max(new_list))
index = output_softmax[idx].tolist().index(max(new_list))
# index = predictions[idx].index()
# print(index)
pred[idx] = index
output_softmax[idx][3] = -100.0
counter += 1
idx+=1
print(counter)
```
### submission balance for class 4
```
# plot_ls = []
# idx = 0
# for each_pred in pred:
# if each_pred == 4:
# plot_ls.append(output_softmax[idx][4].item())
# idx+=1
# # plot_ls
# # idx = 0
# # # print(output_softmax)
# # for i in pred:
# # # print(predictions_tensor[idx])
# # each_output_softmax = output_softmax[idx]/output_softmax[idx].sum()
# # print(each_output_softmax)
# # if i == 0:
# # new_list = set(predictions[idx])
# # new_list.remove(max(new_list))
# # index = predictions[idx].tolist().index(max(new_list))
# # # index = predictions[idx].index()
# # # print(index)
# # idx+=1
# import matplotlib.pyplot as plt
# plt.hist(plot_ls, bins=80, histtype="stepfilled", alpha=.8)
# plot_ls.sort()
# val = plot_ls[-85]
# print(val)
# plt.vlines(val, ymin = 0, ymax = 22, colors = 'r')
# # print(output_softmax)
# idx = 0
# counter = 0
# for i in pred:
# # print(predictions_tensor[idx])
# # each_output_softmax = output_softmax[idx]/output_softmax[idx].sum()
# # print(each_output_softmax)
# if i == 4 and output_softmax[idx][4] < val:
# new_list = set(output_softmax[idx])
# new_list.remove(max(new_list))
# index = output_softmax[idx].tolist().index(max(new_list))
# # index = predictions[idx].index()
# # print(index)
# pred[idx] = index
# output_softmax[idx][4] = -100.0
# counter += 1
# idx+=1
# print(counter)
```
### submission balance for class 5
```
# plot_ls = []
# idx = 0
# for each_pred in pred:
# if each_pred == 5:
# plot_ls.append(output_softmax[idx][5].item())
# idx+=1
# # plot_ls
# # idx = 0
# # # print(output_softmax)
# # for i in pred:
# # # print(predictions_tensor[idx])
# # each_output_softmax = output_softmax[idx]/output_softmax[idx].sum()
# # print(each_output_softmax)
# # if i == 0:
# # new_list = set(predictions[idx])
# # new_list.remove(max(new_list))
# # index = predictions[idx].tolist().index(max(new_list))
# # # index = predictions[idx].index()
# # # print(index)
# # idx+=1
# import matplotlib.pyplot as plt
# plt.hist(plot_ls, bins=80, histtype="stepfilled", alpha=.8)
# plot_ls.sort()
# val = plot_ls[-85]
# print(val)
# plt.vlines(val, ymin = 0, ymax = 22, colors = 'r')
# # print(output_softmax)
# idx = 0
# counter = 0
# for i in pred:
# # print(predictions_tensor[idx])
# # each_output_softmax = output_softmax[idx]/output_softmax[idx].sum()
# # print(each_output_softmax)
# if i == 5 and output_softmax[idx][5] < val:
# new_list = set(output_softmax[idx])
# new_list.remove(max(new_list))
# index = output_softmax[idx].tolist().index(max(new_list))
# # index = predictions[idx].index()
# # print(index)
# pred[idx] = index
# output_softmax[idx][5] = -100.0
# counter += 1
# idx+=1
# print(counter)
```
### submission balance for class 6 (not applied)
```
# plot_ls = []
# idx = 0
# for each_pred in pred:
# if each_pred == 6:
# plot_ls.append(output_softmax[idx][6].item())
# idx+=1
# # plot_ls
# # idx = 0
# # # print(output_softmax)
# # for i in pred:
# # # print(predictions_tensor[idx])
# # each_output_softmax = output_softmax[idx]/output_softmax[idx].sum()
# # print(each_output_softmax)
# # if i == 0:
# # new_list = set(predictions[idx])
# # new_list.remove(max(new_list))
# # index = predictions[idx].tolist().index(max(new_list))
# # # index = predictions[idx].index()
# # # print(index)
# # idx+=1
# import matplotlib.pyplot as plt
# plt.hist(plot_ls, bins=80, histtype="stepfilled", alpha=.8)
# plot_ls.sort()
# val = plot_ls[-85]
# print(val)
# plt.vlines(val, ymin = 0, ymax = 22, colors = 'r')
# # print(output_softmax)
# idx = 0
# counter = 0
# for i in pred:
# # print(predictions_tensor[idx])
# # each_output_softmax = output_softmax[idx]/output_softmax[idx].sum()
# # print(each_output_softmax)
# if i == 6 and output_softmax[idx][6] < val:
# new_list = set(output_softmax[idx])
# new_list.remove(max(new_list))
# index = output_softmax[idx].tolist().index(max(new_list))
# # index = predictions[idx].index()
# # print(index)
# pred[idx] = index
# output_softmax[idx][6] = -100.0
# counter += 1
# idx+=1
# print(counter)
len(plot_ls)
```
### submission balance for class 7
```
# plot_ls = []
# idx = 0
# for each_pred in pred:
# if each_pred == 7:
# plot_ls.append(output_softmax[idx][7].item())
# idx+=1
# # plot_ls
# # idx = 0
# # # print(output_softmax)
# # for i in pred:
# # # print(predictions_tensor[idx])
# # each_output_softmax = output_softmax[idx]/output_softmax[idx].sum()
# # print(each_output_softmax)
# # if i == 0:
# # new_list = set(predictions[idx])
# # new_list.remove(max(new_list))
# # index = predictions[idx].tolist().index(max(new_list))
# # # index = predictions[idx].index()
# # # print(index)
# # idx+=1
# import matplotlib.pyplot as plt
# plt.hist(plot_ls, bins=80, histtype="stepfilled", alpha=.8)
# plot_ls.sort()
# val = plot_ls[-85]
# print(val)
# plt.vlines(val, ymin = 0, ymax = 22, colors = 'r')
# # print(output_softmax)
# idx = 0
# counter = 0
# for i in pred:
# # print(predictions_tensor[idx])
# # each_output_softmax = output_softmax[idx]/output_softmax[idx].sum()
# # print(each_output_softmax)
# if i == 7 and output_softmax[idx][7] < val:
# new_list = set(output_softmax[idx])
# new_list.remove(max(new_list))
# index = output_softmax[idx].tolist().index(max(new_list))
# # index = predictions[idx].index()
# # print(index)
# pred[idx] = index
# output_softmax[idx][7] = -100.0
# counter += 1
# idx+=1
# print(counter)
```
### submission balance for class 8
### submission balance for class 9
```
# plot_ls = []
# idx = 0
# for each_pred in pred:
# if each_pred == 9:
# plot_ls.append(output_softmax[idx][9].item())
# idx+=1
# # plot_ls
# # idx = 0
# # # print(output_softmax)
# # for i in pred:
# # # print(predictions_tensor[idx])
# # each_output_softmax = output_softmax[idx]/output_softmax[idx].sum()
# # print(each_output_softmax)
# # if i == 0:
# # new_list = set(predictions[idx])
# # new_list.remove(max(new_list))
# # index = predictions[idx].tolist().index(max(new_list))
# # # index = predictions[idx].index()
# # # print(index)
# # idx+=1
# import matplotlib.pyplot as plt
# plt.hist(plot_ls, bins=80, histtype="stepfilled", alpha=.8)
# plot_ls.sort()
# val = plot_ls[-85]
# print(val)
# plt.vlines(val, ymin = 0, ymax = 22, colors = 'r')
pred
# pred = np.argmax(predictions,axis = 1)
pred_list = []
for i in range(len(pred)):
result = [pred[i]]
pred_list.append(result)
pred_list
predicted_class_idx = pred_list
test_df['class_id'] = predicted_class_idx
test_df['class_id'] = test_df['class_id'].apply(lambda x : ' '.join(map(str,list(x))))
test_df = test_df.rename(columns={0: 'image_id'})
test_df['image_id'] = test_df['image_id'].apply(lambda x : x.split('.')[0])
test_df
for (idx, row) in test_df.iterrows():
row.image_id = row.image_id.split("_")[1]
for k in range(10):
i = 0
for (idx, row) in test_df.iterrows():
if row.class_id == str(k):
i+=1
print(i)
test_df
test_df.to_csv('results.csv',index = False)
```
```
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import glob
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.utils import class_weight
from imblearn.under_sampling import RandomUnderSampler
tf.__version__
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
df = pd.read_csv('E:\chord-detection-challenge\DataBase\CSV/status_A1.csv')
df.sort_values(by='title', inplace=True)
images_path = sorted(glob.glob('E:\chord-detection-challenge\DataBase\clean_windows/Train/*'))
df['Unnamed: 0'] = np.array(images_path)
df.reset_index(inplace=True, drop=True)
df['status'] = df['status'].astype(str)
```
### Partitioning the training and test sets with a 70-30% split
```
### split 70-30% while keeping the sample frequencies for training, validation and test consistent with the title and status columns (status is the network's label)
X_train, X_test, y_train, y_test = train_test_split(df[['Unnamed: 0', 'title']], df['status'], test_size=0.30, random_state=42, stratify=df[['status', 'title']])
df_train = pd.concat([X_train, y_train], axis=1)
X_train, X_val, y_train, y_val = train_test_split(df_train[['Unnamed: 0', 'title']], df_train['status'], test_size=0.30, random_state=42, stratify=df_train[['status', 'title']])
### concatenate the input features and the label into a single dataframe in order to use tensorflow's flow_from_dataframe
df_test = pd.concat([X_test, y_test], axis=1)
df_train = pd.concat([X_train, y_train], axis=1)
df_val = pd.concat([X_val, y_val], axis=1)
print('Total training images', len(df_train))
print('Total validation images', len(df_val))
print('Total test images', len(df_test))
undersample_train = RandomUnderSampler(sampling_strategy='majority')
undersample_validation = RandomUnderSampler(sampling_strategy='majority')
X_undertrain, y_undertrain = undersample_train.fit_resample(df_train[['Unnamed: 0', 'title']], df_train['status'])
X_undervalidation, y_undervalidation = undersample_validation.fit_resample(df_val[['Unnamed: 0', 'title']], df_val['status'])
df_train = pd.concat([X_undertrain, y_undertrain], axis=1)
df_val = pd.concat([X_undervalidation, y_undervalidation], axis=1)
```
### Partitioning the training and test sets with different songs
```
songs, n = df['title'].unique(), 5
index = np.random.choice(len(songs), 5, replace=False)
selected_songs = songs[index] ## select n of the available songs for testing
df_test = df[df['title'].isin(selected_songs)] ## the test set contains all spectrograms of the n songs selected above
df_train = df[~(df['title'].isin(selected_songs))] ## the training set contains the spectrograms of every song EXCEPT the ones selected above for testing
X_train, X_val, y_train, y_val = train_test_split(df_train[['Unnamed: 0', 'title']], df_train['status'], test_size=0.30, random_state=42, stratify=df_train[['status', 'title']]) ## split off a 30% validation set, stratified by title and status
### concatenate the input features and the label into a single dataframe in order to use tensorflow's flow_from_dataframe
df_train = pd.concat([X_train, y_train], axis=1)
df_val = pd.concat([X_val, y_val], axis=1)
print('Total training images', len(df_train))
print('Total validation images', len(df_val))
print('Total test images', len(df_test))
datagen=ImageDataGenerator(rescale=1./255)
train_generator=datagen.flow_from_dataframe(dataframe=df_train, directory='E:\chord-detection-challenge\DataBase\clean_windows/Train/', x_col='Unnamed: 0', y_col="status", class_mode="binary", target_size=(224,224), batch_size=32)
valid_generator=datagen.flow_from_dataframe(dataframe=df_val, directory='E:\chord-detection-challenge\DataBase\clean_windows/Train/', x_col='Unnamed: 0', y_col="status", class_mode="binary", target_size=(224,224), batch_size=32)
test_generator=datagen.flow_from_dataframe(dataframe=df_test, directory='E:\chord-detection-challenge\DataBase\clean_windows/Train/', x_col='Unnamed: 0', y_col="status", class_mode="binary", target_size=(224,224), batch_size=1,shuffle=False)
#from tensorflow.keras.models import Model
restnet = tf.keras.applications.VGG16(
include_top=False, # do not keep the original output layer
weights=None, # do not load the ImageNet weights
input_shape=(224,224,3)
)
output = restnet.layers[-1].output
output = tf.keras.layers.Flatten()(output)
restnet = tf.keras.Model(inputs=restnet.input, outputs=output)
for layer in restnet.layers: # train everything from scratch
layer.trainable = True
restnet.summary()
mc = tf.keras.callbacks.ModelCheckpoint('resnet_model.h5', monitor='val_binary_accuracy', mode='max', save_best_only=True)
model = tf.keras.models.Sequential()
model.add(restnet)
model.add(tf.keras.layers.Dense(128, activation='relu', input_dim=(224,224,3)))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
# tf.keras.layers.Conv2D(32, (3, 3), padding='same',
# input_shape=(32,32,3)),
# tf.keras.layers.MaxPool2D(),
# tf.keras.layers.Conv2D(64, (3, 3)),
# tf.keras.layers.Conv2D(128, (3, 3)),
# tf.keras.layers.Flatten(),
# tf.keras.layers.Dense(128,activation='relu'),
# tf.keras.layers.Dense(2)
#)
model.compile(
optimizer=tf.keras.optimizers.Adamax(),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=[tf.keras.metrics.BinaryAccuracy()]
#weighted_metrics=[tf.keras.metrics.BinaryAccuracy()]
)
class_weights = class_weight.compute_class_weight('balanced',
np.unique(df_train['status']),
df_train['status'])
class_weights = dict(enumerate(class_weights))
STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size
STEP_SIZE_VALID=valid_generator.n//valid_generator.batch_size
model.fit(train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
#class_weight=class_weights,
epochs=10,
callbacks = [mc])
STEP_SIZE_TEST=test_generator.n//test_generator.batch_size
print('---------------Test-------------')
test_generator.reset()
predictions = model.predict(test_generator,
steps=STEP_SIZE_TEST,
verbose=1)
predictions
y_pred = predictions > 0.5
predicted_class_indices=np.argmax(predictions,axis=1)
predicted_class_indices
print(accuracy_score(test_generator.labels, y_pred))
print(classification_report(test_generator.labels, y_pred))
```
# Dimensionality Reduction
Dimensionality reduction means mapping the data onto a smaller number of dimensions, so that the number of new features is lower than the number of original features.
First, we need to import numpy, matplotlib, and scikit-learn and get the UCI ML digit image data. Scikit-learn already comes with this data (or will automatically download it for you) so we don’t have to deal with uncompressing it ourselves! Additionally, I’ve provided a function that will produce a nice visualization of our data.
We are going to use the following libraries and packages:
* **numpy**: "NumPy is the fundamental package for scientific computing with Python." (http://www.numpy.org/)
* **matplotlib**: "Matplotlib is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms." (https://matplotlib.org/)
* **sklearn**: Scikit-learn is a machine learning library for Python programming language. (https://scikit-learn.org/stable/)
* **pandas**: "Pandas provides easy-to-use data structures and data analysis tools for Python." (https://pandas.pydata.org/)
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import offsetbox
import pandas as pd
```
# t-distributed Stochastic Neighbor Embedding (t-SNE)
t-SNE is an algorithm that maps a high-dimensional space to a low-dimensional one while paying particular attention to preserving short distances. The transformation is different for different regions of the space. SNE (Stochastic Neighbor Embedding) is the general concept behind this type of mapping, and the "t" refers to the use of a Student t-distribution in t-SNE.
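To make "paying attention to short distances" concrete, the quantity t-SNE minimizes can be written out explicitly (this is the standard formulation from the original paper, not code from this notebook). Pairwise similarities in the original space use a Gaussian kernel whose bandwidth $\sigma_i$ is chosen per point to match the user-set perplexity, similarities in the embedding use a Student t-distribution with one degree of freedom, and the embedding is found by minimizing the KL divergence between the two:

$$p_{j|i} = \frac{\exp\left(-\lVert x_i - x_j \rVert^2 / 2\sigma_i^2\right)}{\sum_{k \neq i} \exp\left(-\lVert x_i - x_k \rVert^2 / 2\sigma_i^2\right)}, \qquad p_{ij} = \frac{p_{j|i} + p_{i|j}}{2N}$$

$$q_{ij} = \frac{\left(1 + \lVert y_i - y_j \rVert^2\right)^{-1}}{\sum_{k \neq l} \left(1 + \lVert y_k - y_l \rVert^2\right)^{-1}}, \qquad C = \mathrm{KL}(P \,\|\, Q) = \sum_{i \neq j} p_{ij} \log \frac{p_{ij}}{q_{ij}}$$

The heavy tails of the t-distribution let moderately distant points spread out in the embedding, which is why t-SNE preserves local neighborhoods much better than global distances.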
## Synthetic data
Let's generate synthetic data as follows:
1) Points are scattered in a 2-dimensional space as shown below; in the remaining N-2 dimensions all points share the same value
2) We will reduce the dimensionality of the data to 2D
```
group_1_X = np.repeat(2,90)+np.random.normal(loc=0, scale=1,size=90)
group_1_Y = np.repeat(2,90)+np.random.normal(loc=0, scale=1,size=90)
group_2_X = np.repeat(10,90)+np.random.normal(loc=0, scale=1,size=90)
group_2_Y = np.repeat(10,90)+np.random.normal(loc=0, scale=1,size=90)
plt.scatter(group_1_X,group_1_Y, c='blue')
plt.scatter(group_2_X,group_2_Y,c='green')
plt.xlabel('1st dimension')
plt.ylabel('2nd dimension')
```
### Implementing t-SNE and UMAP on the synthetic data
```
####
combined = np.column_stack((np.concatenate([group_1_X,group_2_X]),np.concatenate([group_1_Y,group_2_Y])))
print(combined.shape)
####
from sklearn import manifold
combined_tSNE = manifold.TSNE(n_components=2, init='pca',perplexity=30,learning_rate=200,n_iter=500,random_state=2).fit_transform(combined)
####
import umap
combined_UMAP = umap.UMAP(n_neighbors=10, min_dist=0.3, n_components=2,random_state=2).fit_transform(combined)
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.scatter(combined_tSNE[0:90,0], combined_tSNE[0:90,1], c='blue')
ax1.scatter(combined_tSNE[90:180,0], combined_tSNE[90:180,1], c='green')
ax1.set_title('t-SNE')
ax2.scatter(combined_UMAP[0:90,0], combined_UMAP[0:90,1], c='blue')
ax2.scatter(combined_UMAP[90:180,0], combined_UMAP[90:180,1], c='green')
ax2.set_title('UMAP')
```
**Parameters of t-SNE:**
* ***Perplexity (perplexity)***: roughly corresponds to the number of close neighbors each point is assumed to have, so perplexity should be smaller than the number of points. The original paper suggests a range: "The performance of SNE is fairly robust to changes in the perplexity, and typical values are between 5 and 50." Although perplexity=5 is usually not optimal, values well above 50 can also produce odd groupings and shapes in the 2-dimensional space (compare the sketch after this list, which re-runs the embedding with several perplexity values).
* ***Number of iterations (n_iter)***: the number of iterations required for convergence is another important parameter and depends on the input dataset. There is no fixed number that guarantees convergence, but there are rules of thumb for checking it. For example, if the t-SNE plot shows pinched shapes, it is better to rerun with a higher iteration count to make sure the resulting shapes and clusters are not artifacts of an unconverged t-SNE.
**Parameters of UMAP:**
* ***Number of neighbors (n_neighbors)***: the number of neighboring data points used in the local manifold approximation. This parameter is suggested to be between 5 and 50.
* ***Minimum distance (min_dist)***: a measure of how tightly points are allowed to be packed together in the low-dimensional space. This parameter is suggested to be between 0.001 and 0.5.
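As a quick way to see the effect of the perplexity parameter, here is a minimal sketch (reusing the `combined` array of 180 synthetic points and the imports from the cells above) that re-computes the embedding for three perplexity values; the colouring follows the same 90/90 split as before.
```
from sklearn import manifold
import matplotlib.pyplot as plt

# Re-embed the same 180 synthetic points with three different perplexity values
fig, axes = plt.subplots(1, 3, figsize=(12, 4), sharey=True)
for ax, perp in zip(axes, [5, 30, 50]):
    emb = manifold.TSNE(n_components=2, init='pca', perplexity=perp,
                        learning_rate=200, n_iter=500,
                        random_state=2).fit_transform(combined)
    ax.scatter(emb[0:90, 0], emb[0:90, 1], c='blue')
    ax.scatter(emb[90:180, 0], emb[90:180, 1], c='green')
    ax.set_title('perplexity = {}'.format(perp))
plt.show()
```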
### Let's change the structure of synthetic data
Let's generate synthetic data as follows:
```
group_1_X = np.arange(10,100)
group_1_Y = np.arange(10,100)+np.random.normal(loc=0, scale=0.3,size=90)-np.repeat(4,90)
group_2_X = np.arange(10,100)
group_2_Y = np.arange(10,100)+np.random.normal(loc=0, scale=0.3,size=90)+np.repeat(4,90)
plt.scatter(group_1_X,group_1_Y, c='blue')
plt.scatter(group_2_X,group_2_Y,c='green')
plt.xlabel('1st dimension')
plt.ylabel('2nd dimension')
```
### Implementing t-SNE and UMAP on the synthetic data
```
####
combined = np.column_stack((np.concatenate([group_1_X,group_2_X]),np.concatenate([group_1_Y,group_2_Y])))
print(combined.shape)
####
from sklearn import manifold
combined_tSNE = manifold.TSNE(n_components=2, init='pca',perplexity=30,learning_rate=200,n_iter=500,random_state=2).fit_transform(combined)
####
import umap
combined_UMAP = umap.UMAP(n_neighbors=5, min_dist=0.01, n_components=2,random_state=2).fit_transform(combined)
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.scatter(combined_tSNE[0:90,0], combined_tSNE[0:90,1], c='blue')
ax1.scatter(combined_tSNE[90:180,0], combined_tSNE[90:180,1], c='green')
ax1.set_title('t-SNE')
ax2.scatter(combined_UMAP[0:90,0], combined_UMAP[0:90,1], c='blue')
ax2.scatter(combined_UMAP[90:180,0], combined_UMAP[90:180,1], c='green')
ax2.set_title('UMAP')
```
### Another synthetic dataset
Let's generate synthetic data as follows:
```
group_1_X = np.arange(start=0,stop=1**2,step=0.001)
group_1_Y = np.sqrt(np.repeat(1**2,1000)-group_1_X**2)
group_2_X = np.arange(start=0,stop=1.5,step=0.001)
group_2_Y = np.sqrt(np.repeat(1.5**2,1500)-group_2_X**2)
plt.scatter(group_1_X,group_1_Y, c='blue', )
plt.scatter(group_2_X,group_2_Y,c='green')
plt.xlabel('1st dimension')
plt.ylabel('2nd dimension')
plt.xlim(0,2.5)
plt.ylim(0,2.5)
```
### Implementing t-SNE and UMAP on the synthetic data
```
####
combined = np.column_stack((np.concatenate([group_1_X,group_2_X]),np.concatenate([group_1_Y,group_2_Y])))
print(combined.shape)
####
from sklearn import manifold
combined_tSNE = manifold.TSNE(n_components=2, init='pca',perplexity=30,learning_rate=200,n_iter=500,random_state=2).fit_transform(combined)
####
import umap
combined_UMAP = umap.UMAP(n_neighbors=10, min_dist=0.9, n_components=2,random_state=2).fit_transform(combined)
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.scatter(combined_tSNE[0:1000,0], combined_tSNE[0:1000,1], c='blue')
ax1.scatter(combined_tSNE[1000:2500,0], combined_tSNE[1000:2500,1], c='green')
ax1.set_title('t-SNE')
ax2.scatter(combined_UMAP[0:1000,0], combined_UMAP[0:1000,1], c='blue')
ax2.scatter(combined_UMAP[1000:2500,0], combined_UMAP[1000:2500,1], c='green')
ax2.set_title('UMAP')
```
### UCI ML digit image data
* Load and return the digit dataset
```
from sklearn import datasets
# Loading digit images
digits = datasets.load_digits()
X = digits.data
y = digits.target
n_samples, n_features = X.shape
print("number of samples (data points):", n_samples)
print("number of features:", n_features)
```
Pixels of images have values between 0 and 16:
```
np.max(X)
```
Let's also write a function that we can reuse to visualize the results of all the dimensionality reduction methods.
#### Let's visualize some of the images
```
fig, ax_array = plt.subplots(1,10)
axes = ax_array.flatten()
for i, ax in enumerate(axes):
ax.imshow(digits.images[i])
plt.setp(axes, xticks=[], yticks=[])
plt.tight_layout(h_pad=0.5, w_pad=0.01)
```
Now that we understand how t-SNE works, let's apply it to the UCI ML digit image data:
```
from sklearn import manifold
X_tsne = manifold.TSNE(n_components=2, init='pca',perplexity=30,learning_rate=200,n_iter=500,random_state=2).fit_transform(X)
```
Now we use the plotting function to show the two t-SNE dimensions for all the data points.
```
def embedding_plot(X,labels,title):
plt.scatter(X[:, 0], X[:, 1], c=labels, cmap='Spectral', s=5)
plt.gca().set_facecolor((1, 1, 1))
plt.xlabel('1st dimension', fontsize=24)
plt.ylabel('2nd dimension', fontsize=24)
plt.colorbar(boundaries=np.arange(11)-0.5).set_ticks(np.arange(10))
plt.grid(False)
plt.title(title, fontsize=24);
embedding_plot(X_tsne, y,"t-SNE")
```
**t-SNE is an unsupervised approach, similar to PCA and ICA; the colors for the sample labels are only added afterwards, for visualization.**
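For a side-by-side sense of what "similar to PCA" means here, a minimal sketch (reusing `X`, `y` and the `embedding_plot` helper defined above) projects the same digits onto their first two principal components. PCA is equally unsupervised, but being a linear projection it typically separates the digit classes less cleanly than t-SNE.
```
from sklearn.decomposition import PCA

# Linear, unsupervised baseline: project the digits onto their first two principal components
X_pca = PCA(n_components=2).fit_transform(X)
embedding_plot(X_pca, y, "PCA")
```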
## Normalizing data before dimensionality reduction
It is usually a good idea to normalize the data so that the different features end up on a similar scale of values.
```
from sklearn import preprocessing
X_norm = pd.DataFrame(preprocessing.scale(X))
Xnorm_tsne = manifold.TSNE(n_components=2, init='pca',perplexity=30,learning_rate=200,n_iter=500,random_state=2).fit_transform(X_norm)
embedding_plot(Xnorm_tsne, y,"t-SNE")
```
# Uniform Manifold Approximation and Projection (UMAP)
UMAP is a manifold learning method that is competitive with t-SNE in visualization quality while, unlike t-SNE, preserving more of the global structure of the data. UMAP also scales to extremely large datasets, such as GoogleNews, where t-SNE becomes computationally impractical.
UMAP builds a k-nearest-neighbor graph and uses stochastic gradient descent to minimize the difference between the pairwise distances in the high-dimensional and low-dimensional spaces.
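In the same spirit as the t-SNE objective above, the quantity UMAP's stochastic gradient descent minimizes is usually written as a fuzzy-set cross entropy between the edge weights $w_{ij}$ of the high-dimensional k-nearest-neighbor graph and the edge weights $v_{ij}$ of the low-dimensional layout (a standard formulation, not taken from this notebook):

$$C = \sum_{i \neq j} \left[\, w_{ij} \log \frac{w_{ij}}{v_{ij}} + \left(1 - w_{ij}\right) \log \frac{1 - w_{ij}}{1 - v_{ij}} \,\right]$$

The first term attracts points that are neighbors in the original space; the second repels pairs that are not, which is what helps UMAP retain more of the global structure.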
**Definitions**
* An n-dimensional manifold (n-manifold) M is a topological space that is locally homeomorphic to the Euclidean space of dimension n.
* Locally homeomorphic means that every point in the space M is contained in an open set U such that there is a one-to-one onto map f:U -> M.
* One-to-one onto map f:U -> M means that each element of M is mapped by exactly one element of U.
* A topological space is a collection of open sets (with some mathematical properties).
* A Riemannian (smooth) manifold M is a real smooth manifold with an inner product that varies smoothly from point to point in the tangent space of M.
* The Riemannian metric is the collection of all the inner products of the points in the manifold M on the tangent space of M.
* A simplicial complex K in n-dimensional real space is a collection of simplices in the space such that 1) Every face of a simplex of K is in K, and 2) The intersection of any two simplices of K is a face of each of them (Munkres 1993, p. 7; http://mathworld.wolfram.com/).
* A simplex is the generalization of a tetrahedral region of space to n dimensions (http://mathworld.wolfram.com/).
```
import umap
X_umap = umap.UMAP(n_neighbors=10, min_dist=0.3, n_components=2, random_state=2).fit_transform(X)
embedding_plot(X_umap, y,"umap")
```
## Boston housing dataset
```
from sklearn import datasets
# Loading the Boston housing data
housing = datasets.load_boston()
Xhouse = pd.DataFrame(housing.data)
Xhouse.columns = housing.feature_names
yhouse = housing.target
n_samples, n_features = Xhouse.shape
print("number of samples (data points):", n_samples)
print("number of features:", n_features)
```
### Normalizing the data
```
from sklearn import preprocessing
Xhouse_norm = pd.DataFrame(preprocessing.scale(Xhouse), columns=Xhouse.columns)
```
## Implementing t-SNE on the Boston housing data
```
Xhousenorm_tSNE = manifold.TSNE(n_components=2, init='pca',perplexity=30,learning_rate=200,n_iter=500,random_state=2).fit_transform(Xhouse_norm)
Xhousenorm_tSNE.shape
```
### Visualizing the results of t-SNE implemented on the Boston housing dataset
```
import seaborn as sns
cmap = sns.cubehelix_palette(as_cmap=True)
fig, ax = plt.subplots()
points = ax.scatter(x=Xhousenorm_tSNE[:,0], y=Xhousenorm_tSNE[:,1], c=yhouse, s=10, cmap=cmap)
fig.colorbar(points)
```
## Implementing UMAP on the Boston housing data
```
Xhousenorm_umap = umap.UMAP(n_neighbors=10, min_dist=0.4, n_components=2, random_state=2).fit_transform(Xhouse_norm)
fig, ax = plt.subplots()
points = ax.scatter(x=Xhousenorm_umap[:,0], y=Xhousenorm_umap[:,1], c=yhouse, s=10, cmap=cmap)
fig.colorbar(points)
```
### Removing outliers and repeating the analysis
```
Xhouse_norm_noout = Xhouse_norm.iloc[np.where((Xhouse_norm.max(axis=1) < 3)==True)[0],:]
Xhousenorm_noout_umap = umap.UMAP(n_neighbors=5, min_dist=0.4, n_components=2, random_state=2).fit_transform(Xhouse_norm_noout)
fig, ax = plt.subplots()
points = ax.scatter(x=Xhousenorm_noout_umap[:,0], y=Xhousenorm_noout_umap[:,1], c=yhouse[np.where((Xhouse_norm.max(axis=1) < 3)==True)[0]], s=10, cmap=cmap)
fig.colorbar(points)
```
<div style="width:100%; background-color: #D9EDF7; border: 1px solid #CFCFCF; text-align: left; padding: 10px;">
<b>Time series: Processing Notebook</b>
<ul>
<li><a href="main.ipynb">Main Notebook</a></li>
<li>Processing Notebook</li>
</ul>
<br>This Notebook is part of the <a href="http://data.open-power-system-data.org/time_series">Time series Data Package</a> of <a href="http://open-power-system-data.org">Open Power System Data</a>.
</div>
# Introductory Notes
This notebook handles missing data, performs calculations and aggregations, and creates the output files.
# Settings
## Set version number and recent changes
Executing this script to the end will create a new version of the data package.
The version number specifies the local directory for the data. <br>
We include a note on what has been changed.
```
version = '2019-01-31'
changes = '''Added a new source, Terna.'''
```
## Import Python libraries
This section: load libraries and set up a log.
Note that the download module makes use of the [pycountry](https://pypi.python.org/pypi/pycountry) library, which is not part of Anaconda. Install it with `pip install pycountry` from the command line.
```
# Python modules
from datetime import datetime, date, timedelta, time
import pandas as pd
import numpy as np
import logging
import logging.handlers
import json
import sqlite3
import yaml
import itertools
import os
import pytz
from shutil import copyfile
import pickle
# Scripts from the time-series repository
from timeseries_scripts.read import read
from timeseries_scripts.download import download
from timeseries_scripts.imputation import find_nan
from timeseries_scripts.imputation import resample_markers, glue_markers, mark_own_calc
from timeseries_scripts.make_json import make_json, get_sha_hash
# Reload modules whenever code is executed, to avoid having to restart
# the kernel after editing timeseries_scripts
%load_ext autoreload
%autoreload 2
# speed up tab completion in Jupyter Notebook
%config Completer.use_jedi = False
```
## Display options
```
# Allow pretty-display of multiple variables
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# Adjust the way pandas DataFrames are displayed to fit more columns
pd.reset_option('display.max_colwidth')
pd.options.display.max_columns = 60
# pd.options.display.max_colwidth=5
```
## Set directories
```
# make sure the working directory is this file's directory
try:
os.chdir(home_path)
except NameError:
home_path = os.path.realpath('.')
# optionally, set a different directory to store outputs and raw data,
# which will take up around 15 GB of disk space
#Milos: save_path is None <=> use_external_dir == False
use_external_dir = True
if use_external_dir:
save_path = os.path.join('C:', os.sep, 'OPSD_time_series_data')
else:
save_path = home_path
input_path = os.path.join(home_path, 'input')
sources_yaml_path = os.path.join(home_path, 'input', 'sources.yml')
areas_csv_path = os.path.join(home_path, 'input', 'areas.csv')
data_path = os.path.join(save_path, version, 'original_data')
out_path = os.path.join(save_path, version)
temp_path = os.path.join(save_path, 'temp')
for path in [data_path, out_path, temp_path]:
os.makedirs(path, exist_ok=True)
# change to temp directory
os.chdir(temp_path)
os.getcwd()
```
## Chromedriver
If you want to download from sources which require scraping, download the appropriate version of Chromedriver for your platform, name it `chromedriver`, create folder `chromedriver` in the working directory, and move the driver to it. It is used by `Selenium` to scrape the links from web pages.
The current list of sources which require scraping (as of December 2018):
- Terna
- Note that the package contains a database of Terna links up to **20 December 2018**. By default, the links are first looked up in this database, so if the end date of your query is not after **20 December 2018**, you won't need Selenium. If you need later dates, you have two options. If you set the variable `extract_new_terna_urls` to `True`, then Selenium will be used to download the files for those later dates. If you set `extract_new_terna_urls` to `False` (the default value), only the recorded links will be consulted and Selenium will not be used.
- Note: Make sure that the database file, `recorded_terna_urls.csv`, is located in the working directory.
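A quick sanity check before starting a scraping run can save time. The snippet below is only an illustrative sketch, assuming the layout described above (a `chromedriver` folder and the `recorded_terna_urls.csv` file in the working directory):
```
# Illustrative check only: confirm the scraping prerequisites described above
# are in place. Assumes the folder layout from this section; adjust if yours differs.
print('chromedriver folder present:  ',
      os.path.isdir(os.path.join(os.getcwd(), 'chromedriver')))
print('recorded_terna_urls.csv present:',
      os.path.isfile(os.path.join(os.getcwd(), 'recorded_terna_urls.csv')))
```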
```
# Decide whether to scrape new Terna links with Selenium (False: use only the provided database)
extract_new_terna_urls = False
# Saving the choice
f = open("extract_new_terna_urls.pickle", "wb")
pickle.dump(extract_new_terna_urls, f)
f.close()
```
## Set up a log
```
# Configure the display of logs in the notebook and attach it to the root logger
logstream = logging.StreamHandler()
logstream.setLevel(logging.INFO) #threshold for log messages displayed in here
logging.basicConfig(level=logging.INFO, handlers=[logstream])
# Set up an additional logger for debug messages from the scripts
script_logger = logging.getLogger('timeseries_scripts')
script_logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(fmt='%(asctime)s %(name)s %(levelname)s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',)
logfile = logging.handlers.TimedRotatingFileHandler(os.path.join(temp_path, 'logfile.log'))
logfile.setFormatter(formatter)
logfile.setLevel(logging.DEBUG) #threshold for log messages in logfile
script_logger.addHandler(logfile)
# Set up a logger for logs from the notebook
logger = logging.getLogger('notebook')
logger.addHandler(logfile)
```
Execute this cell for more detailed logging messages (may slow down computation).
```
logstream.setLevel(logging.DEBUG)
```
## Select timerange
This section: select the time range and the data sources for download and read. Default: all data sources implemented, full time range available.
**Source parameters** are specified in [input/sources.yml](input/sources.yml), which describes, for each source, the datasets (such as wind and solar generation) alongside all the parameters necessary to execute the downloads.
The option to perform downloading and reading of subsets is for testing only. To be able to run the script successfully until the end, all sources have to be included; otherwise the script will run into errors (e.g. the step where aggregate German timeseries are calculated requires data from all four German TSOs to be loaded).
To select the time range, specify the beginning and end of the interval for which to attempt the download.
Type `None` to download all available data.
```
start_from_user = date(2010, 1, 1)
end_from_user = date(2019, 1, 21)
```
## Select download source
Instead of downloading from the sources, the complete raw data can be downloaded as a zip file from the OPSD Server. Advantages are:
- much faster download
- back up of raw data in case it is deleted from the server at the original source
In order to do this, specify an archive version to use the raw data cached on the OPSD server for that version as input. All data from that version will be downloaded; timerange and subset will be ignored.
Type `None` to download directly from the original sources.
```
archive_version = None # i.e. '2016-07-14'
```
## Select subset
Optionally, specify a subset to download/read.<br>
The next cell prints the available sources and datasets.<br>
```
with open(sources_yaml_path, 'r') as f:
sources = yaml.load(f.read())
for k, v in sources.items():
print(yaml.dump({k: list(v.keys())}, default_flow_style=False))
```
Copy from its output and paste it into the following cell to get the right format.<br>
Type `subset = None` to include all data.
```
subset = yaml.load('''
Terna:
- generation_by_source
''')
#subset = None # to include all sources
# need to exclude Elia data due to unclear copyright situation
exclude = yaml.load('''
- Elia
''')
```
Now eliminate sources and variables not in subset.
```
with open(sources_yaml_path, 'r') as f:
sources = yaml.load(f.read())
if subset: # eliminate sources and variables not in subset
sources = {source_name: {k: v
for k, v in sources[source_name].items()
if k in variable_list}
for source_name, variable_list in subset.items()}
if exclude: # eliminate sources and variables in exclude
sources = {source_name: variable_dict
for source_name, variable_dict in sources.items()
if not source_name in exclude}
# Printing the selected sources (all of them or just a subset)
print("Selected sources: ")
for k, v in sources.items():
print(yaml.dump({k: list(v.keys())}, default_flow_style=False))
```
# Download
This section: download data. Takes about 1 hour to run for the complete data set (`subset=None`).
First, a data directory is created on your local computer. Then, download parameters for each data source are defined, including the URL. These parameters are then turned into a YAML-string. Finally, the download is executed file by file.
Each file is saved under its original filename. Note that the original file names are often not self-explanatory (e.g. "data" or "January"). A file's content is revealed by its place in the directory structure.
Some sources (currently only ENTSO-E Transparency) require an account to allow downloading. For ENTSO-E Transparency, set up an account [here](https://transparency.entsoe.eu/usrm/user/createPublicUser).
```
auth = yaml.load('''
ENTSO-E Transparency FTP:
username: your_email
password: your_password
''')
```
## Automatic download (for most sources)
```
download(sources, data_path, input_path, auth,
archive_version=archive_version,
start_from_user=start_from_user,
end_from_user=end_from_user,
testmode=False)
```
## Manual download
### Energinet.dk
Go to http://osp.energinet.dk/_layouts/Markedsdata/framework/integrations/markedsdatatemplate.aspx.
**Check The Boxes as specified below:**
- Periode
- Hent udtræk fra perioden: **01-01-2005** Til: **01-01-2018**
- Select all months
- Datakolonner
- Elspot Pris, Valutakode/MWh: **Select all**
- Produktion og forbrug, MWh/h: **Select all**
- Udtræksformat
- Valutakode: **EUR**
- Decimalformat: **Engelsk talformat (punktum som decimaltegn)**
- Datoformat: **Andet datoformat (ÅÅÅÅ-MM-DD)**
- Hent Udtræk: **Til Excel**
Click **Hent Udtræk**
You will receive a file `Markedsata.xls` of about 50 MB. Open the file in Excel. There will be a warning from Excel saying that file extension and content are in conflict. Select "open anyway" and save the file as `.xlsx`.
In order to be found by the read-function, place the downloaded file in the following subdirectory:
**`{{data_path}}{{os.sep}}Energinet.dk{{os.sep}}prices_wind_solar{{os.sep}}2005-01-01_2017-12-31`**
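If you prefer to create that target folder programmatically before moving the file into it, a minimal sketch (reusing `data_path` from the Settings section) could look like this:
```
# Sketch only: build and create the target folder for the manual
# Energinet.dk download, using data_path from the Settings section.
target_dir = os.path.join(data_path, 'Energinet.dk', 'prices_wind_solar',
                          '2005-01-01_2017-12-31')
os.makedirs(target_dir, exist_ok=True)
print('Place the downloaded .xlsx file in:', target_dir)
```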
### CEPS
Go to http://www.ceps.cz/en/all-data#GenerationRES
**check boxes as specified below:**
DISPLAY DATA FOR: **Generation RES**
TURN ON FILTER **checked**
FILTER SETTINGS:
- Set the date range
- interval
- from: **2012** to: **2018**
- Aggregation and data version
- Aggregation: **Hour**
- Aggregation function: **average (AVG)**
- Data version: **real data**
- Filter
- Type of power plant: **ALL**
- Click **USE FILTER**
- DOWNLOAD DATA: **DATA V TXT**
You will receive a file `data.txt` of about 1.5 MB.
In order to be found by the read-function, place the downloaded file in the following subdirectory:
**`{{data_path}}{{os.sep}}CEPS{{os.sep}}wind_pv{{os.sep}}2012-01-01_2018-01-31`**
### ENTSO-E Power Statistics
Go to https://www.entsoe.eu/data/statistics/Pages/monthly_hourly_load.aspx
**check boxes as specified below:**
- Date From: **01-01-2016** Date To: **30-04-2016**
- Country: **(Select All)**
- Scale values to 100% using coverage ratio: **NO**
- **View Report**
- Click the Save symbol and select **Excel**
You will receive a file `1.01 Monthly%5FHourly Load%5FValues%5FStatistical.xlsx` of about 1 MB.
In order to be found by the read-function, place the downloaded file in the following subdirectory:
**`{{os.sep}}original_data{{os.sep}}ENTSO-E Power Statistics{{os.sep}}load{{os.sep}}2016-01-01_2016-04-30`**
The data covers the period from 01-01-2016 up to the present, but 4 months of data seems to be the maximum that the interface supports for a single download request, so you have to repeat the download procedure in 4-month periods to cover the whole range up to the present.
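To plan those repeated downloads, a small helper sketch (not part of the original workflow; the date range below is only an example) can list the 4-month windows you would need to request:
```
# Sketch only: list 4-month download windows for the ENTSO-E Power Statistics
# interface, here covering 2016 through 2018 as an example.
starts = pd.date_range('2016-01-01', '2018-12-31', freq='4MS')
for s in starts:
    e = s + pd.DateOffset(months=4) - pd.DateOffset(days=1)
    print(s.date(), '->', e.date())
```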
# Read
This section: Read each downloaded file into a pandas-DataFrame and merge data from different sources if it has the same time resolution. Takes ~15 minutes to run.
## Preparations
Define the column-header rows at the top of the data that are used to store metadata internally. The order of this list determines the order of the levels in the resulting output.
```
headers = ['region', 'variable', 'attribute', 'source', 'web', 'unit']
```
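For illustration only (a toy example, not part of the processing pipeline), this is what a single column header built from these six levels looks like as a pandas MultiIndex; the values are placeholders in the style used later in the notebook:
```
# Toy example: one column header using the six metadata levels defined above.
example_cols = pd.MultiIndex.from_tuples(
    [('DE_50hertz', 'wind', 'generation_actual', '50 Hertz', '', 'MW')],
    names=headers)
print(example_cols)
```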
Read a prepared table containing meta data on the geographical areas
```
areas = pd.read_csv(areas_csv_path)
```
View the areas table
```
areas.loc[areas['area ID'].notnull(), :'EIC'].fillna('')
```
## Reading loop
Loop through sources and variables to do the reading.
First read the original CSV, Excel etc. files into pandas DataFrames.
```
areas = pd.read_csv(areas_csv_path)
# For each source in the source dictionary
for source_name, source_dict in sources.items():
# For each variable from source_name
for variable_name, param_dict in source_dict.items():
# variable_dir = os.path.join(data_path, source_name, variable_name)
res_list = param_dict['resolution']
for res_key in res_list:
df = read(data_path, areas, source_name, variable_name,
res_key, headers, param_dict,
start_from_user=start_from_user,
end_from_user=end_from_user)
os.makedirs(res_key, exist_ok=True)
filename = '_'.join([source_name, variable_name]) + '.pickle'
df.to_pickle(os.path.join(res_key, filename))
```
Then combine the DataFrames that have the same temporal resolution
```
# Create a dictionary of empty DataFrames to be populated with data
data_sets = {'15min': pd.DataFrame(),
'30min': pd.DataFrame(),
'60min': pd.DataFrame()}
entso_e = {'15min': pd.DataFrame(),
'30min': pd.DataFrame(),
'60min': pd.DataFrame()}
for res_key in data_sets.keys():
if not os.path.isdir(res_key):
continue
for filename in os.listdir(res_key):
source_name = filename.split('_')[0]
if subset and not source_name in subset.keys():
continue
logger.info('include %s', filename)
df_portion = pd.read_pickle(os.path.join(res_key, filename))
if source_name == 'ENTSO-E Transparency FTP':
dfs = entso_e
else:
dfs = data_sets
if dfs[res_key].empty:
dfs[res_key] = df_portion
elif not df_portion.empty:
dfs[res_key] = dfs[res_key].combine_first(df_portion)
else:
logger.warning(filename + ' WAS EMPTY')
for res_key, df in data_sets.items():
logger.info(res_key + ': %s', df.shape)
for res_key, df in entso_e.items():
logger.info('ENTSO-E ' + res_key + ': %s', df.shape)
```
Display some rows of the dataframes to get a first impression of the data.
```
data_sets['60min'].head()
```
## Save raw data
Save the DataFrames created by the read function to disk. This way you have the raw data to fall back to if something goes wrong in the remainder of this notebook, without having to repeat the previous steps.
```
data_sets['15min'].to_pickle('raw_data_15.pickle')
data_sets['30min'].to_pickle('raw_data_30.pickle')
data_sets['60min'].to_pickle('raw_data_60.pickle')
entso_e['15min'].to_pickle('raw_entso_e_15.pickle')
entso_e['30min'].to_pickle('raw_entso_e_30.pickle')
entso_e['60min'].to_pickle('raw_entso_e_60.pickle')
```
Load the DataFrames saved above
```
data_sets = {}
data_sets['15min'] = pd.read_pickle('raw_data_15.pickle')
data_sets['30min'] = pd.read_pickle('raw_data_30.pickle')
data_sets['60min'] = pd.read_pickle('raw_data_60.pickle')
entso_e = {}
entso_e['15min'] = pd.read_pickle('raw_entso_e_15.pickle')
entso_e['30min'] = pd.read_pickle('raw_entso_e_30.pickle')
entso_e['60min'] = pd.read_pickle('raw_entso_e_60.pickle')
```
# Processing
This section: missing data handling, aggregation of sub-national to national data, aggregate 15'-data to 60'-resolution. Takes 30 minutes to run.
## Missing data handling
### Interpolation
Patch missing data. At this stage, only small gaps (up to 2 hours) are filled by linear interpolation. This catches most of the missing data due to daylight saving time transitions, while leaving bigger gaps untouched.
The exact locations of missing data are stored in the `nan_table` DataFrames.
Where data has been interpolated, it is marked in a new column `comment`. For example, the comment `solar_DE-transnetbw_generation;` means that in the original data, there is a gap in the solar generation timeseries from TransnetBW in the time period where the marker appears.
Patch the datasets and display the location of missing data in the original data. Takes ~5 minutes to run.
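As a minimal sketch of the idea (the actual patching happens inside `find_nan`, whose behaviour may differ in detail), limited linear interpolation can be expressed in pandas like this, assuming hourly data where a 2-hour gap corresponds to two consecutive missing values:
```
# Sketch only: interpolate linearly, filling at most 2 consecutive missing
# values. Note that, unlike the project's find_nan, this still partially
# fills the beginning of longer gaps.
toy = pd.Series([1.0, np.nan, np.nan, 4.0, np.nan, np.nan, np.nan, 8.0])
print(toy.interpolate(method='linear', limit=2, limit_area='inside'))
```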
```
nan_tables = {}
overviews = {}
for res_key, df in data_sets.items():
data_sets[res_key], nan_tables[res_key], overviews[res_key] = find_nan(
df, res_key, headers, patch=True)
for res_key, df in entso_e.items():
entso_e[res_key], nan_tables[res_key + ' ENTSO-E'], overviews[res_key + ' ENTSO-E'] = find_nan(
df, res_key, headers, patch=True)
```
Execute this to see an example of where the data has been patched.
```
data_sets['60min'][data_sets['60min']['interpolated_values'].notnull()].tail()
```
Display the table of regions of missing values
```
nan_tables['60min']
```
You can export the NaN-tables to Excel in order to inspect where there are NaNs
```
writer = pd.ExcelWriter('NaN_table.xlsx')
for res_key, df in nan_tables.items():
df.to_excel(writer, res_key)
writer.save()
writer = pd.ExcelWriter('Overview.xlsx')
for res_key, df in overviews.items():
df.to_excel(writer, res_key)
writer.save()
```
Save/Load the patched data sets
```
data_sets['15min'].to_pickle('patched_15.pickle')
data_sets['30min'].to_pickle('patched_30.pickle')
data_sets['60min'].to_pickle('patched_60.pickle')
entso_e['15min'].to_pickle('patched_entso_e_15.pickle')
entso_e['30min'].to_pickle('patched_entso_e_30.pickle')
entso_e['60min'].to_pickle('patched_entso_e_60.pickle')
data_sets = {}
data_sets['15min'] = pd.read_pickle('patched_15.pickle')
data_sets['30min'] = pd.read_pickle('patched_30.pickle')
data_sets['60min'] = pd.read_pickle('patched_60.pickle')
entso_e = {}
entso_e['15min'] = pd.read_pickle('patched_entso_e_15.pickle')
entso_e['30min'] = pd.read_pickle('patched_entso_e_30.pickle')
entso_e['60min'] = pd.read_pickle('patched_entso_e_60.pickle')
```
## Country specific calculations
### Calculate onshore wind generation for German TSOs
For 50 Hertz, it is already in the data.
For TenneT, it is calculated by subtracting offshore from total wind generation.
For Amprion and TransnetBW, onshore wind generation is just total wind generation.
Takes <1 second to run.
```
# Some of the following operations require the Dataframes to be lexsorted in
# the columns
for res_key, df in data_sets.items():
df.sort_index(axis=1, inplace=True)
for area, source, url in zip(
['DE_amprion', 'DE_tennet', 'DE_transnetbw'],
['Amprion', 'TenneT', 'TransnetBW'],
['http://www.amprion.net/en/wind-feed-in',
'http://www.tennettso.de/site/en/Transparency/publications/network-figures/actual-and-forecast-wind-energy-feed-in',
'https://www.transnetbw.com/en/transparency/market-data/key-figures']):
new_col_header = {
'variable': 'wind_onshore',
'region': '{area}',
'attribute': 'generation_actual',
'source': '{source}',
'web': '{url}',
'unit': 'MW'
}
if area == 'DE_tennet':
colname = ('DE_tennet', 'wind_offshore', 'generation_actual', 'TenneT')
offshore = data_sets['15min'].loc[:, colname]
else:
offshore = 0
data_sets['15min'][
tuple(new_col_header[level].format(area=area, source=source, url=url)
for level in headers)
] = (data_sets['15min'][(area, 'wind', 'generation_actual', source)] - offshore)
# Sort again
data_sets['15min'].sort_index(axis=1, inplace=True)
```
### Calculate aggregate wind capacity for Germany (on + offshore)
Apart from being interesting on its own, this is also required to calculate an aggregated wind profile for Germany.
```
new_col_header = {
'variable': 'wind',
'region': 'DE',
'attribute': 'capacity',
'source': 'own calculation based on BNetzA and netztransparenz.de',
'web': 'http://data.open-power-system-data.org/renewable_power_plants',
'unit': 'MW'
}
new_col_header = tuple(new_col_header[level] for level in headers)
data_sets['15min'][new_col_header] = (
data_sets['15min']
.loc[:, ('DE', ['wind_onshore', 'wind_offshore'], 'capacity')]
.sum(axis=1, skipna=False))
# Sort again
data_sets['15min'].sort_index(axis=1, inplace=True)
```
### Aggregate German data from individual TSOs and calculate availabilities/profiles
The wind and solar in-feed data for the 4 German balancing areas is summed up and stored in new columns, which are then used to calculate profiles, that is, the share of wind/solar capacity producing at a given time (for example, 20 GW of generation against 50 GW of installed capacity gives a profile of 0.4). The column headers are created in the fashion introduced in the read script. Takes 5 seconds to run.
```
control_areas_DE = ['DE_50hertz', 'DE_amprion', 'DE_tennet', 'DE_transnetbw']
for variable in ['solar', 'wind', 'wind_onshore', 'wind_offshore']:
# we could also include 'generation_forecast'
for attribute in ['generation_actual']:
# Calculate aggregate German generation
sum_col = data_sets['15min'].loc(axis=1)[(control_areas_DE,
variable, attribute)].sum(axis=1, skipna=False).to_frame()
# Create a new MultiIndex
new_col_header = {
'variable': '{variable}',
'region': 'DE',
'attribute': '{attribute}',
'source': 'own calculation based on German TSOs',
'web': '',
'unit': 'MW'
}
tuples = [tuple(new_col_header[level].format(
variable=variable, attribute=attribute) for level in headers)]
sum_col.columns = pd.MultiIndex.from_tuples(tuples, names=headers)
# append aggregate German generation to the dataset after rounding
data_sets['15min'] = data_sets['15min'].combine_first(sum_col.round(0))
if attribute == 'generation_actual':
# Calculate the profile column
profile_col = (sum_col.values /
data_sets['15min']['DE', variable, 'capacity']).round(4)
# Create a new MultiIndex and append profile to the dataset
new_col_header = {
'variable': '{variable}',
'region': 'DE',
'attribute': 'profile',
'source': 'own calculation based on German TSOs, BNetzA and netztranzparenz.de',
'web': '',
'unit': 'fraction'
}
tuples = [tuple(new_col_header[level].format(variable=variable)
for level in headers)]
profile_col.columns = pd.MultiIndex.from_tuples(
tuples, names=headers)
data_sets['15min'] = data_sets['15min'].combine_first(profile_col)
```
### Aggregate Italian data
The data for Italy come by regions (North, Central North, Sicily, etc.), so they need to be aggregated in order to get the data for Italy as a whole. In the next cell, we sum up the data by region for each variable-attribute pair present in the Terna dataset header.
```
bidding_zones_IT = ["IT_CNOR", "IT_CSUD", "IT_NORD", "IT_SARD", "IT_SICI", "IT_SUD"]
for variable in ["solar", "wind_onshore"]:
sum_col = data_sets['60min'].loc(axis=1)[(bidding_zones_IT,
variable)].sum(axis=1, skipna=False)#.to_frame()
# Create a new MultiIndex
new_col_header = {
"region" : "IT",
"variable" : variable,
"attribute" : "generation_actual",
"source": "own calculation based on Terna",
"web" : "",
"unit" : "MW"
}
tuples = tuple(new_col_header[level] for level in headers)
data_sets['60min'][tuples] = sum_col#\
# data_sets['60min'].loc[:, (italian_regions, variable, attribute)].sum(axis=1)
# Sort again
data_sets['60min'].sort_index(axis=1, inplace=True)
```
Another savepoint
```
data_sets['15min'].to_pickle('calc_15.pickle')
data_sets['30min'].to_pickle('calc_30.pickle')
data_sets['60min'].to_pickle('calc_60.pickle')
os.chdir(temp_path)
data_sets = {}
data_sets['15min'] = pd.read_pickle('calc_15.pickle')
data_sets['30min'] = pd.read_pickle('calc_30.pickle')
data_sets['60min'] = pd.read_pickle('calc_60.pickle')
entso_e = {}
entso_e['15min'] = pd.read_pickle('patched_entso_e_15.pickle')
entso_e['30min'] = pd.read_pickle('patched_entso_e_30.pickle')
entso_e['60min'] = pd.read_pickle('patched_entso_e_60.pickle')
```
## Fill columns not retrieved directly from TSO websites with ENTSO-E Transparency data
```
for res_key, df in entso_e.items():
# Combine with TSO data
# Copy entire 30min data from ENTSO-E
if data_sets[res_key].empty:
data_sets[res_key] = df
else:
# Keep only region, variable, attribute in MultiIndex for comparison
data_cols = data_sets[res_key].columns.droplevel(
['source', 'web', 'unit'])
# Compare columns from ENTSO-E against ours, keep those we don't have yet
tuples = [col for col in df.columns if not col[:3] in data_cols]
add_cols = pd.MultiIndex.from_tuples(tuples, names=headers)
data_sets[res_key] = data_sets[res_key].combine_first(df[add_cols])
# Add the ENTSO-E markers (but only for the columns actually copied)
add_cols = ['_'.join(col[:3]) for col in tuples]
# Spread marker column out over a DataFrame for easier comparison
# Filter out every second column, which contains the delimiter " | "
# from the marker
marker_table = (df['interpolated_values'].str.split(' | ', expand=True)
.filter(regex='^\d*[02468]$', axis='columns'))
# Replace cells with markers marking columns not copied with NaNs
marker_table[~marker_table.isin(add_cols)] = np.nan
for col_name, col in marker_table.iteritems():
if col_name == 0:
marker_entso_e = col
else:
marker_entso_e = glue_markers(marker_entso_e, col)
# Glue ENTSO-E marker onto our old marker
marker = data_sets[res_key]['interpolated_values']
data_sets[res_key].loc[:, 'interpolated_values'] = glue_markers(
marker, df['interpolated_values'].reindex(marker.index))
```
## Resample higher frequencies to 60'
Some data comes in 15 or 30-minute intervals (e.g. German or British renewable generation), other data in 60-minute intervals (e.g. load data from ENTSO-E and prices). We resample the 15 and 30-minute data to hourly resolution and append it to the 60-minute dataset.
The marker column is resampled separately in such a way that all information on where data has been interpolated is preserved.
The `.resample('H').mean()` method calculates the mean of the values for the 4 quarter-hours [:00, :15, :30, :45] of an hour, stores it at :00 and drops the other 3 entries. Takes 15 seconds to run.
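A toy example of that behaviour (illustration only, separate from the loop below):
```
# Toy example: four quarter-hour values collapse to one hourly mean at :00.
quarter_hours = pd.Series(
    [10.0, 20.0, 30.0, 40.0],
    index=pd.date_range('2019-01-01 00:00', periods=4, freq='15min'))
print(quarter_hours.resample('H').mean())  # 2019-01-01 00:00 -> 25.0
```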
```
#marker_60 = data_sets['60min']['interpolated_values']
for res_key, df in data_sets.items():
if res_key == '60min':
break
# Resample first the marker column
marker_resampled = df['interpolated_values'].groupby(
pd.Grouper(freq='60Min', closed='left', label='left')
).agg(resample_markers, drop_region='DE_AT_LU')
marker_resampled = marker_resampled.reindex(data_sets['60min'].index)
# Glue condensed 15 min marker onto 60 min marker
data_sets['60min'].loc[:, 'interpolated_values'] = glue_markers(
data_sets['60min']['interpolated_values'],
marker_resampled.reindex(data_sets['60min'].index))
# Drop DE_AT_LU bidding zone data from the 15 minute resolution data to
# be resampled since it is already provided in 60 min resolution by
# ENTSO-E Transparency
df = df.drop('DE_AT_LU', axis=1, errors='ignore')
# Do the resampling
resampled = df.resample('H').mean()
resampled.columns = resampled.columns.map(mark_own_calc)
resampled.columns.names = headers
# Round the resampled columns
for col in resampled.columns:
if col[2] == 'profile':
resampled.loc[:, col] = resampled.loc[:, col].round(4)
else:
resampled.loc[:, col] = resampled.loc[:, col].round(0)
data_sets['60min'] = data_sets['60min'].combine_first(resampled)
```
## Insert a column with Central European (Summer-)time
The index column of the data sets defines the start of the time period represented by each row of that data set in **UTC** time. We include an additional column for the **CE(S)T** Central European (Summer-) Time, as this might help align the output data with other data sources.
```
info_cols = {'utc': 'utc_timestamp',
'cet': 'cet_cest_timestamp',
'marker': 'interpolated_values'}
for res_key, df in data_sets.items():
if df.empty:
continue
df.index.rename(info_cols['utc'], inplace=True)
df.insert(0, info_cols['cet'],
df.index.tz_localize('UTC').tz_convert('Europe/Brussels'))
```
# Create a final savepoint
```
data_sets['15min'].to_pickle('final_15.pickle')
data_sets['30min'].to_pickle('final_30.pickle')
data_sets['60min'].to_pickle('final_60.pickle')
os.chdir(temp_path)
data_sets = {}
data_sets['15min'] = pd.read_pickle('final_15.pickle')
data_sets['30min'] = pd.read_pickle('final_30.pickle')
data_sets['60min'] = pd.read_pickle('final_60.pickle')
```
Show the column names contained in the final DataFrame in a table
```
col_info = pd.DataFrame()
df = data_sets['60min']
for level in df.columns.names:
col_info[level] = df.columns.get_level_values(level)
col_info
```
# Write data to disk
This section: Save as [Data Package](http://data.okfn.org/doc/tabular-data-package) (data in CSV, metadata in JSON file). All files are saved in the directory of this notebook. Alternative file formats (SQL, XLSX) are also exported. Takes about 1 hour to run.
## Limit time range
Cut off the data outside of `[start_from_user:end_from_user]`
```
for res_key, df in data_sets.items():
# In order to make sure that the respective time period is covered in both
# UTC and CE(S)T, we set the start in CE(S)T, but the end in UTC
if start_from_user:
start_from_user = (
pytz.timezone('Europe/Brussels')
.localize(datetime.combine(start_from_user, time()))
.astimezone(pytz.timezone('UTC')))
if end_from_user:
end_from_user = (
pytz.timezone('UTC')
.localize(datetime.combine(end_from_user, time()))
# Appropriate offset to include the end of the period
+ timedelta(days=1, minutes=-int(res_key[:2])))
# Then cut off the data_set
data_sets[res_key] = df.loc[start_from_user:end_from_user, :]
```
## Different shapes
Data are provided in three different "shapes":
- SingleIndex (easy to read for humans, compatible with datapackage standard, small file size)
- Fileformat: CSV, SQLite
- MultiIndex (easy to read into GAMS, not compatible with datapackage standard, small file size)
- Fileformat: CSV, Excel
- Stacked (compatible with data package standard, large file size, many rows, too many for Excel)
- Fileformat: CSV
The different shapes need to be created internally before they can be saved to files. Takes about 1 minute to run.
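To see the difference between the wide MultiIndex layout and the stacked layout on a toy frame (illustration only; the real conversion happens in the next cell):
```
# Toy example: a 2-column MultiIndex frame and its "stacked" long form.
toy_cols = pd.MultiIndex.from_tuples(
    [('DE', 'wind', 'generation_actual'),
     ('DE', 'solar', 'generation_actual')],
    names=['region', 'variable', 'attribute'])
toy = pd.DataFrame([[100.0, 5.0], [110.0, 7.0]],
                   index=pd.date_range('2019-01-01', periods=2, freq='H'),
                   columns=toy_cols)
print(toy)                                            # wide, MultiIndex columns
print(toy.transpose().stack().to_frame(name='data'))  # long, "stacked" shape
```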
```
data_sets_singleindex = {}
data_sets_multiindex = {}
data_sets_stacked = {}
for res_key, df in data_sets.items():
if df.empty:
continue
# # Round floating point numbers to 2 digits
# for col_name, col in df.iteritems():
# if col_name[0] in info_cols.values():
# pass
# elif col_name[2] == 'profile':
# df[col_name] = col.round(4)
# else:
# df[col_name] = col.round(3)
# MultiIndex
data_sets_multiindex[res_key + '_multiindex'] = df
# SingleIndex
df_singleindex = df.copy()
# use first 3 levels of multiindex to create singleindex
df_singleindex.columns = [
col_name[0] if col_name[0] in info_cols.values()
else '_'.join([level for level in col_name[0:3] if not level == ''])
for col_name in df.columns.values]
data_sets_singleindex[res_key + '_singleindex'] = df_singleindex
# Stacked
stacked = df.copy().drop(columns=info_cols['cet'], level=0)
stacked.columns = stacked.columns.droplevel(['source', 'web', 'unit'])
# Concatenate all columns below each other (="stack").
# df.transpose().stack() is faster than stacking all column levels
# separately
stacked = stacked.transpose().stack(dropna=True).to_frame(name='data')
data_sets_stacked[res_key + '_stacked'] = stacked
```
## Write to SQL-database
This file format is required for the filtering function on the OPSD website. This takes ~3 minutes to complete.
```
os.chdir(out_path)
for res_key, df in data_sets_singleindex.items():
table = 'time_series_' + res_key
df = df.copy()
df.index = df.index.strftime('%Y-%m-%dT%H:%M:%SZ')
cet_col_name = info_cols['cet']
df[cet_col_name] = (df[cet_col_name].dt.strftime('%Y-%m-%dT%H:%M:%S%z'))
df.to_sql(table, sqlite3.connect('time_series.sqlite'),
if_exists='replace', index_label=info_cols['utc'])
```
## Write to Excel
Writing the full tables to Excel takes an extremely long time. As a workaround, only the timestamp columns are exported. The rest of the data can then be inserted manually from the `_multiindex.csv` files.
```
os.chdir(out_path)
writer = pd.ExcelWriter('time_series1.xlsx')
for res_key, df in data_sets_multiindex.items():
# Need to convert CE(S)T-timestamps to tz-naive, otherwise Excel converts
# them back to UTC
excel_timestamps = df.loc[:,(info_cols['cet'], '', '', '', '', '')]
excel_timestamps = excel_timestamps.dt.tz_localize(None)
excel_timestamps.to_excel(writer, res_key.split('_')[0],
float_format='%.2f', merge_cells=True)
# merge_cells=False doesn't work properly with multiindex
writer.save()
```
## Write to CSV
This takes about 10 minutes to complete.
```
os.chdir(out_path)
# itertools.chain() allows iterating over multiple dicts at once
for res_stacking_key, df in itertools.chain(
data_sets_singleindex.items(),
data_sets_multiindex.items(),
data_sets_stacked.items()
):
df = df.copy()
# convert the format of the cet_cest-timestamp to ISO-8601
if not res_stacking_key.split('_')[1] == 'stacked':
df.iloc[:, 0] = df.iloc[:, 0].dt.strftime('%Y-%m-%dT%H:%M:%S%z') # https://frictionlessdata.io/specs/table-schema/#date
filename = 'time_series_' + res_stacking_key + '.csv'
df.to_csv(filename, float_format='%.4f',
date_format='%Y-%m-%dT%H:%M:%SZ')
```
# Create metadata
This section: create the metadata, both general and column-specific. All metadata will be stored as a JSON file. Takes 10s to run.
```
os.chdir(out_path)
make_json(data_sets, info_cols, version, changes, headers, areas,
start_from_user, end_from_user)
```
## Write checksums.txt
We publish SHA checksums for the output files on GitHub to allow verifying the integrity of the output files on the OPSD server.
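For reference, a file checksum can be computed with the standard library `hashlib` as sketched below (illustration only; the published checksums come from the `get_sha_hash` helper imported above from `timeseries_scripts.make_json`, which defines the actual hash algorithm used):
```
# Illustration only: SHA-256 checksum of a file via hashlib. The published
# checksums are produced by get_sha_hash from timeseries_scripts.make_json.
import hashlib

def sha256_of(filename, chunk_size=2**20):
    sha = hashlib.sha256()
    with open(filename, 'rb') as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b''):
            sha.update(chunk)
    return sha.hexdigest()

# e.g. sha256_of('checksums.txt')
```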
```
os.chdir(out_path)
files = os.listdir(out_path)
# Create checksums.txt in the output directory
with open('checksums.txt', 'w') as f:
for file_name in files:
if file_name.split('.')[-1] in ['csv', 'sqlite', 'xlsx']:
file_hash = get_sha_hash(file_name)
f.write('{},{}\n'.format(file_name, file_hash))
# Copy the file to root directory from where it will be pushed to GitHub,
# leaving a copy in the version directory for reference
copyfile('checksums.txt', os.path.join(home_path, 'checksums.txt'))
```
```
import gym
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.distributions import Bernoulli
import matplotlib.pyplot as plt
class PolicyNet(nn.Module):
def __init__(self, input_dim, output_dim):
super(PolicyNet, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.fc1 = nn.Linear(self.input_dim, 32)
self.fc2 = nn.Linear(32, 32)
self.output = nn.Linear(32, self.output_dim)
def forward(self, x):
output = F.relu(self.fc1(x))
output = F.relu(self.fc2(output))
output = torch.sigmoid(self.output(output))
return output
def convert_to_torch_variable(arr):
"""Converts a numpy array to torch variable"""
return Variable(torch.from_numpy(arr).float())
# Define environment
env = gym.make("CartPole-v0")
env.seed(0)
# Create environment monitor for video recording
video_monitor_callable = lambda _: True
# monitored_env = gym.wrappers.Monitor(env, './cartpole_videos', force=True, video_callable=video_monitor_callable)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
bernoulli_action_dim = 1
# Initialize policy network
policy_net = PolicyNet(input_dim=state_dim, output_dim=bernoulli_action_dim)
# Hyperparameters
NUM_EPISODES = 500
GAMMA = 0.99
BATCH_SIZE = 5
LEARNING_RATE = 0.01
# Let baseline be 0 for now
baseline = 0.0
# Define optimizer
optimizer = torch.optim.RMSprop(policy_net.parameters(), lr=LEARNING_RATE)
# Collect trajectory rewards for plotting purpose
traj_reward_history = []
# training loop
for ep_i in range(NUM_EPISODES):
loss = 0.0
# Record states, actions and discounted rewards of this episode
states = []
actions = []
rewards = []
cumulative_undiscounted_reward = 0.0
for traj_i in range(BATCH_SIZE):
time_step = 0
done = False
# initialize environment
cur_state = env.reset()
cur_state = convert_to_torch_variable(cur_state)
discount_factor = 1.0
discounted_rewards = []
grad_log_params = []
while not done:
# Compute action probability using the current policy
action_prob = policy_net(cur_state)
# Sample action according to action probability
action_sampler = Bernoulli(probs=action_prob)
action = action_sampler.sample()
action = action.numpy().astype(int)[0]
# Record the states and actions -- will be used for policy gradient later
states.append(cur_state)
actions.append(action)
# take a step in the environment, and collect data
next_state, reward, done, _ = env.step(action)
# Discount the reward, and append to reward list
discounted_reward = reward * discount_factor
discounted_rewards.append(discounted_reward)
cumulative_undiscounted_reward += reward
# Prepare for taking the next step
cur_state = convert_to_torch_variable(next_state)
time_step += 1
discount_factor *= GAMMA
# Finished collecting data for the current trajectory.
# Recall temporal structure in policy gradient.
# Construct the "cumulative future discounted reward" at each time step.
for time_i in range(time_step):
# relevant reward is the sum of rewards from time t to the end of trajectory
relevant_reward = sum(discounted_rewards[time_i:])
rewards.append(relevant_reward)
# Finished collecting data for this batch. Update policy using policy gradient.
avg_traj_reward = cumulative_undiscounted_reward / BATCH_SIZE
traj_reward_history.append(avg_traj_reward)
if (ep_i + 1) % 10 == 0:
print("Episode {}: Average reward per trajectory = {}".format(ep_i + 1, avg_traj_reward))
#if (ep_i + 1) % 100 == 0:
# record_video()
optimizer.zero_grad()
data_len = len(states)
loss = 0.0
# Compute the policy gradient
for data_i in range(data_len):
action_prob = policy_net(states[data_i])
action_sampler = Bernoulli(probs=action_prob)
loss -= action_sampler.log_prob(torch.Tensor([actions[data_i]])) * (rewards[data_i] - baseline)
loss /= float(data_len)
loss.backward()
optimizer.step()
# Don't forget to close the environments.
#monitored_env.close()
env.close()
# Plot learning curve
plt.figure()
plt.plot(traj_reward_history)
plt.title("Learning to Solve CartPole-v1 with Policy Gradient")
plt.xlabel("Episode")
plt.ylabel("Average Reward per Trajectory")
plt.savefig("CartPole-pg.png")
plt.show()
plt.close()
```
## Forecasting, updating datasets, and the "news"
In this notebook, we describe how to use Statsmodels to compute the impacts of updated or revised datasets on out-of-sample forecasts or in-sample estimates of missing data. We follow the approach of the "Nowcasting" literature (see references at the end), by using a state space model to compute the "news" and impacts of incoming data.
**Note**: this notebook applies to Statsmodels v0.12+. In addition, it only applies to the state space models or related classes, which are: `sm.tsa.statespace.ExponentialSmoothing`, `sm.tsa.arima.ARIMA`, `sm.tsa.SARIMAX`, `sm.tsa.UnobservedComponents`, `sm.tsa.VARMAX`, and `sm.tsa.DynamicFactor`.
```
%matplotlib inline
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
macrodata = sm.datasets.macrodata.load_pandas().data
macrodata.index = pd.period_range('1959Q1', '2009Q3', freq='Q')
```
Forecasting exercises often start with a fixed set of historical data that is used for model selection and parameter estimation. Then, the fitted selected model (or models) can be used to create out-of-sample forecasts. Most of the time, this is not the end of the story. As new data comes in, you may need to evaluate your forecast errors, possibly update your models, and create updated out-of-sample forecasts. This is sometimes called a "real-time" forecasting exercise (by contrast, a pseudo real-time exercise is one in which you simulate this procedure).
If all that matters is minimizing some loss function based on forecast errors (like MSE), then when new data comes in you may just want to completely redo model selection, parameter estimation and out-of-sample forecasting, using the updated datapoints. If you do this, your new forecasts will have changed for two reasons:
1. You have received new data that gives you new information
2. Your forecasting model or the estimated parameters are different
In this notebook, we focus on methods for isolating the first effect. The way we do this comes from the so-called "nowcasting" literature, and in particular Bańbura, Giannone, and Reichlin (2011), Bańbura and Modugno (2014), and Bańbura et al. (2014). They describe this exercise as computing the "**news**", and we follow them in using this language in Statsmodels.
These methods are perhaps most useful with multivariate models, since there multiple variables may update at the same time, and it is not immediately obvious what forecast change was created by what updated variable. However, they can still be useful for thinking about forecast revisions in univariate models. We will therefore start with the simpler univariate case to explain how things work, and then move to the multivariate case afterwards.
**Note on revisions**: the framework that we are using is designed to decompose changes to forecasts from newly observed datapoints. It can also take into account *revisions* to previously published datapoints, but it does not decompose them separately. Instead, it only shows the aggregate effect of "revisions".
**Note on `exog` data**: the framework that we are using only decomposes changes to forecasts from newly observed datapoints for *modeled* variables. These are the "left-hand-side" variables that in Statsmodels are given in the `endog` arguments. This framework does not decompose or account for changes to unmodeled "right-hand-side" variables, like those included in the `exog` argument.
### Simple univariate example: AR(1)
We will begin with a simple autoregressive model, an AR(1):
$$y_t = \phi y_{t-1} + \varepsilon_t$$
- The parameter $\phi$ captures the persistence of the series
We will use this model to forecast inflation.
To make it simpler to describe the forecast updates in this notebook, we will work with inflation data that has been de-meaned, but it is straightforward in practice to augment the model with a mean term.
```
# De-mean the inflation series
y = macrodata['infl'] - macrodata['infl'].mean()
```
#### Step 1: fitting the model on the available dataset
Here, we'll simulate an out-of-sample exercise, by constructing and fitting our model using all of the data except the last five observations. We'll assume that we haven't observed these values yet, and then in subsequent steps we'll add them back into the analysis.
```
y_pre = y.iloc[:-5]
y_pre.plot(figsize=(15, 3), title='Inflation');
```
To construct forecasts, we first estimate the parameters of the model. This returns a results object that we will be able to use to produce forecasts.
```
mod_pre = sm.tsa.arima.ARIMA(y_pre, order=(1, 0, 0), trend='n')
res_pre = mod_pre.fit()
print(res_pre.summary())
```
Creating the forecasts from the results object `res` is easy - you can just call the `forecast` method with the number of forecasts you want to construct. In this case, we'll construct four out-of-sample forecasts.
```
# Compute the forecasts
forecasts_pre = res_pre.forecast(4)
# Plot the last 3 years of data and the four out-of-sample forecasts
y_pre.iloc[-12:].plot(figsize=(15, 3), label='Data', legend=True)
forecasts_pre.plot(label='Forecast', legend=True);
```
For the AR(1) model, it is also easy to manually construct the forecasts. Denoting the last observed variable as $y_T$ and the $h$-step-ahead forecast as $y_{T+h|T}$, we have:
$$y_{T+h|T} = \hat \phi^h y_T$$
Where $\hat \phi$ is our estimated value for the AR(1) coefficient. From the summary output above, we can see that this is the first parameter of the model, which we can access from the `params` attribute of the results object.
```
# Get the estimated AR(1) coefficient
phi_hat = res_pre.params[0]
# Get the last observed value of the variable
y_T = y_pre.iloc[-1]
# Directly compute the forecasts at the horizons h=1,2,3,4
manual_forecasts = pd.Series([phi_hat * y_T, phi_hat**2 * y_T,
phi_hat**3 * y_T, phi_hat**4 * y_T],
index=forecasts_pre.index)
# We'll print the two to double-check that they're the same
print(pd.concat([forecasts_pre, manual_forecasts], axis=1))
```
#### Step 2: computing the "news" from a new observation
Suppose that time has passed, and we have now received another observation. Our dataset is now larger, and we can evaluate our forecast error and produce updated forecasts for the subsequent quarters.
```
# Get the next observation after the "pre" dataset
y_update = y.iloc[-5:-4]
# Print the forecast error
print('Forecast error: %.2f' % (y_update.iloc[0] - forecasts_pre.iloc[0]))
```
To compute forecasts based on our updated dataset, we will create an updated results object `res_post` using the `append` method, to append on our new observation to the previous dataset.
Note that by default, the `append` method does not re-estimate the parameters of the model. This is exactly what we want here, since we want to isolate the effect on the forecasts of the new information only.
```
# Create a new results object by passing the new observations to the `append` method
res_post = res_pre.append(y_update)
# Since we now know the value for 2008Q3, we will only use `res_post` to
# produce forecasts for 2008Q4 through 2009Q2
forecasts_post = pd.concat([y_update, res_post.forecast('2009Q2')])
print(forecasts_post)
```
In this case, the forecast error is quite large - inflation was more than 10 percentage points below the AR(1) model's forecast. (This was largely because of large swings in oil prices around the global financial crisis).
To analyse this in more depth, we can use Statsmodels to isolate the effect of the new information - or the "**news**" - on our forecasts. This means that we do not yet want to change our model or re-estimate the parameters. Instead, we will use the `news` method that is available in the results objects of state space models.
Computing the news in Statsmodels always requires a *previous* results object or dataset, and an *updated* results object or dataset. Here we will use the original results object `res_pre` as the previous results and the `res_post` results object that we just created as the updated results.
Once we have previous and updated results objects or datasets, we can compute the news by calling the `news` method. Here, we will call `res_pre.news`, and the first argument will be the updated results, `res_post` (however, if you have two results objects, the `news` method can be called on either one).
In addition to specifying the comparison object or dataset as the first argument, there are a variety of other arguments that are accepted. The most important specify the "impact periods" that you want to consider. These "impact periods" correspond to the forecasted periods of interest; i.e. these dates specify which periods will have their forecast revisions decomposed.
To specify the impact periods, you must pass two of `start`, `end`, and `periods` (similar to the Pandas `date_range` method). If your time series was a Pandas object with an associated date or period index, then you can pass dates as values for `start` and `end`, as we do below.
```
# Compute the impact of the news on the four periods that we previously
# forecasted: 2008Q3 through 2009Q2
news = res_pre.news(res_post, start='2008Q3', end='2009Q2')
# Note: one alternative way to specify these impact dates is
# `start='2008Q3', periods=4`
```
The variable `news` is an object of the class `NewsResults`, and it contains details about the updates to the data in `res_post` compared to `res_pre`, the new information in the updated dataset, and the impact that the new information had on the forecasts in the period between `start` and `end`.
One easy way to summarize the results is with the `summary` method.
```
print(news.summary())
```
**Summary output**: the default summary for this news results object printed four tables:
1. Summary of the model and datasets
2. Details of the news from updated data
3. Summary of the impacts of the new information on the forecasts between `start='2008Q3'` and `end='2009Q2'`
4. Details of how the updated data led to the impacts on the forecasts between `start='2008Q3'` and `end='2009Q2'`
These are described in more detail below.
*Notes*:
- There are a number of arguments that can be passed to the `summary` method to control this output. Check the documentation / docstring for details.
- Table (4), showing details of the updates and impacts, can become quite large if the model is multivariate, there are multiple updates, or a large number of impact dates are selected. It is only shown by default for univariate models.
**First table: summary of the model and datasets**
The first table, above, shows:
- The type of model from which the forecasts were made. Here this is an ARIMA model, since an AR(1) is a special case of an ARIMA(p,d,q) model.
- The date and time at which the analysis was computed.
- The original sample period, which here corresponds to `y_pre`
- The endpoint of the updated sample period, which here is the last date in `y_post`
**Second table: the news from updated data**
This table simply shows the forecasts from the previous results for observations that were updated in the updated sample.
*Notes*:
- Our updated dataset `y_post` did not contain any *revisions* to previously observed datapoints. If it had, there would be an additional table showing the previous and updated values of each such revision.
**Third table: summary of the impacts of the new information**
*Columns*:
The third table, above, shows:
- The previous forecast for each of the impact dates, in the "estimate (prev)" column
- The impact that the new information (the "news") had on the forecasts for each of the impact dates, in the "impact of news" column
- The updated forecast for each of the impact dates, in the "estimate (new)" column
*Notes*:
- In multivariate models, this table contains additional columns describing the relevant impacted variable for each row.
- Our updated dataset `y_post` did not contain any *revisions* to previously observed datapoints. If it had, there would be additional columns in this table showing the impact of those revisions on the forecasts for the impact dates.
- Note that `estimate (new) = estimate (prev) + impact of news`
- This table can be accessed independently using the `summary_impacts` method.
*In our example*:
Notice that in our example, the table shows the values that we computed earlier:
- The "estimate (prev)" column is identical to the forecasts from our previous model, contained in the `forecasts_pre` variable.
- The "estimate (new)" column is identical to our `forecasts_post` variable, which contains the observed value for 2008Q3 and the forecasts from the updated model for 2008Q4 - 2009Q2.
**Fourth table: details of updates and their impacts**
The fourth table, above, shows how each new observation translated into specific impacts at each impact date.
*Columns*:
The first three columns describe the relevant **update** (an "update" is a new observation):
- The first column ("update date") shows the date of the variable that was updated.
- The second column ("forecast (prev)") shows the value that would have been forecasted for the update variable at the update date based on the previous results / dataset.
- The third column ("observed") shows the actual observed value of that updated variable / update date in the updated results / dataset.
The last four columns describe the **impact** of a given update (an impact is a changed forecast within the "impact periods").
- The fourth column ("impact date") gives the date at which the given update made an impact.
- The fifth column ("news") shows the "news" associated with the given update (this is the same for each impact of a given update, but is just not sparsified by default)
- The sixth column ("weight") describes the weight that the "news" from the given update has on the impacted variable at the impact date. In general, weights will be different between each "updated variable" / "update date" / "impacted variable" / "impact date" combination.
- The seventh column ("impact") shows the impact that the given update had on the given "impacted variable" / "impact date".
*Notes*:
- In multivariate models, this table contains additional columns to show the relevant variable that was updated and variable that was impacted for each row. Here, there is only one variable ("infl"), so those columns are suppressed to save space.
- By default, the updates in this table are "sparsified" with blanks, to avoid repeating the same values for "update date", "forecast (prev)", and "observed" for each row of the table. This behavior can be overridden using the `sparsify` argument.
- Note that `impact = news * weight`.
- This table can be accessed independently using the `summary_details` method.
*In our example*:
- For the update to 2008Q3 and impact date 2008Q3, the weight is equal to 1. This is because we only have one variable, and once we have incorporated the data for 2008Q3, there is no remaining ambiguity about the "forecast" for this date. Thus all of the "news" about this variable at 2008Q3 passes through to the "forecast" directly.
#### Addendum: manually computing the news, weights, and impacts
For this simple example with a univariate model, it is straightforward to compute all of the values shown above by hand. First, recall the formula for forecasting $y_{T+h|T} = \phi^h y_T$, and note that it follows that we also have $y_{T+h|T+1} = \phi^h y_{T+1}$. Finally, note that $y_{T|T+1} = y_T$, because if we know the value of the observations through $T+1$, we know the value of $y_T$.
**News**: The "news" is nothing more than the forecast error associated with one of the new observations. So the news associated with observation $T+1$ is:
$$n_{T+1} = y_{T+1} - y_{T+1|T} = y_{T+1} - \phi y_T$$
**Impacts**: The impact of the news is the difference between the updated and previous forecasts, $i_h \equiv y_{T+h|T+1} - y_{T+h|T}$.
- The previous forecasts for $h=1, \dots, 4$ are: $\begin{pmatrix} \phi y_T & \phi^2 y_T & \phi^3 y_T & \phi^4 y_T \end{pmatrix}'$.
- The updated forecasts for $h=1, \dots, 4$ are: $\begin{pmatrix} y_{T+1} & \phi y_{T+1} & \phi^2 y_{T+1} & \phi^3 y_{T+1} \end{pmatrix}'$.
The impacts are therefore:
$$\{ i_h \}_{h=1}^4 = \begin{pmatrix} y_{T+1} - \phi y_T \\ \phi (y_{T+1} - \phi y_T) \\ \phi^2 (y_{T+1} - \phi y_T) \\ \phi^3 (y_{T+1} - \phi y_T) \end{pmatrix}$$
**Weights**: To compute the weights, we just need to note that the impacts can be rewritten directly in terms of the forecast error (the "news"), $n_{T+1}$.
$$\{ i_h \}_{h=1}^4 = \begin{pmatrix} 1 \\ \phi \\ \phi^2 \\ \phi^3 \end{pmatrix} n_{T+1}$$
The weights are then simply $w = \begin{pmatrix} 1 \\ \phi \\ \phi^2 \\ \phi^3 \end{pmatrix}$
We can check that this is what the `news` method has computed.
```
# Print the news, computed by the `news` method
print(news.news)
# Manually compute the news
print()
print((y_update.iloc[0] - phi_hat * y_pre.iloc[-1]).round(6))
# Print the total impacts, computed by the `news` method
# (Note: news.total_impacts = news.revision_impacts + news.update_impacts, but
# here there are no data revisions, so total and update impacts are the same)
print(news.total_impacts)
# Manually compute the impacts
print()
print(forecasts_post - forecasts_pre)
# Print the weights, computed by the `news` method
print(news.weights)
# Manually compute the weights
print()
print(np.array([1, phi_hat, phi_hat**2, phi_hat**3]).round(6))
```
### Multivariate example: dynamic factor
In this example, we'll consider forecasting monthly core price inflation based on the Personal Consumption Expenditures (PCE) price index and the Consumer Price Index (CPI), using a Dynamic Factor model. Both of these measures track prices in the US economy and are based on similar source data, but they have a number of definitional differences. Nonetheless, they track each other relatively well, so modeling them jointly using a single dynamic factor seems reasonable.
One reason that this kind of approach can be useful is that the CPI is released earlier in the month than the PCE. Once the CPI is released, therefore, we can update our dynamic factor model with that additional datapoint, and obtain an improved forecast for that month's PCE release. A more involved version of this kind of analysis is available in Knotek and Zaman (2017).
We start by downloading the core CPI and PCE price index data from [FRED](https://fred.stlouisfed.org/), converting them to annualized monthly inflation rates, and removing two outliers. (The dynamic factor model does not de-mean the data itself; instead, we account for the non-zero mean of these series below by including a constant as an exogenous variable.)
```
import pandas_datareader as pdr
levels = pdr.get_data_fred(['PCEPILFE', 'CPILFESL'], start='1999', end='2019').to_period('M')
infl = np.log(levels).diff().iloc[1:] * 1200
infl.columns = ['PCE', 'CPI']
# Remove two outliers and de-mean the series
infl.loc['2001-09':'2001-10', 'PCE'] = np.nan
```
To show how this works, we'll imagine that it is April 14, 2017, which is the date of the March 2017 CPI release. So that we can show the effect of multiple updates at once, we'll assume that we haven't updated our data since the end of January, so that:
- Our **previous dataset** will consist of all values for the PCE and CPI through January 2017
- Our **updated dataset** will additionally incorporate the CPI for February and March 2017 and the PCE data for February 2017. But it will not yet include the March 2017 PCE data (the March 2017 PCE price index was not released until May 1, 2017).
```
# Previous dataset runs through 2017-01
y_pre = infl.loc[:'2017-01'].copy()
const_pre = np.ones(len(y_pre))
print(y_pre.tail())
# For the updated dataset, we add the observations for 2017-02 and
# the CPI value for 2017-03 (the PCE value for 2017-03 is not yet available)
y_post = infl.loc[:'2017-03'].copy()
y_post.loc['2017-03', 'PCE'] = np.nan
const_post = np.ones(len(y_post))
# Notice the missing value for PCE in 2017-03
print(y_post.tail())
```
We chose this particular example because in March 2017, core CPI prices fell for the first time since 2010, and this information may be useful in forecasting core PCE prices for that month. The graph below shows the CPI and PCE price data as it would have been observed on April 14th$^\dagger$.
-----
$\dagger$ This statement is not entirely true, because both the CPI and PCE price indexes can be revised to a certain extent after the fact. As a result, the series that we're pulling are not exactly like those observed on April 14, 2017. This could be fixed by pulling the archived data from [ALFRED](https://alfred.stlouisfed.org/) instead of [FRED](https://fred.stlouisfed.org/), but the data we have is good enough for this tutorial.
```
# Plot the updated dataset
fig, ax = plt.subplots(figsize=(15, 3))
y_post.plot(ax=ax)
ax.hlines(0, '2009', '2017-06', linewidth=1.0)
ax.set_xlim('2009', '2017-06');
```
To perform the exercise, we first construct and fit a `DynamicFactor` model. Specifically:
- We are using a single dynamic factor (`k_factors=1`)
- We are modeling the factor's dynamics with an AR(6) model (`factor_order=6`)
- We have included a vector of ones as an exogenous variable (`exog=const_pre`), because the inflation series we are working with are not mean-zero.
```
mod_pre = sm.tsa.DynamicFactor(y_pre, exog=const_pre, k_factors=1, factor_order=6)
res_pre = mod_pre.fit()
print(res_pre.summary())
```
With the fitted model in hand, we now construct the news and impacts associated with observing the CPI for March 2017. The updated data is for February 2017 and part of March 2017, and we'll examine the impacts on both March and April.
In the univariate example, we first created an updated results object, and then passed that to the `news` method. Here, we're creating the news by directly passing the updated dataset.
Notice that:
1. `y_post` contains the entire updated dataset (not just the new datapoints)
2. We also had to pass an updated `exog` array. This array must cover **both**:
- The entire period associated with `y_post`
- Any additional datapoints after the end of `y_post` through the last impact date, specified by `end`
Here, `y_post` ends in March 2017, so we needed our `exog` to extend one more period, to April 2017.
```
# Create the news results
# Note: the exog array must extend through the last impact date (2017-04),
# i.e. one period beyond the end of y_post
const_post_plus1 = np.ones(len(y_post) + 1)
news = res_pre.news(y_post, exog=const_post_plus1, start='2017-03', end='2017-04')
```
> **Note**:
>
> In the univariate example, above, we first constructed a new results object, and then passed that to the `news` method. We could have done that here too, although there is an extra step required. Since we are requesting an impact for a period beyond the end of `y_post`, we would still need to pass the additional value for the `exog` variable during that period to `news`:
>
> ```python
> res_post = res_pre.apply(y_post, exog=const_post)
> news = res_pre.news(res_post, exog=[1.], start='2017-03', end='2017-04')
> ```
Now that we have computed the `news`, printing `summary` is a convenient way to see the results.
```
# Show the summary of the news results
print(news.summary())
```
Because we have multiple variables, by default the summary only shows the news from updated data along with the total impacts.
From the first table, we can see that our updated dataset contains three new data points, with most of the "news" from these data coming from the very low reading in March 2017.
The second table shows that these three datapoints substantially impacted the estimate for PCE in March 2017 (which was not yet observed). This estimate was revised down by nearly 1.5 percentage points.
The updated data also impacted the forecasts in the first out-of-sample month, April 2017. After incorporating the new data, the model's forecasts for CPI and PCE inflation in that month were revised down by 0.29 and 0.17 percentage points, respectively.
While these tables show the "news" and the total impacts, they do not show how much of each impact was caused by each updated datapoint. To see that information, we need to look at the details tables.
One way to see the details tables is to pass `include_details=True` to the `summary` method. To avoid repeating the tables above, however, we'll just call the `summary_details` method directly.
```
print(news.summary_details())
```
This table shows that most of the revisions to the estimate of PCE in April 2017, described above, came from the news associated with the CPI release in March 2017. By contrast, the CPI release in February had only a little effect on the April forecast, and the PCE release in February had essentially no effect.
### Bibliography
Bańbura, Marta, Domenico Giannone, and Lucrezia Reichlin. "Nowcasting." The Oxford Handbook of Economic Forecasting. July 8, 2011.
Bańbura, Marta, Domenico Giannone, Michele Modugno, and Lucrezia Reichlin. "Now-casting and the real-time data flow." In Handbook of economic forecasting, vol. 2, pp. 195-237. Elsevier, 2013.
Bańbura, Marta, and Michele Modugno. "Maximum likelihood estimation of factor models on datasets with arbitrary pattern of missing data." Journal of Applied Econometrics 29, no. 1 (2014): 133-160.
Knotek, Edward S., and Saeed Zaman. "Nowcasting US headline and core inflation." Journal of Money, Credit and Banking 49, no. 5 (2017): 931-968.
|
github_jupyter
|
```
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.autograd import Variable
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['image.cmap'] = 'gray'
%matplotlib inline
# input batch size for training (default: 64)
batch_size = 64
# input batch size for testing (default: 1000)
test_batch_size = 1000
# number of epochs to train (default: 10)
epochs = 10
# learning rate (default: 0.01)
lr = 0.01
# SGD momentum (default: 0.5)
momentum = 0.5
# disables CUDA training
no_cuda = True
# random seed (default: 1)
seed = 1
# how many batches to wait before logging training status
log_interval = 10
# Setting seed for reproducibility.
torch.manual_seed(seed)
cuda = not no_cuda and torch.cuda.is_available()
print("CUDA: {}".format(cuda))
if cuda:
torch.cuda.manual_seed(seed)
cudakwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
mnist_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,)) # Precalculated values.
])
train_set = datasets.MNIST(
root='data',
train=True,
transform=mnist_transform,
download=True,
)
test_set = datasets.MNIST(
root='data',
train=False,
transform=mnist_transform,
download=True,
)
train_loader = torch.utils.data.DataLoader(
dataset=train_set,
batch_size=batch_size,
shuffle=True,
**cudakwargs
)
test_loader = torch.utils.data.DataLoader(
dataset=test_set,
batch_size=test_batch_size,
shuffle=True,
**cudakwargs
)
```
## Loading the model.
Here we will focus only on `nn.Sequential` model types as they are easier to deal with. Generalizing the methods described here to `nn.Module` will require more work.
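As a rough sketch of what that generalization could look like, `named_modules()` is available on any `nn.Module` and walks the whole module tree; the `TinyNet` class below is a made-up example for illustration only, not the model used in this notebook:
```
# Hypothetical nn.Module, defined only to illustrate named_modules();
# it is unrelated to the Sequential model loaded below.
class TinyNet(nn.Module):
    def __init__(self):
        super(TinyNet, self).__init__()
        self.conv = nn.Conv2d(in_channels=1, out_channels=8, kernel_size=3)
        self.fc = nn.Linear(8 * 26 * 26, 10)

    def forward(self, x):
        x = F.relu(self.conv(x))
        return self.fc(x.view(x.size(0), -1))

net = TinyNet()
# named_modules() yields (dotted name, module) pairs for the whole tree,
# so nested layers can be looked up by name instead of by position.
for name, module in net.named_modules():
    if name:  # the root module itself has an empty name
        print(name, module)
```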
```
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
def __str__(self):
return 'Flatten()'
model = nn.Sequential(OrderedDict([
('conv2d_1', nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3)),
('relu_1', nn.ReLU()),
('max_pooling2d_1', nn.MaxPool2d(kernel_size=2)),
('conv2d_2', nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3)),
('relu_2', nn.ReLU()),
('dropout_1', nn.Dropout(p=0.25)),
('flatten_1', Flatten()),
('dense_1', nn.Linear(3872, 64)),
('relu_3', nn.ReLU()),
('dropout_2', nn.Dropout(p=0.5)),
('dense_2', nn.Linear(64, 10)),
('readout', nn.LogSoftmax(dim=1))
]))
model.load_state_dict(torch.load('example_torch_mnist_model.pth'))
```
## Accessing the layers
A `torch.nn.Sequential` module serves itself as an iterable and subscriptable container for all its children modules.
```
for i, layer in enumerate(model):
print('{}\t{}'.format(i, layer))
```
Moreover `.modules` and `.children` provide generators for accessing layers.
```
for m in model.modules():
print(m)
for c in model.children():
print(c)
```
## Getting the weights.
```
conv2d_1_weight = model[0].weight.data.numpy()
conv2d_1_weight.shape
for i in range(32):
plt.imshow(conv2d_1_weight[i, 0])
plt.show()
```
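As a purely cosmetic alternative to the loop above, the same 32 first-layer filters can be drawn in a single figure (this only changes the display, not how the weights are read):
```
# Arrange the 32 conv2d_1 filters in a 4x8 grid of subplots
fig, axes = plt.subplots(4, 8, figsize=(12, 6))
for i, ax in enumerate(axes.flat):
    ax.imshow(conv2d_1_weight[i, 0])
    ax.set_title(str(i), fontsize=8)
    ax.axis('off')
plt.tight_layout()
plt.show()
```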
### Getting layer properties
The layer objects themselves expose most properties as attributes.
```
conv2d_1 = model[0]
conv2d_1.kernel_size
conv2d_1.stride
conv2d_1.dilation
conv2d_1.in_channels, conv2d_1.out_channels
conv2d_1.padding
conv2d_1.output_padding
dropout_1 = model[5]
dropout_1.p
dense_1 = model[7]
dense_1.in_features, dense_1.out_features
```
|
github_jupyter
|
## Next Task: compute precision and recall
threshold 25: zoomy, sustain -> thick, smooth (user results)
zoomy, sustain -> dark, smooth (word2word matcher results)
smooth tp
dark fp
thick tn (fn?)
precision = tp/(tp+fp)
recall = tp/(tp+fn)
for one word, can't compute recall
later: tensorflow language models, Optimising (Kullback-Leibler) for the distribution
However, note:
Let A and B be any sets with |A| = |B| (|.| being the set cardinality, i.e. the number of elements in the set). It follows that
fp = |B \ (A∩B)| = |B| - |A∩B| = |A| - |A∩B| = |A \ (A∩B)| = fn.
It hence follows that
precision = tp/(tp+fp) = tp/(tp+fn) = recall.
I understood your definition
"A is the set of words in our ground truth, when you apply a threshold to the sliders
B is the set of words from the output of our words matcher"
in a way such that |A|=|B|
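A minimal numeric sanity check of that argument, using the made-up word sets from the note above (not real matcher output):
```
# Ground truth (A) and matcher output (B) for one example word, with |A| == |B|
A = {'thick', 'smooth'}   # words implied by the user's sliders
B = {'dark', 'smooth'}    # words returned by the word2word matcher
tp = len(A & B)           # correctly recovered words
fp = len(B - A)           # returned but not in the ground truth
fn = len(A - B)           # in the ground truth but not returned
precision = tp / (tp + fp)
recall = tp / (tp + fn)
print(tp, fp, fn, precision, recall)  # fp == fn, so precision == recall
```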
```
import sys
import ipdb
import pandas as pd
import numpy as np
from tqdm import tqdm
sys.path.append(r'C:\Temp\SoundOfAI\rg_text_to_sound\tts_pipeline\src')
from match_word_to_words import prepare_dataset, word_to_wordpair_estimator, word_to_words_matcher
import matplotlib.pyplot as plt
df = pd.read_csv('text_to_qualities.csv')
colnames = df.columns
display(df.head(2))
df.shape
df = pd.read_csv('text_to_qualities.csv')
dfnew = df.copy()  # assumption: the cleaning below starts from a copy of df
dfnew[dfnew.description.str.match('\'')]
dfnew['description'] = dfnew.description.str.replace("'","")
dfnew['description']=dfnew.description.str.lower().str.replace('(\(not.*\))','',regex=True)
dfnew = dfnew[~dfnew.description.str.match('\(.*\)')]
dfnew.head()
wordlist = dfnew.description
unique_word_list = np.unique(wordlist).tolist()
len(wordlist),len(unique_word_list)
```
threshold 25: zoomy, sustain -> thick, smooth (user results)
zoomy, sustain -> dark, smooth (word2word matcher results)
smooth tp
dark fp
thick tn
precision = tp/(tp+fp)
recall = tp/(tp+fn)
for one word, can't compute recall
# word pair estimator
```
df_score
df_score = dfnew.iloc[:,1:]
descriptions = dfnew.iloc[:,0]
wordpairnames = df_score.columns.tolist()
df_score.head()
target_word_pairs = [('bright', 'dark'), ('full', 'hollow'),( 'smooth', 'rough'), ('warm', 'metallic'), ('clear', 'muddy'), ('thin', 'thick'), ('pure', 'noisy'), ('rich', 'sparse'), ('soft', 'hard')]
wordpairnames_to_wordpair_dict = {s:t for s,t in zip(wordpairnames,target_word_pairs)}
wordpairnames_to_wordpair_dict
list(np.arange(49.8,50,0.1))
A=set([1,2,3])
B=set([3,4,5])
AandB = A.intersection(B)
B.difference(AandB)
def single_word_precision_recall(word,scorerow,threshold,w2wpe,wordpairnames_to_wordpair_dict):
elems_above = scorerow[(scorerow>(100-threshold)) ]
elems_below = scorerow[(scorerow<=threshold) ]
words_above = [wordpairnames_to_wordpair_dict[wordpairname][1] for wordpairname in elems_above.index]
words_below = [wordpairnames_to_wordpair_dict[wordpairname][0] for wordpairname in elems_below.index]
A = set(words_above+words_below)
opposite_pairs_beyond_threshold = elems_above.index.tolist()+elems_below.index.tolist()
B = set([w2wpe.match_word_to_wordpair(word,ind)['closest word'] for ind in opposite_pairs_beyond_threshold])
assert len(A)==len(B), 'This should never occur!'
AandB = set(A).intersection(B)
tp = AandB
fp = B.difference(AandB) # were found but shouldn't have been
fn = A.difference(AandB) # were not found but should have been
den = len(tp)+len(fp)
if den==0:
precision = np.NaN
else:
precision = len(tp)/den
den = len(tp)+len(fn)
if den==0:
recall = np.NaN
else:
recall = len(tp)/den
if precision!=recall and not np.isnan(precision):
print('This should never occur!')
print('word, A,B,AandB,tp,fp,fn,precision,recall')
print(word, A,B,AandB,tp,fp,fn,precision,recall)
return precision,recall,len(A)
lang_model='en_core_web_sm'
w2wpe = word_to_wordpair_estimator()
w2wpe.build(wordpairnames,target_word_pairs,lang_model=lang_model)
w2wpe.match_word_to_wordpair('full','full_vs_hollow')
word = descriptions[0]
scorerow = df_score.iloc[0,:]
prec_50_list=[]
NrRelevantWordpairList=[]
for word, (irow,scorerow) in tqdm(zip(descriptions, df_score.iterrows())):
prec,rec,NrRelevantWordpairs = single_word_precision_recall(word,scorerow,10,w2wpe,wordpairnames_to_wordpair_dict)
prec_50_list.append(prec)
NrRelevantWordpairList.append(NrRelevantWordpairs)
pd.Series(prec_50_list).dropna()
len(prec_50_list),np.mean(prec_50_list)
' '.join([f'{i:1.1f}' for i in thresholdlist])
def compute_accuracy(lang_model='en_core_web_lg',thresholdlist=None):
w2wpe = word_to_wordpair_estimator()
w2wpe.build(wordpairnames,target_word_pairs,lang_model=lang_model)
if thresholdlist is None:
thresholdlist = list(np.arange(0,50,2))+list(np.arange(45,50,0.5))+[50.]
mean_accuracy_list = []
nrrelevantlist = []
for threshold in tqdm(thresholdlist):
acc_list=[]
NrRelevantWordpairList=[]
for word, (irow,scorerow) in zip(descriptions, df_score.iterrows()):
precision,recall,NrRelevantWordpairs = single_word_precision_recall(word,scorerow,threshold,w2wpe,wordpairnames_to_wordpair_dict)
acc_list.append(precision)
NrRelevantWordpairList.append(NrRelevantWordpairs)
assert len(acc_list)>0, 'something is wrong...'
meanAccuracyVal = pd.Series(acc_list).dropna().mean()
NrRelevantVal = np.mean(NrRelevantWordpairList)
mean_accuracy_list.append(meanAccuracyVal)
nrrelevantlist.append(NrRelevantVal)
return mean_accuracy_list,nrrelevantlist
%time
lang_model1 = 'en_core_web_sm'
lang_model2 = 'en_core_web_lg'
mean_accuracy_list1,nrrelevantlist1 = compute_accuracy(lang_model=lang_model1)
mean_accuracy_list2,nrrelevantlist2 = compute_accuracy(lang_model=lang_model2)
lang_model3 = 'en_core_web_md'
thresholdlist = list(np.arange(0,50,2))+list(np.arange(45,50,0.5))+[50.]
mean_accuracy_list3,nrrelevantlist3 = compute_accuracy(lang_model=lang_model3,thresholdlist=thresholdlist)
from nltk.corpus import wordnet
# Then, we're going to use the term "program" to find synsets like so:
syns = wordnet.synsets("program")
if np.all(np.isclose(np.array(nrrelevantlist1),np.array(nrrelevantlist2))):
nrrelevantlist = nrrelevantlist1
plt.figure(1,figsize=(15,7))
plt.subplot(3,1,1)
plt.plot(thresholdlist,mean_accuracy_list1,marker='o',label='Accuracy')
plt.suptitle(f'Accuracy vs. Threshold\nWords considered have (score <= threshold) or (score > 100-threshold)')
plt.title(f'Accuracy of {lang_model1}')
plt.ylabel('Accuracy')
plt.legend()
plt.subplot(3,1,2)
plt.plot(thresholdlist,mean_accuracy_list2,marker='o',label='Accuracy')
plt.title(f'Accuracy of {lang_model2}')
plt.ylabel('Accuracy')
plt.legend()
plt.subplot(3,1,3)
plt.plot(thresholdlist,nrrelevantlist,marker='o')
plt.title('Average number of relevant sliders')
plt.xlabel('threshold value')
plt.ylabel('Nr of Sliders')
plt.yticks(np.arange(1,10,2))
plt.subplots_adjust(hspace=.6)
plt.figure(1,figsize=(15,7))
plt.subplot(1,1,1)
plt.plot(thresholdlist,mean_accuracy_list3,marker='o',label='Accuracy')
plt.suptitle(f'Accuracy vs. Threshold\nWords considered have (score <= threshold) or (score > 100-threshold)')
plt.title(f'Accuracy of {lang_model3}')
plt.ylabel('Accuracy')
plt.legend()
plt.figure(1,figsize=(15,7))
plt.subplot(2,1,1)
plt.plot(thresholdlist,mean_accuracy_list1,marker='o',label=f'Accuracy of {lang_model1}')
plt.plot(thresholdlist,mean_accuracy_list2,marker='o',label=f'Accuracy of {lang_model2}')
plt.suptitle(f'Accuracy vs. Threshold\nWords considered have (score <= threshold) or (score > 100-threshold)')
plt.ylabel('Accuracy')
plt.legend()
plt.subplot(2,1,2)
plt.plot(thresholdlist,nrrelevantlist,marker='o')
plt.title('Average number of relevant sliders')
plt.xlabel('threshold value')
plt.ylabel('Nr of Sliders')
plt.yticks(np.arange(1,10,2))
plt.subplots_adjust(hspace=.6)
plt.savefig('Accuracy_vs_Threshold.svg')
row
lang_model = 'en_core_web_sm'
w2wpe = word_to_wordpair_estimator()
w2wpe.build(wordpairnames,target_word_pairs,lang_model=lang_model)
prediction_dict = w2wpe.match_word_to_wordpair(word,ind)
ind, prediction_dict
ind,w2wpe.match_word_to_wordpair(word,ind)
def compute_mean_acc(dfnew,df_score,thresholdmargin,threshold=50, required_confidence=0, lang_model='en_core_web_sm'):
"""
Take the opposite quality pairs for which the slider value is outside the 50+/- <thresholdmargin> band.
Compute the accuracy in predicting the correct opposite-pair word for each such pair.
threshold: where to split a score to lower or upper quality in pair: 50 is the most natural value.
The prediction must be with a (minimum) < required_confidence > otherwise the prediction is deemed unsure.
The returned accuracy is computed as
accuracy = NrCorrect/(NrCorrect+NrWrong+NrUnsure)
averaged over all words in <dfnew>.description
"""
w2wpe = word_to_wordpair_estimator()
w2wpe.build(wordpairnames,target_word_pairs,lang_model=lang_model)
acc_list = []
unsure_list = []
NrCorrect = 0
NrWrong = 0
NrUnsure = 0
for word, (irow,scorerow) in zip(dfnew.description, df_score.iterrows()):
#determine which opposite quality pairs will be correctly predicted as the first and second word in the word pair, respectively
valid_qualities = scorerow[(scorerow > threshold+thresholdmargin )|(scorerow < threshold-thresholdmargin)]
below_th = valid_qualities[valid_qualities<threshold].index.tolist()#first word in the word pair is correct
above_th = valid_qualities[valid_qualities>threshold].index.tolist()#second word in the word pair is correct
#word_pair_tuple = wordpairnames_to_wordpair_dict[word_pair]
NrCorrect = 0
NrWrong = 0
NrUnsure = 0
for word_pair in above_th:
res = w2wpe.match_word_to_wordpair(word,word_pair)
if res['slider value']>(threshold+required_confidence):# Add prediction threshold?
NrCorrect+=1
elif res['slider value']<(threshold-required_confidence):
NrWrong+=1
else:
NrUnsure+=1 #if required confidence was not reached
for word_pair in below_th:
res = w2wpe.match_word_to_wordpair(word,word_pair)
if res['slider value']<(threshold-required_confidence):# Add prediction threshold?
NrCorrect+=1
elif res['slider value']>threshold+required_confidence:
NrWrong+=1
else:
NrUnsure+=1 #if required confidence was not reached
if len(below_th)+len(above_th)==0: continue
accuracy = NrCorrect/(NrCorrect+NrWrong+NrUnsure)
unsure_ratio = NrUnsure/(NrCorrect+NrWrong+NrUnsure) # the fraction of cases where the prediction did not reach the required confidence
acc_list.append(accuracy)
unsure_list.append(unsure_ratio)
#resdict = {'NrCorrect':NrCorrect, 'NrWrong':NrWrong, 'NrUnsure':NrUnsure}
mean_acc = np.mean(acc_list) #list of accuracies for each word, over all available sliders
mean_unsure = np.mean(unsure_list)
del w2wpe
return mean_acc,mean_unsure
def f():
ipdb.set_trace()
return wordpair_matcher_dict['bright_vs_dark'].match_word_to_words('sunny')
f()
y = np.array([np.where(row['bright_vs_dark']>=50,1,0) for row in rowlist])
y.shape,yhat1.shape
yhat_binary = np.array([0 if yhatelem==target_word_pair[0] else 1 for yhatelem in yhat1])
yhat_binary.shape
len(yhat),len(rowlist)
accuracy_score(y,yhat_binary)
yhat1
df_detailed = pd.DataFrame(index=wordlist)
df_detailed.head(7)
wordlist = [w for r,w in generate_training_examples(df)]
rowlist = [r for r,w in generate_training_examples(df)]
acc_scores=dict()
for target_word_pair,opposite_quality_pair in zip(target_word_pairs,colnames):
y = np.array([np.where(row[opposite_quality_pair]>=50,1,0) for row in rowlist])
print(target_word_pair,opposite_quality_pair)
w2wm = word_to_words_matcher()
w2wm.build(target_word_pair)
yhat1 = np.array(f(wordlist,w2wm,variant=1))
df_detailed[opposite_quality_pair] = yhat1
yhat_binary = np.array([0 if yhatelem==target_word_pair[0] else 1 for yhatelem in yhat1])
acc_score = accuracy_score(y,yhat_binary)
print(f'{acc_score:1.3f}')
acc_scores[opposite_quality_pair] = acc_score
print(df_detailed.shape)
df_detailed.to_excel('predicted_qualities.xlsx')
df_detailed.head(20)
pd.Series(acc_scores).plot.bar(ylabel='accuracy')
plt.plot(plt.xlim(),[0.5,0.5],'--',c='k')
plt.title(f'Accuracy of Spacy word vectors in predicting\ntext_to_qualities.csv ({len(wordlist)} qualities)')
plt.ylim(0,1)
```
|
github_jupyter
|
# The ISB-CGC open-access TCGA tables in Big-Query
The goal of this notebook is to introduce you to a new publicly-available, open-access dataset in BigQuery. This set of BigQuery tables was produced by the [ISB-CGC](http://www.isb-cgc.org) project, based on the open-access [TCGA](http://cancergenome.nih.gov/) data available at the TCGA [Data Portal](https://tcga-data.nci.nih.gov/tcga/). You will need to have access to a Google Cloud Platform (GCP) project in order to use BigQuery. If you don't already have one, you can sign up for a [free-trial](https://cloud.google.com/free-trial/) or contact [us](mailto://[email protected]) and become part of the community evaluation phase of our Cancer Genomics Cloud pilot. (You can find more information about this NCI-funded program [here](https://cbiit.nci.nih.gov/ncip/nci-cancer-genomics-cloud-pilots).)
We are not attempting to provide a thorough BigQuery or IPython tutorial here, as a wealth of such information already exists. Here are links to some resources that you might find useful:
* [BigQuery](https://cloud.google.com/bigquery/what-is-bigquery),
* the BigQuery [web UI](https://bigquery.cloud.google.com/) where you can run queries interactively,
* [IPython](http://ipython.org/) (now known as [Jupyter](http://jupyter.org/)), and
* [Cloud Datalab](https://cloud.google.com/datalab/) the recently announced interactive cloud-based platform that this notebook is being developed on.
There are also many tutorials and samples available on github (see, in particular, the [datalab](https://github.com/GoogleCloudPlatform/datalab) repo and the [Google Genomics]( https://github.com/googlegenomics) project).
In order to work with BigQuery, the first thing you need to do is import the [gcp.bigquery](http://googlecloudplatform.github.io/datalab/gcp.bigquery.html) package:
```
import gcp.bigquery as bq
```
The next thing you need to know is how to access the specific tables you are interested in. BigQuery tables are organized into datasets, and datasets are owned by a specific GCP project. The tables we are introducing in this notebook are in a dataset called **`tcga_201607_beta`**, owned by the **`isb-cgc`** project. A full table identifier is of the form `<project_id>:<dataset_id>.<table_id>`. Let's start by getting some basic information about the tables in this dataset:
```
d = bq.DataSet('isb-cgc:tcga_201607_beta')
for t in d.tables():
print '%10d rows %12d bytes %s' \
% (t.metadata.rows, t.metadata.size, t.name.table_id)
```
These tables are based on the open-access TCGA data as of July 2016. The molecular data is all "Level 3" data, and is divided according to platform/pipeline. See [here](https://tcga-data.nci.nih.gov/tcga/tcgaDataType.jsp) for additional details regarding the TCGA data levels and data types.
Additional notebooks go into each of these tables in more detail, but here is an overview, in the same alphabetical order that they are listed in above and in the BigQuery web UI:
- **Annotations**: This table contains the annotations that are also available from the interactive [TCGA Annotations Manager](https://tcga-data.nci.nih.gov/annotations/). Annotations can be associated with any type of "item" (*eg* Patient, Sample, Aliquot, etc), and a single item may have more than one annotation. Common annotations include "Item flagged DNU", "Item is noncanonical", and "Prior malignancy." More information about this table can be found in the [TCGA Annotations](https://github.com/isb-cgc/examples-Python/blob/master/notebooks/TCGA%20Annotations.ipynb) notebook.
- **Biospecimen_data**: This table contains information obtained from the "biospecimen" and "auxiliary" XML files in the TCGA Level-1 "bio" archives. Each row in this table represents a single "biospecimen" or "sample". Most participants in the TCGA project provided two samples: a "primary tumor" sample and a "blood normal" sample, but others provided normal-tissue, metastatic, or other types of samples. This table contains metadata about all of the samples, and more information about exploring this table and using this information to create your own custom analysis cohort can be found in the [Creating TCGA cohorts (part 1)](https://github.com/isb-cgc/examples-Python/blob/master/notebooks/Creating%20TCGA%20cohorts%20--%20part%201.ipynb) and [(part 2)](https://github.com/isb-cgc/examples-Python/blob/master/notebooks/Creating%20TCGA%20cohorts%20--%20part%202.ipynb) notebooks.
- **Clinical_data**: This table contains information obtained from the "clinical" XML files in the TCGA Level-1 "bio" archives. Not all fields in the XML files are represented in this table, but any field which was found to be significantly filled-in for at least one tumor-type has been retained. More information about exploring this table and using this information to create your own custom analysis cohort can be found in the [Creating TCGA cohorts (part 1)](https://github.com/isb-cgc/examples-Python/blob/master/notebooks/Creating%20TCGA%20cohorts%20--%20part%201.ipynb) and [(part 2)](https://github.com/isb-cgc/examples-Python/blob/master/notebooks/Creating%20TCGA%20cohorts%20--%20part%202.ipynb) notebooks.
- **Copy_Number_segments**: This table contains Level-3 copy-number segmentation results generated by The Broad Institute, from Genome Wide SNP 6 data using the CBS (Circular Binary Segmentation) algorithm. The values are base2 log(copynumber/2), centered on 0. More information about this data table can be found in the [Copy Number segments](https://github.com/isb-cgc/examples-Python/blob/master/notebooks/Copy%20Number%20segments.ipynb) notebook.
- **DNA_Methylation_betas**: This table contains Level-3 summary measures of DNA methylation for each interrogated locus (beta values: M/(M+U)). This table contains data from two different platforms: the Illumina Infinium HumanMethylation 27k and 450k arrays. More information about this data table can be found in the [DNA Methylation](https://github.com/isb-cgc/examples-Python/blob/master/notebooks/DNA%20Methylation.ipynb) notebook. Note that individual chromosome-specific DNA Methylation tables are also available to cut down on the amount of data that you may need to query (depending on your use case).
- **Protein_RPPA_data**: This table contains the normalized Level-3 protein expression levels based on each antibody used to probe the sample. More information about how this data was generated by the RPPA Core Facility at MD Anderson can be found [here](https://wiki.nci.nih.gov/display/TCGA/Protein+Array+Data+Format+Specification#ProteinArrayDataFormatSpecification-Expression-Protein), and more information about this data table can be found in the [Protein expression](https://github.com/isb-cgc/examples-Python/blob/master/notebooks/Protein%20expression.ipynb) notebook.
- **Somatic_Mutation_calls**: This table contains annotated somatic mutation calls. All current MAF (Mutation Annotation Format) files were annotated using [Oncotator](http://onlinelibrary.wiley.com/doi/10.1002/humu.22771/abstract;jsessionid=15E7960BA5FEC21EE608E6D262390C52.f01t04) v1.5.1.0, and merged into a single table. More information about this data table can be found in the [Somatic Mutations](https://github.com/isb-cgc/examples-Python/blob/master/notebooks/Somatic%20Mutations.ipynb) notebook, including an example of how to use the [Tute Genomics annotations database in BigQuery](http://googlegenomics.readthedocs.org/en/latest/use_cases/annotate_variants/tute_annotation.html).
- **mRNA_BCGSC_HiSeq_RPKM**: This table contains mRNAseq-based gene expression data produced by the [BC Cancer Agency](http://www.bcgsc.ca/). (For details about a very similar table, take a look at a [notebook](https://github.com/isb-cgc/examples-Python/blob/master/notebooks/UNC%20HiSeq%20mRNAseq%20gene%20expression.ipynb) describing the other mRNAseq gene expression table.)
- **mRNA_UNC_HiSeq_RSEM**: This table contains mRNAseq-based gene expression data produced by [UNC Lineberger](https://unclineberger.org/). More information about this data table can be found in the [UNC HiSeq mRNAseq gene expression](https://github.com/isb-cgc/examples-Python/blob/master/notebooks/UNC%20HiSeq%20mRNAseq%20gene%20expression.ipynb) notebook.
- **miRNA_expression**: This table contains miRNAseq-based expression data for mature microRNAs produced by the [BC Cancer Agency](http://www.bcgsc.ca/). More information about this data table can be found in the [microRNA expression](https://github.com/isb-cgc/examples-Python/blob/master/notebooks/BCGSC%20microRNA%20expression.ipynb) notebook.
### Where to start?
We suggest that you start with the two "Creating TCGA cohorts" notebooks ([part 1](https://github.com/isb-cgc/examples-Python/blob/master/notebooks/Creating%20TCGA%20cohorts%20--%20part%201.ipynb) and [part 2](https://github.com/isb-cgc/examples-Python/blob/master/notebooks/Creating%20TCGA%20cohorts%20--%20part%202.ipynb)) which describe and make use of the Clinical and Biospecimen tables. From there you can delve into the various molecular data tables as well as the Annotations table. For now these sample notebooks are intentionally relatively simple and do not do any analysis that integrates data from multiple tables, but once you have a grasp of how to use the data, developing your own more complex analyses should not be difficult. You could even contribute an example back to our github repository! You are also welcome to submit bug reports, comments, and feature-requests as [github issues](https://github.com/isb-cgc/examples-Python/issues).
### A note about BigQuery tables and "tidy data"
You may be used to thinking about a molecular data table such as a gene-expression table as a matrix where the rows are genes and the columns are samples (or *vice versa*). These BigQuery tables instead use the [tidy data](https://cran.r-project.org/web/packages/tidyr/vignettes/tidy-data.html) approach, with each "cell" from the traditional data-matrix becoming a single row in the BigQuery table. A 10,000 gene x 500 sample matrix would therefore become a 5,000,000 row BigQuery table.
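To make that layout concrete, here is a tiny illustrative sketch (invented expression values and sample IDs, not data from these BigQuery tables) of how a genes-by-samples matrix unpivots into one row per cell:
```
import pandas as pd

# Toy 3-gene x 2-sample expression matrix (values are invented)
wide = pd.DataFrame({'TCGA-01': [5.2, 0.4, 7.1],
                     'TCGA-02': [4.8, 0.9, 6.3]},
                    index=['EGFR', 'TP53', 'MYC'])
wide.index.name = 'gene'

# "Tidy" form: one row per (gene, sample) cell, as in the BigQuery tables
tidy = pd.melt(wide.reset_index(), id_vars='gene',
               var_name='sample', value_name='expression')
print(tidy)
```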
|
github_jupyter
|
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import patsy
# Data: https://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset
# UCI citation:
# Dua, D. and Graff, C. (2019). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science.
# Source:
# Hadi Fanaee-T
# Laboratory of Artificial Intelligence and Decision Support (LIAAD), University of Porto
# INESC Porto, Campus da FEUP
# Rua Dr. Roberto Frias, 378
# 4200 - 465 Porto, Portugal
# Original Source: http://capitalbikeshare.com/system-data
bikes = pd.read_csv('bikes.csv')
# Fit model1
model1 = sm.OLS.from_formula('cnt ~ temp + windspeed + holiday', data=bikes).fit()
# Fit model2
model2 = sm.OLS.from_formula('cnt ~ hum + season + weekday', data=bikes).fit()
# Print R-squared for both models
print(model1.rsquared)
print(model2.rsquared)
sns.scatterplot(x='temp', y='cnt', data = bikes)
plt.show()
model1 = sm.OLS.from_formula('cnt ~ temp', data=bikes).fit()
xs = pd.DataFrame({'temp': np.linspace(bikes.temp.min(), bikes.temp.max(), 100)})
ys = model1.predict(xs)
sns.scatterplot(x='temp', y='cnt', data = bikes)
plt.plot(xs, ys, color = 'black', linewidth=4)
plt.show()
model2 = sm.OLS.from_formula('cnt ~ temp + np.power(temp, 2)', data=bikes).fit()
xs = pd.DataFrame({'temp': np.linspace(bikes.temp.min(), bikes.temp.max(), 100)})
ys = model2.predict(xs)
sns.scatterplot(x='temp', y='cnt', data = bikes)
plt.plot(xs, ys, color = 'black', linewidth=4)
plt.show()
model3 = sm.OLS.from_formula('cnt ~ temp + np.power(temp, 2) + np.power(temp, 3)', data=bikes).fit()
xs = pd.DataFrame({'temp': np.linspace(bikes.temp.min(), bikes.temp.max(), 100)})
ys = model3.predict(xs)
sns.scatterplot(x='temp', y='cnt', data = bikes)
plt.plot(xs, ys, color = 'black', linewidth=4)
plt.show()
model4 = sm.OLS.from_formula('cnt ~ temp + np.power(temp, 2) + np.power(temp, 3) + np.power(temp, 4) + np.power(temp, 5)', data=bikes).fit()
xs = pd.DataFrame({'temp': np.linspace(bikes.temp.min(), bikes.temp.max(), 100)})
ys = model4.predict(xs)
sns.scatterplot(x='temp', y='cnt', data = bikes)
plt.plot(xs, ys, color = 'black', linewidth=4)
plt.show()
model5 = sm.OLS.from_formula('cnt ~ temp + np.power(temp, 2) + np.power(temp, 3) + np.power(temp, 4) + np.power(temp, 5) + np.power(temp, 6) + np.power(temp, 7) + np.power(temp, 8) + np.power(temp, 9) + np.power(temp, 10)', data=bikes).fit()
xs = pd.DataFrame({'temp': np.linspace(bikes.temp.min(), bikes.temp.max(), 100)})
ys = model5.predict(xs)
sns.scatterplot(x='temp', y='cnt', data = bikes)
plt.plot(xs, ys, color = 'black', linewidth=4)
plt.show()
print(model1.rsquared)
print(model2.rsquared)
print(model3.rsquared)
print(model4.rsquared)
print(model5.rsquared)
print(model1.rsquared_adj)
print(model2.rsquared_adj)
print(model3.rsquared_adj)
print(model4.rsquared_adj)
print(model5.rsquared_adj)
from statsmodels.stats.anova import anova_lm
anova_results = anova_lm(model1, model2, model3, model4, model5)
print(anova_results.round(2))
print(model1.llf)
print(model2.llf)
print(model3.llf)
print(model4.llf)
print(model5.llf)
print(model1.aic)
print(model2.aic)
print(model3.aic)
print(model4.aic)
print(model5.aic)
print(model1.bic)
print(model2.bic)
print(model3.bic)
print(model4.bic)
print(model5.bic)
# Set seed (don't change this)
np.random.seed(123)
# Split bikes data
indices = range(len(bikes))
s = int(0.8*len(indices))
train_ind = np.random.choice(indices, size = s, replace = False)
test_ind = list(set(indices) - set(train_ind))
bikes_train = bikes.iloc[train_ind]
bikes_test = bikes.iloc[test_ind]
# Fit model1
model1 = sm.OLS.from_formula('cnt ~ temp + atemp + hum', data=bikes_train).fit()
# Fit model2
model2 = sm.OLS.from_formula('cnt ~ season + windspeed + weekday', data=bikes_train).fit()
# Calculate predicted cnt based on model1
fitted1 = model1.predict(bikes_test)
# Calculate predicted cnt based on model2
fitted2 = model2.predict(bikes_test)
# Calculate PRMSE for model1
true = bikes_test.cnt
prmse1 = np.mean((true-fitted1)**2)**.5
# Calculate PRMSE for model2
prmse2 = np.mean((true-fitted2)**2)**.5
# Print PRMSE for both models
print(prmse1)
print(prmse2)
```
|
github_jupyter
|
<h1 align="center">SimpleITK Spatial Transformations</h1>
**Summary:**
1. Points are represented by vector-like data types: Tuple, Numpy array, List.
2. Matrices are represented by vector-like data types in row major order.
3. Default transformation initialization as the identity transform.
4. Angles specified in radians, distances specified in unknown but consistent units (nm,mm,m,km...).
5. All global transformations **except translation** are of the form:
$$T(\mathbf{x}) = A(\mathbf{x}-\mathbf{c}) + \mathbf{t} + \mathbf{c}$$
Nomenclature (when printing your transformation):
* Matrix: the matrix $A$
* Center: the point $\mathbf{c}$
* Translation: the vector $\mathbf{t}$
* Offset: $\mathbf{t} + \mathbf{c} - A\mathbf{c}$
6. Bounded transformations, BSplineTransform and DisplacementFieldTransform, behave as the identity transform outside the defined bounds.
7. DisplacementFieldTransform:
* Initializing the DisplacementFieldTransform using an image requires that the image's pixel type be sitk.sitkVectorFloat64.
* Initializing the DisplacementFieldTransform using an image will "clear out" your image (your alias to the image will point to an empty, zero sized, image).
8. Composite transformations are applied in stack order (first added, last applied).
# Transformation Types
This notebook introduces the transformation types supported by SimpleITK and illustrates how to "promote" transformations from a lower to higher parameter space (e.g. 3D translation to 3D rigid).
| Class Name | Details|
|:-------------|:---------|
|[TranslationTransform](https://simpleitk.org/doxygen/latest/html/classitk_1_1simple_1_1TranslationTransform.html) | 2D or 3D, translation|
|[VersorTransform](https://simpleitk.org/doxygen/latest/html/classitk_1_1simple_1_1VersorTransform.html)| 3D, rotation represented by a versor|
|[VersorRigid3DTransform](https://simpleitk.org/doxygen/latest/html/classitk_1_1simple_1_1VersorRigid3DTransform.html)|3D, rigid transformation with rotation represented by a versor|
|[Euler2DTransform](https://simpleitk.org/doxygen/latest/html/classitk_1_1simple_1_1Euler2DTransform.html)| 2D, rigid transformation with rotation represented by a Euler angle|
|[Euler3DTransform](https://simpleitk.org/doxygen/latest/html/classitk_1_1simple_1_1Euler3DTransform.html)| 3D, rigid transformation with rotation represented by Euler angles|
|[Similarity2DTransform](https://simpleitk.org/doxygen/latest/html/classitk_1_1simple_1_1Similarity2DTransform.html)| 2D, composition of isotropic scaling and rigid transformation with rotation represented by a Euler angle|
|[Similarity3DTransform](https://simpleitk.org/doxygen/latest/html/classitk_1_1simple_1_1Similarity3DTransform.html) | 3D, composition of isotropic scaling and rigid transformation with rotation represented by a versor|
|[ScaleTransform](https://simpleitk.org/doxygen/latest/html/classitk_1_1simple_1_1ScaleTransform.html)|2D or 3D, anisotropic scaling|
|[ScaleVersor3DTransform](https://simpleitk.org/doxygen/latest/html/classitk_1_1simple_1_1ScaleVersor3DTransform.html)| 3D, rigid transformation and anisotropic scale is **added** to the rotation matrix part (not composed as one would expect)|
|[ScaleSkewVersor3DTransform](https://simpleitk.org/doxygen/latest/html/classitk_1_1simple_1_1ScaleSkewVersor3DTransform.html#details)|3D, rigid transformation with anisotropic scale and skew matrices **added** to the rotation matrix part (not composed as one would expect) |
|[AffineTransform](https://simpleitk.org/doxygen/latest/html/classitk_1_1simple_1_1AffineTransform.html)| 2D or 3D, affine transformation|
|[BSplineTransform](https://simpleitk.org/doxygen/latest/html/classitk_1_1simple_1_1BSplineTransform.html)|2D or 3D, deformable transformation represented by a sparse regular grid of control points |
|[DisplacementFieldTransform](https://simpleitk.org/doxygen/latest/html/classitk_1_1simple_1_1DisplacementFieldTransform.html)| 2D or 3D, deformable transformation represented as a dense regular grid of vectors|
|[CompositeTransform](https://simpleitk.org/doxygen/latest/html/classitk_1_1simple_1_1CompositeTransform.html)| 2D or 3D, stack of transformations concatenated via composition, last added, first applied|
|[Transform](https://simpleitk.org/doxygen/latest/html/classitk_1_1simple_1_1Transform.html#details) | 2D or 3D, parent/superclass for all transforms|
```
library(SimpleITK)
library(scatterplot3d)
OUTPUT_DIR <- "Output"
print(Version())
```
## Points in SimpleITK
### Utility functions
A number of functions that deal with point data in a uniform manner.
```
# Format a point for printing, based on specified precision with trailing zeros. Uniform printing for vector-like data
# (vector, array, list).
# @param point (vector-like): nD point with floating point coordinates.
# @param precision (int): Number of digits after the decimal point.
# @return: String representation of the given point "xx.xxx yy.yyy zz.zzz...".
point2str <- function(point, precision=1)
{
precision_str <- sprintf("%%.%df",precision)
return(paste(lapply(point, function(x) sprintf(precision_str, x)), collapse=", "))
}
# Generate random (uniform within bounds) nD point cloud. Dimension is based on the number of pairs in the
# bounds input.
# @param bounds (list(vector-like)): List where each vector defines the coordinate bounds.
# @param num_points (int): Number of points to generate.
# @return (matrix): Matrix whose columns are the set of points.
uniform_random_points <- function(bounds, num_points)
{
return(t(sapply(bounds, function(bnd,n=num_points) runif(n, min(bnd),max(bnd)))))
}
# Distances between points transformed by the given transformation and their
# location in another coordinate system. When the points are only used to evaluate
# registration accuracy (not used in the registration) this is the target registration
# error (TRE).
# @param tx (SimpleITK transformation): Transformation applied to the points in point_list
# @param point_data (matrix): Matrix whose columns are points which we transform using tx.
# @param reference_point_data (matrix): Matrix whose columns are points to which we compare
# the transformed point data.
# @return (vector): Distances between the transformed points and the reference points.
target_registration_errors <- function(tx, point_data, reference_point_data)
{
transformed_points_mat <- apply(point_data, MARGIN=2, tx$TransformPoint)
return (sqrt(colSums((transformed_points_mat - reference_point_data)^2)))
}
# Check whether two transformations are "equivalent" in an arbitrary spatial region
# either 3D or 2D, [x=(-10,10), y=(-100,100), z=(-1000,1000)]. This is just a sanity check,
# as we are just looking at the effect of the transformations on a random set of points in
# the region.
print_transformation_differences <- function(tx1, tx2)
{
if (tx1$GetDimension()==2 && tx2$GetDimension()==2)
{
bounds <- list(c(-10,10), c(-100,100))
}
else if(tx1$GetDimension()==3 && tx2$GetDimension()==3)
{
bounds <- list(c(-10,10), c(-100,100), c(-1000,1000))
}
else
stop('Transformation dimensions mismatch, or unsupported transformation dimensionality')
num_points <- 10
point_data <- uniform_random_points(bounds, num_points)
tx1_point_data <- apply(point_data, MARGIN=2, tx1$TransformPoint)
differences <- target_registration_errors(tx2, point_data, tx1_point_data)
cat(tx1$GetName(), "-", tx2$GetName(), ":\tminDifference: ",
toString(min(differences)), " maxDifference: ",toString(max(differences)))
}
```
In SimpleITK points can be represented by any vector-like data type. In R these include vector, array, and list. In general R will treat these data types differently, as illustrated by the print function below.
```
# SimpleITK points represented by vector-like data structures.
point_vector <- c(9.0, 10.531, 11.8341)
point_array <- array(c(9.0, 10.531, 11.8341),dim=c(1,3))
point_list <- list(9.0, 10.531, 11.8341)
print(point_vector)
print(point_array)
print(point_list)
# Uniform printing with specified precision.
precision <- 2
print(point2str(point_vector, precision))
print(point2str(point_array, precision))
print(point2str(point_list, precision))
```
## Global Transformations
All global transformations <i>except translation</i> are of the form:
$$T(\mathbf{x}) = A(\mathbf{x}-\mathbf{c}) + \mathbf{t} + \mathbf{c}$$
In ITK speak (when printing your transformation):
<ul>
<li>Matrix: the matrix $A$</li>
<li>Center: the point $\mathbf{c}$</li>
<li>Translation: the vector $\mathbf{t}$</li>
<li>Offset: $\mathbf{t} + \mathbf{c} - A\mathbf{c}$</li>
</ul>
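The "Offset" entry is just the same transformation rewritten without the centering; expanding the formula above gives
$$T(\mathbf{x}) = A(\mathbf{x}-\mathbf{c}) + \mathbf{t} + \mathbf{c} = A\mathbf{x} + \left(\mathbf{t} + \mathbf{c} - A\mathbf{c}\right),$$
so the offset is the effective translation once the matrix is applied about the origin.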
## TranslationTransform
```
# A 3D translation. Note that you need to specify the dimensionality, as the sitk TranslationTransform
# represents both 2D and 3D translations.
dimension <- 3
offset <- c(1,2,3) # offset can be any vector-like data
translation <- TranslationTransform(dimension, offset)
print(translation)
translation$GetOffset()
# Transform a point and use the inverse transformation to get the original back.
point <- c(10, 11, 12)
transformed_point <- translation$TransformPoint(point)
translation_inverse <- translation$GetInverse()
cat(paste0("original point: ", point2str(point), "\n",
"transformed point: ", point2str(transformed_point), "\n",
"back to original: ", point2str(translation_inverse$TransformPoint(transformed_point))))
```
## Euler2DTransform
```
point <- c(10, 11)
rotation2D <- Euler2DTransform()
rotation2D$SetTranslation(c(7.2, 8.4))
rotation2D$SetAngle(pi/2.0)
cat(paste0("original point: ", point2str(point), "\n",
"transformed point: ", point2str(rotation2D$TransformPoint(point)),"\n"))
# Change the center of rotation so that it coincides with the point we want to
# transform, why is this a unique configuration?
rotation2D$SetCenter(point)
cat(paste0("original point: ", point2str(point), "\n",
"transformed point: ", point2str(rotation2D$TransformPoint(point)),"\n"))
```
## VersorTransform
```
# Rotation only, parametrized by Versor (vector part of unit quaternion),
# quaternion defined by rotation of theta around axis n:
# q = [n*sin(theta/2), cos(theta/2)]
# 180 degree rotation around z axis
# Use a versor:
rotation1 <- VersorTransform(c(0,0,1,0))
# Use axis-angle:
rotation2 <- VersorTransform(c(0,0,1), pi)
# Use a matrix:
rotation3 <- VersorTransform()
rotation3$SetMatrix(c(-1, 0, 0, 0, -1, 0, 0, 0, 1))
point <- c(10, 100, 1000)
p1 <- rotation1$TransformPoint(point)
p2 <- rotation2$TransformPoint(point)
p3 <- rotation3$TransformPoint(point)
cat(paste0("Points after transformation:\np1=", point2str(p1,15),
"\np2=", point2str(p2,15),"\np3=", point2str(p3,15)))
```
We applied the "same" transformation to the same point, so why are the results slightly different for the second initialization method?
This is where theory meets practice. Using the axis-angle initialization method involves trigonometric functions which on a fixed precision machine lead to these slight differences. In many cases this is not an issue, but it is something to remember. From here on we will sweep it under the rug (printing with a more reasonable precision).
## Translation to Rigid [3D]
Copy the translational component.
```
dimension <- 3
trans <- c(1,2,3)
translation <- TranslationTransform(dimension, trans)
# Only need to copy the translational component.
rigid_euler <- Euler3DTransform()
rigid_euler$SetTranslation(translation$GetOffset())
rigid_versor <- VersorRigid3DTransform()
rigid_versor$SetTranslation(translation$GetOffset())
# Sanity check to make sure the transformations are equivalent.
bounds <- list(c(-10,10), c(-100,100), c(-1000,1000))
num_points <- 10
point_data <- uniform_random_points(bounds, num_points)
transformed_point_data <- apply(point_data, MARGIN=2, translation$TransformPoint)
# Draw the original and transformed points.
all_data <- cbind(point_data, transformed_point_data)
xbnd <- range(all_data[1,])
ybnd <- range(all_data[2,])
zbnd <- range(all_data[3,])
s3d <- scatterplot3d(t(point_data), color = "blue", pch = 19, xlab='', ylab='', zlab='',
xlim=xbnd, ylim=ybnd, zlim=zbnd)
s3d$points3d(t(transformed_point_data), col = "red", pch = 17)
legend("topleft", col= c("blue", "red"), pch=c(19,17), legend = c("Original points", "Transformed points"))
euler_errors <- target_registration_errors(rigid_euler, point_data, transformed_point_data)
versor_errors <- target_registration_errors(rigid_versor, point_data, transformed_point_data)
cat(paste0("Euler\tminError:", point2str(min(euler_errors))," maxError: ", point2str(max(euler_errors)),"\n"))
cat(paste0("Versor\tminError:", point2str(min(versor_errors))," maxError: ", point2str(max(versor_errors)),"\n"))
```
## Rotation to Rigid [3D]
Copy the matrix or versor and <b>center of rotation</b>.
```
rotationCenter <- c(10, 10, 10)
rotation <- VersorTransform(c(0,0,1,0), rotationCenter)
rigid_euler <- Euler3DTransform()
rigid_euler$SetMatrix(rotation$GetMatrix())
rigid_euler$SetCenter(rotation$GetCenter())
rigid_versor <- VersorRigid3DTransform()
rigid_versor$SetRotation(rotation$GetVersor())
#rigid_versor.SetCenter(rotation.GetCenter()) #intentional error
# Sanity check to make sure the transformations are equivalent.
bounds <- list(c(-10,10),c(-100,100), c(-1000,1000))
num_points <- 10
point_data <- uniform_random_points(bounds, num_points)
transformed_point_data <- apply(point_data, MARGIN=2, rotation$TransformPoint)
euler_errors <- target_registration_errors(rigid_euler, point_data, transformed_point_data)
versor_errors <- target_registration_errors(rigid_versor, point_data, transformed_point_data)
# Draw the points transformed by the original transformation and after transformation
# using the incorrect transformation, illustrate the effect of center of rotation.
incorrect_transformed_point_data <- apply(point_data, 2, rigid_versor$TransformPoint)
all_data <- cbind(transformed_point_data, incorrect_transformed_point_data)
xbnd <- range(all_data[1,])
ybnd <- range(all_data[2,])
zbnd <- range(all_data[3,])
s3d <- scatterplot3d(t(transformed_point_data), color = "blue", pch = 19, xlab='', ylab='', zlab='',
xlim=xbnd, ylim=ybnd, zlim=zbnd)
s3d$points3d(t(incorrect_transformed_point_data), col = "red", pch = 17)
legend("topleft", col= c("blue", "red"), pch=c(19,17), legend = c("Original points", "Transformed points"))
cat(paste0("Euler\tminError:", point2str(min(euler_errors))," maxError: ", point2str(max(euler_errors)),"\n"))
cat(paste0("Versor\tminError:", point2str(min(versor_errors))," maxError: ", point2str(max(versor_errors)),"\n"))
```
## Similarity [2D]
When the center of the similarity transformation is not at the origin the effect of the transformation is not what most of us expect. This is readily visible if we limit the transformation to scaling: $T(\mathbf{x}) = s\mathbf{x}-s\mathbf{c} + \mathbf{c}$. Changing the transformation's center results in scale + translation.
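Written out, scaling by $s$ about a center $\mathbf{c}$ is
$$T(\mathbf{x}) = s\mathbf{x} - s\mathbf{c} + \mathbf{c} = s\mathbf{x} + (1-s)\mathbf{c},$$
so moving the center away from the origin adds the translation $(1-s)\mathbf{c}$ on top of the pure scaling.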
```
# 2D square centered on (0,0)
points <- matrix(data=c(-1.0,-1.0, -1.0,1.0, 1.0,1.0, 1.0,-1.0), ncol=4, nrow=2)
# Scale by 2 (center default is [0,0])
similarity <- Similarity2DTransform();
similarity$SetScale(2)
scaled_points <- apply(points, MARGIN=2, similarity$TransformPoint)
#Uncomment the following lines to change the transformations center and see what happens:
#similarity$SetCenter(c(0,2))
#scaled_points <- apply(points, 2, similarity$TransformPoint)
plot(points[1,],points[2,], xlim=c(-10,10), ylim=c(-10,10), pch=19, col="blue", xlab="", ylab="", las=1)
points(scaled_points[1,], scaled_points[2,], col="red", pch=17)
legend('top', col= c("red", "blue"), pch=c(17,19), legend = c("transformed points", "original points"))
```
## Rigid to Similarity [3D]
Copy the translation, center, and matrix or versor.
```
rotation_center <- c(100, 100, 100)
theta_x <- 0.0
theta_y <- 0.0
theta_z <- pi/2.0
translation <- c(1,2,3)
rigid_euler <- Euler3DTransform(rotation_center, theta_x, theta_y, theta_z, translation)
similarity <- Similarity3DTransform()
similarity$SetMatrix(rigid_euler$GetMatrix())
similarity$SetTranslation(rigid_euler$GetTranslation())
similarity$SetCenter(rigid_euler$GetCenter())
# Apply the transformations to the same set of random points and compare the results
# (see utility functions at top of notebook).
print_transformation_differences(rigid_euler, similarity)
```
## Similarity to Affine [3D]
Copy the translation, center and matrix.
```
rotation_center <- c(100, 100, 100)
axis <- c(0,0,1)
angle <- pi/2.0
translation <- c(1,2,3)
scale_factor <- 2.0
similarity <- Similarity3DTransform(scale_factor, axis, angle, translation, rotation_center)
affine <- AffineTransform(3)
affine$SetMatrix(similarity$GetMatrix())
affine$SetTranslation(similarity$GetTranslation())
affine$SetCenter(similarity$GetCenter())
# Apply the transformations to the same set of random points and compare the results
# (see utility functions at top of notebook).
print_transformation_differences(similarity, affine)
```
## Scale Transform
Just as the case was for the similarity transformation above, when the transformation's center is not at the origin, instead of a pure anisotropic scaling we also have translation ($T(\mathbf{x}) = \mathbf{s}^T\mathbf{x}-\mathbf{s}^T\mathbf{c} + \mathbf{c}$).
```
# 2D square centered on (0,0).
points <- matrix(data=c(-1.0,-1.0, -1.0,1.0, 1.0,1.0, 1.0,-1.0), ncol=4, nrow=2)
# Scale by half in x and 2 in y.
scale <- ScaleTransform(2, c(0.5,2));
scaled_points <- apply(points, 2, scale$TransformPoint)
#Uncomment the following lines to change the transformation's center and see what happens:
#scale$SetCenter(c(0,2))
#scaled_points <- apply(points, 2, scale$TransformPoint)
plot(points[1,],points[2,], xlim=c(-10,10), ylim=c(-10,10), pch=19, col="blue", xlab="", ylab="", las=1)
points(scaled_points[1,], scaled_points[2,], col="red", pch=17)
legend('top', col= c("red", "blue"), pch=c(17,19), legend = c("transformed points", "original points"))
```
## Scale Versor
Despite its name, this is not a composition of an anisotropic scaling and a rigid transformation. It is:
$$T(x) = (R+S)(\mathbf{x}-\mathbf{c}) + \mathbf{t} + \mathbf{c},\;\; \textrm{where } S= \left[\begin{array}{ccc} s_0-1 & 0 & 0 \\ 0 & s_1-1 & 0 \\ 0 & 0 & s_2-1 \end{array}\right]$$
There is no natural way of "promoting" the similarity transformation to this transformation.
```
scales <- c(0.5,0.7,0.9)
translation <- c(1,2,3)
axis <- c(0,0,1)
angle <- 0.0
scale_versor <- ScaleVersor3DTransform(scales, axis, angle, translation)
print(scale_versor)
```
## Scale Skew Versor
Again, despite the name, this is not a composition of transformations. It is:
$$T(x) = (R+S+K)(\mathbf{x}-\mathbf{c}) + \mathbf{t} + \mathbf{c},\;\; \textrm{where } S = \left[\begin{array}{ccc} s_0-1 & 0 & 0 \\ 0 & s_1-1 & 0 \\ 0 & 0 & s_2-1 \end{array}\right]\;\; \textrm{and } K = \left[\begin{array}{ccc} 0 & k_0 & k_1 \\ k_2 & 0 & k_3 \\ k_4 & k_5 & 0 \end{array}\right]$$
In practice this is an over-parametrized version of the affine transform, 15 (scale, skew, versor, translation) vs. 12 parameters (matrix, translation).
```
scale <- c(2,2.1,3)
skew <- seq(0, 1, length.out=6) # six equally spaced values in [0,1], an arbitrary choice
translation <- c(1,2,3)
versor <- c(0,0,0,1.0)
scale_skew_versor <- ScaleSkewVersor3DTransform(scale, skew, versor, translation)
print(scale_skew_versor)
```
## Bounded Transformations
SimpleITK supports two types of bounded non-rigid transformations, BSplineTransform (sparse representation) and DisplacementFieldTransform (dense representation).
Transforming a point that is outside the bounds will return the original point - identity transform.
```
#
# This function displays the effects of the deformable transformation on a grid of points by scaling the
# initial displacements (either of the control points for BSpline or of the deformation field itself). For
# display purposes it assumes that all points lie within [-2.5, 2.5] x [-2.5, 2.5].
#
display_displacement_scaling_effect <- function(s, original_x_mat, original_y_mat, tx, original_control_point_displacements)
{
if(tx$GetDimension()!=2)
stop('display_displacement_scaling_effect only works in 2D')
tx$SetParameters(s*original_control_point_displacements)
transformed_points <- mapply(function(x,y) tx$TransformPoint(c(x,y)), original_x_mat, original_y_mat)
plot(original_x_mat,original_y_mat, xlim=c(-2.5,2.5), ylim=c(-2.5,2.5), pch=19, col="blue", xlab="", ylab="", las=1)
points(transformed_points[1,], transformed_points[2,], col="red", pch=17)
legend('top', col= c("red", "blue"), pch=c(17,19), legend = c("transformed points", "original points"))
}
```
## BSpline
Using a sparse set of control points to control a free form deformation.
```
# Create the transformation (when working with images it is easier to use the BSplineTransformInitializer function
# or its object oriented counterpart BSplineTransformInitializerFilter).
dimension <- 2
spline_order <- 3
direction_matrix_row_major <- c(1.0,0.0,0.0,1.0) # identity, mesh is axis aligned
origin <- c(-1.0,-1.0)
domain_physical_dimensions <- c(2,2)
bspline <- BSplineTransform(dimension, spline_order)
bspline$SetTransformDomainOrigin(origin)
bspline$SetTransformDomainDirection(direction_matrix_row_major)
bspline$SetTransformDomainPhysicalDimensions(domain_physical_dimensions)
bspline$SetTransformDomainMeshSize(c(4,3))
# Random displacement of the control points.
originalControlPointDisplacements <- runif(length(bspline$GetParameters()))
bspline$SetParameters(originalControlPointDisplacements)
# Apply the bspline transformation to a grid of points
# starting the point set exactly at the origin of the bspline mesh is problematic as
# these points are considered outside the transformation's domain,
# remove epsilon below and see what happens.
numSamplesX = 10
numSamplesY = 20
eps <- .Machine$double.eps
coordsX <- seq(origin[1] + eps,
origin[1] + domain_physical_dimensions[1],
(domain_physical_dimensions[1]-eps)/(numSamplesX-1))
coordsY <- seq(origin[2] + eps,
origin[2] + domain_physical_dimensions[2],
(domain_physical_dimensions[2]-eps)/(numSamplesY-1))
# next two lines equivalent to Python's/MATLAB's meshgrid
XX <- outer(coordsY*0, coordsX, "+")
YY <- outer(coordsY, coordsX*0, "+")
display_displacement_scaling_effect(0.0, XX, YY, bspline, originalControlPointDisplacements)
#uncomment the following line to see the effect of scaling the control point displacements
# on our set of points (we recommend keeping the scaling in the range [-1.5,1.5] due to display bounds)
#display_displacement_scaling_effect(0.5, XX, YY, bspline, originalControlPointDisplacements)
```
## DisplacementField
A dense set of vectors representing the displacement inside the given domain. The most generic representation of a transformation.
```
# Create the displacement field.
# When working with images the safer thing to do is use the image based constructor,
# DisplacementFieldTransform(my_image), all the fixed parameters will be set correctly and the displacement
# field is initialized using the vectors stored in the image. SimpleITK requires that the image's pixel type be
# "sitkVectorFloat64".
displacement <- DisplacementFieldTransform(2)
field_size <- c(10,20)
field_origin <- c(-1.0,-1.0)
field_spacing <- c(2.0/9.0,2.0/19.0)
field_direction <- c(1,0,0,1) # direction cosine matrix (row major order)
# Concatenate all the information into a single list
displacement$SetFixedParameters(c(field_size, field_origin, field_spacing, field_direction))
# Set the interpolator, either sitkLinear which is default or nearest neighbor
displacement$SetInterpolator("sitkNearestNeighbor")
originalDisplacements <- runif(length(displacement$GetParameters()))
displacement$SetParameters(originalDisplacements)
coordsX <- seq(field_origin[1],
field_origin[1]+(field_size[1]-1)*field_spacing[1],
field_spacing[1])
coordsY <- seq(field_origin[2],
field_origin[2]+(field_size[2]-1)*field_spacing[2],
field_spacing[2])
# next two lines equivalent to Python's/MATLAB's meshgrid
XX <- outer(coordsY*0, coordsX, "+")
YY <- outer(coordsY, coordsX*0, "+")
display_displacement_scaling_effect(0.0, XX, YY, displacement, originalDisplacements)
#uncomment the following line to see the effect of scaling the control point displacements
# on our set of points (we recommend keeping the scaling in the range [-1.5,1.5] due to display bounds)
#display_displacement_scaling_effect(0.5, XX, YY, displacement, originalDisplacements)
```
Displacement field transform created from an image. Remember that SimpleITK will clear the image you provide, as shown in the cell below.
```
displacement_image <- Image(c(64,64), "sitkVectorFloat64")
# The only point that has any displacement is at physical SimpleITK index (0,0), R index (1,1)
displacement <- c(0.5,0.5)
# Note that SimpleITK indexing starts at zero.
displacement_image$SetPixel(c(0,0), displacement)
cat('Original displacement image size: ',point2str(displacement_image$GetSize()),"\n")
displacement_field_transform <- DisplacementFieldTransform(displacement_image)
cat("After using the image to create a transform, displacement image size: ",
point2str(displacement_image$GetSize()), "\n")
# Check that the displacement field transform does what we expect.
cat("Expected result: ",point2str(displacement),
"\nActual result: ", displacement_field_transform$TransformPoint(c(0,0)),"\n")
```
## CompositeTransform
This class represents a composition of transformations, multiple transformations applied one after the other.
The choice of whether to use a composite transformation or compose transformations on your own has subtle differences in the registration framework.
Below we represent the composite transformation $T_{affine}(T_{rigid}(x))$ in two ways: (1) use a composite transformation to contain the two; (2) combine the two into a single affine transformation. We can use both as initial transforms (SetInitialTransform) for the registration framework (ImageRegistrationMethod). The difference is that in the former case the optimized parameters belong to the rigid transformation and in the latter they belong to the combined-affine transformation.
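For completeness, here is the algebra behind the recipe used in the code below. Writing each global transformation as $T_i(\mathbf{x}) = A_i(\mathbf{x}-\mathbf{c}_i) + \mathbf{t}_i + \mathbf{c}_i$, the composition is
$$T_0(T_1(\mathbf{x})) = A_0A_1(\mathbf{x}-\mathbf{c}_1) + A_0(\mathbf{t}_1+\mathbf{c}_1-\mathbf{c}_0) + \mathbf{t}_0 + \mathbf{c}_0,$$
which is again of the same form with $A = A_0A_1$, $\mathbf{c} = \mathbf{c}_1$ and $\mathbf{t} = A_0(\mathbf{t}_1+\mathbf{c}_1-\mathbf{c}_0) + \mathbf{t}_0 + \mathbf{c}_0 - \mathbf{c}_1$.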
```
# Create a composite transformation: T_affine(T_rigid(x)).
rigid_center <- c(100,100,100)
theta_x <- 0.0
theta_y <- 0.0
theta_z <- pi/2.0
rigid_translation <- c(1,2,3)
rigid_euler <- Euler3DTransform(rigid_center, theta_x, theta_y, theta_z, rigid_translation)
affine_center <- c(20, 20, 20)
affine_translation <- c(5,6,7)
# Matrix is represented as a vector-like data in row major order.
affine_matrix <- runif(9)
affine <- AffineTransform(affine_matrix, affine_translation, affine_center)
# Using the composite transformation we just add them in (stack based, first in - last applied).
composite_transform <- CompositeTransform(affine)
composite_transform$AddTransform(rigid_euler)
# Create a single transform manually. This is a recipe for compositing any two global transformations
# into an affine transformation, T_0(T_1(x)):
# A = A0*A1
# c = c1
# t = A0*[t1+c1-c0] + t0+c0-c1
A0 <- t(matrix(affine$GetMatrix(), 3, 3))
c0 <- affine$GetCenter()
t0 <- affine$GetTranslation()
A1 <- t(matrix(rigid_euler$GetMatrix(), 3, 3))
c1 <- rigid_euler$GetCenter()
t1 <- rigid_euler$GetTranslation()
combined_mat <- A0%*%A1
combined_center <- c1
combined_translation <- A0 %*% (t1+c1-c0) + t0+c0-c1
combined_affine <- AffineTransform(c(t(combined_mat)), combined_translation, combined_center)
# Check if the two transformations are "equivalent".
cat("Apply the two transformations to the same point cloud:\n")
print_transformation_differences(composite_transform, combined_affine)
cat("\nTransform parameters:\n")
cat(paste("\tComposite transform: ", point2str(composite_transform$GetParameters(),2),"\n"))
cat(paste("\tCombined affine: ", point2str(combined_affine$GetParameters(),2),"\n"))
cat("Fixed parameters:\n")
cat(paste("\tComposite transform: ", point2str(composite_transform$GetFixedParameters(),2),"\n"))
cat(paste("\tCombined affine: ", point2str(combined_affine$GetFixedParameters(),2),"\n"))
```
Composite transforms enable a combination of a global transformation with multiple local/bounded transformations. This is useful if we want to apply deformations only in regions that deform, while other regions are only affected by the global transformation.
The following code illustrates this, where the whole region is translated and subregions have different deformations.
```
# Global transformation.
translation <- TranslationTransform(2, c(1.0,0.0))
# Displacement in region 1.
displacement1 <- DisplacementFieldTransform(2)
field_size <- c(10,20)
field_origin <- c(-1.0,-1.0)
field_spacing <- c(2.0/9.0,2.0/19.0)
field_direction <- c(1,0,0,1) # direction cosine matrix (row major order)
# Concatenate all the information into a single list.
displacement1$SetFixedParameters(c(field_size, field_origin, field_spacing, field_direction))
displacement1$SetParameters(rep(1.0, length(displacement1$GetParameters())))
# Displacement in region 2.
displacement2 <- DisplacementFieldTransform(2)
field_size <- c(10,20)
field_origin <- c(1.0,-3)
field_spacing <- c(2.0/9.0,2.0/19.0)
field_direction <- c(1,0,0,1) #direction cosine matrix (row major order)
# Concatenate all the information into a single list.
displacement2$SetFixedParameters(c(field_size, field_origin, field_spacing, field_direction))
displacement2$SetParameters(rep(-1.0, length(displacement2$GetParameters())))
# Composite transform which applies the global and local transformations.
composite <- CompositeTransform(translation)
composite$AddTransform(displacement1)
composite$AddTransform(displacement2)
# Apply the composite transformation to points in ([-1,-3],[3,1]) and
# display the deformation using a quiver plot.
# Generate points.
numSamplesX <- 10
numSamplesY <- 10
coordsX <- seq(-1.0, 3.0, 4.0/(numSamplesX-1))
coordsY <- seq(-3.0, 1.0, 4.0/(numSamplesY-1))
# next two lines equivalent to Python's/MATLAB's meshgrid
original_x_mat <- outer(coordsY*0, coordsX, "+")
original_y_mat <- outer(coordsY, coordsX*0, "+")
# Transform points and plot.
original_points <- mapply(function(x,y) c(x,y), original_x_mat, original_y_mat)
transformed_points <- mapply(function(x,y) composite$TransformPoint(c(x,y)), original_x_mat, original_y_mat)
plot(0,0,xlim=c(-1.0,3.0), ylim=c(-3.0,1.0), las=1)
arrows(original_points[1,], original_points[2,], transformed_points[1,], transformed_points[2,])
```
## Transform
This class represents a generic transform and is the return type from the registration framework (if not done in place). Underneath the generic facade is one of the actual classes. To find out who is hiding under the hood we can query the transform to obtain the [TransformEnum](https://simpleitk.org/doxygen/latest/html/namespaceitk_1_1simple.html#a527cb966ed81d0bdc65999f4d2d4d852).
We can then downcast the generic transform to its actual type and obtain access to the relevant methods. Note that attempting to access such a method will fail without invoking an exception, so we cannot use `try` or `tryCatch`; instead we check the transform enumeration before downcasting.
```
tx <- Transform(TranslationTransform(2,c(1.0,0.0)))
if(tx$GetTransformEnum() == 'sitkTranslation') {
translation = TranslationTransform(tx)
cat(paste(c('Translation is:', translation$GetOffset()), collapse=' '))
}
```
## Writing and Reading
The `ReadTransform()` function returns a SimpleITK `Transform`. The content of the file can be any of the SimpleITK transformations or a composite (set of transformations).
**Details of note**:
1. When read from file, the type of the returned transform is the generic `Transform`. We can then obtain the "true" transform type via the `Downcast` method.
2. Writing of nested composite transforms is not supported; you will need to "flatten" the transform before writing it to file.
```
# Create a 2D rigid transformation, write it to disk and read it back.
basic_transform <- Euler2DTransform()
basic_transform$SetTranslation(c(1,2))
basic_transform$SetAngle(pi/2.0)
full_file_name <- file.path(OUTPUT_DIR, "euler2D.tfm")
WriteTransform(basic_transform, full_file_name)
# The ReadTransform function returns a SimpleITK Transform no matter the type of the transform
# found in the file (global, bounded, composite).
read_result <- ReadTransform(full_file_name)
cat(paste("Original type: ",basic_transform$GetName(),"\nType after reading: ", read_result$GetName(),"\n"))
print_transformation_differences(basic_transform, read_result)
# Create a composite transform then write and read.
displacement <- DisplacementFieldTransform(2)
field_size <- c(10,20)
field_origin <- c(-10.0,-100.0)
field_spacing <- c(20.0/(field_size[1]-1),200.0/(field_size[2]-1))
field_direction <- c(1,0,0,1) #direction cosine matrix (row major order)
# Concatenate all the information into a single list.
displacement$SetFixedParameters(c(field_size, field_origin, field_spacing, field_direction))
displacement$SetParameters(runif(length(displacement$GetParameters())))
composite_transform <- Transform(basic_transform)
composite_transform$AddTransform(displacement)
full_file_name <- file.path(OUTPUT_DIR, "composite.tfm")
WriteTransform(composite_transform, full_file_name)
read_result <- ReadTransform(full_file_name)
cat("\n")
print_transformation_differences(composite_transform, read_result)
x_translation <- TranslationTransform(2,c(1,0))
y_translation <- TranslationTransform(2,c(0,1))
# Create composite transform with the x_translation repeated 3 times
composite_transform1 <- CompositeTransform(x_translation)
composite_transform1$AddTransform(x_translation)
composite_transform1$AddTransform(x_translation)
# Create a nested composite transform
composite_transform <- CompositeTransform(y_translation)
composite_transform$AddTransform(composite_transform1)
cat(paste0('Nested composite transform contains ',composite_transform$GetNumberOfTransforms(), ' transforms.\n'))
# We cannot write nested composite transformations, so we
# flatten it (unravel the nested part)
composite_transform$FlattenTransform()
cat(paste0('Nested composite transform after flattening contains ',composite_transform$GetNumberOfTransforms(), ' transforms.\n'))
full_file_name <- file.path(OUTPUT_DIR, "composite.tfm")
WriteTransform(composite_transform, full_file_name)
```
# K-means clustering
When working with large datasets it can be helpful to group similar observations together. This process, known as clustering, is one of the most widely used techniques in Machine Learning and is particularly useful when our dataset comes without pre-existing labels.
In this notebook we're going to implement the classic K-means algorithm, the simplest and most widely used clustering method. Once we've implemented it we'll use it to split a dataset into groups and see how our clustering compares to the 'true' labelling.
## Import Modules
```
import numpy as np
import random
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
```
## Generate Dataset
```
modelParameters = {'mu':[[-2,1], [0.5, -1], [0,1]],
'pi':[0.2, 0.35, 0.45],
'sigma':0.4,
'n':200}
#Check that pi sums to 1
if not np.isclose(np.sum(modelParameters['pi']), 1):
    print('Mixture weights must sum to 1!')
data = []
#determine which mixture each point belongs to
def generateLabels(n, pi):
    #Generate n realisations of a categorical distribution given the parameters pi
    unif = np.random.uniform(size = n) #Generate uniform random variables
    labels = [(u < np.cumsum(pi)).argmax() for u in unif] #assign cluster
    return labels
#Given the labels, generate from the corresponding normal distribution
def generateMixture(labels, params):
    normalSamples = []
    for label in labels:
        #Select Parameters
        mu = params['mu'][label]
        Sigma = np.diag([params['sigma']**2]*len(mu))
        #sample from multivariate normal
        samp = np.random.multivariate_normal(mean = mu, cov = Sigma, size = 1)
        normalSamples.append(samp)
    normalSamples = np.reshape(normalSamples, (len(labels), len(params['mu'][0])))
    return normalSamples
labels = generateLabels(100, modelParameters['pi']) #labels - (in practice we don't actually know what these are!)
X = generateMixture(labels, modelParameters) #features - (we do know what these are)
```
## Quickly plot the data so we know what it looks like
```
plt.figure(figsize=(10,6))
plt.scatter(X[:,0], X[:,1],c = labels)
plt.show()
```
When doing K-means clustering, our goal is to sort the observations $X$ into 3 clusters. When we're doing clustering we don't have access to the colour (label) of each point, so the data we're actually given would look like this:
```
plt.figure(figsize=(10,6))
plt.scatter(X[:,0], X[:,1])
plt.title('Example data - no labels')
plt.show()
```
If we inspect the data we can still see that it is roughly made up of 3 groups: one in the top left corner, one in the top right corner and one in the bottom right corner.
## How does K-means work?
The K in K-means represents the number of clusters, K, that we will sort the data into.
Let's imagine we had already sorted the data into K clusters (like in the first plot above) and were trying to decide what the label of a new point should be. It would make sense to assign it to the cluster which it is closest to.
But how do we define 'closest to'? One way would be to give it the same label as the point that is closest to it (a 'nearest neighbour' approach), but a more robust way would be to determine where the 'middle' of each cluster is and assign the new point to the cluster with the closest middle. We call this 'middle' the Cluster Centroid and we calculate it by taking the average of all the points in the cluster.
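As a minimal illustration of this assignment rule (the centroid coordinates and the query point here are made up purely for the example):
```
import numpy as np

centroids = np.array([[-2.0, 1.0], [0.5, -1.0], [0.0, 1.0]])  # K x p array of cluster centroids
x = np.array([0.3, 0.8])                                      # a single new observation

# Squared Euclidean distance from x to each centroid, then pick the nearest one
distances = np.sum((centroids - x)**2, axis=1)
print('Assigned to cluster', np.argmin(distances))
```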
That's all very well and good if we already have the clusters in place, but the whole point of the algorithm is to find out what the clusters are!
To find the clusters, we do the following:
1. Randomly initialise K Cluster Centroids
2. Assign each point to the Cluster Centroid that it is closest to.
3. Update the Cluster Centroids as the average of all points currently assigned to that centroid
4. Repeat steps 2-3 until convergence
### Why does K-means work?
Our aim is to find K Cluster Centroids such that the overall distance between each datapoint and its Cluster Centroid is minimised. That is, we want to choose cluster centroids $C = \{C_1,...,C_K\}$ such that the error function:
$$E(C) = \sum_{i=1}^n ||x_i-C_{x_i}||^2$$
is minimised, where $C_{x_i}$ is the Cluster Centroid associated with the ith observation and $||x_i-C_{x_i}||$ is the Euclidean distance between the ith observation and associated Cluster Centroid.
Now assume that after $m$ iterations of the algorithm, the current value of $E(C)$ is $\alpha$. By carrying out step 2, we make sure that each point is assigned to the nearest cluster centroid - by doing this, either $\alpha$ stays the same (every point was already assigned to the closest centroid) or $\alpha$ gets smaller (one or more points are moved to a nearer centroid and hence the total distance is reduced). Similarly with step 3, by changing the centroid to be the average of all points in the cluster, we minimise the total distance associated with that cluster, meaning $\alpha$ can either stay the same or go down.
In this way we see that as we run the algorithm $E(C)$ is non-increasing, so by continuing to run the algorithm our results can't get worse - hopefully if we run it for long enough then the results will be sensible!
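If you want to monitor convergence while implementing the class below, $E(C)$ can be evaluated directly with NumPy. This is just a sketch; it assumes `data` is an $n \times p$ array, `centroids` a $K \times p$ array and `assignments` an array holding the cluster index of each observation:
```
import numpy as np

def kmeans_error(data, centroids, assignments):
    #Sum of squared Euclidean distances between each point and its assigned centroid
    return np.sum((data - centroids[assignments])**2)
```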
```
class KMeans:
    def __init__(self, data, K):
        self.data = data #dataset with no labels
        self.K = K #Number of clusters to sort the data into
        #Randomly initialise Centroids
        self.Centroids = np.random.normal(0,1,(self.K, self.data.shape[1])) #If the data has p features then should be a K x p array

    def closestCentroid(self, x):
        #Takes a single example and returns the index of the closest centroid
        #Recall centroids are saved as self.Centroids
        distances = np.sum((self.Centroids - x)**2, axis=1)
        return np.argmin(distances)

    def assignToCentroid(self):
        #Assign each observation to a centroid by passing each observation to the function closestCentroid
        self.assignments = np.array([self.closestCentroid(x) for x in self.data])

    def updateCentroids(self):
        #Based on the current cluster assignments (stored in self.assignments) update the Centroids
        for k in range(self.K):
            members = self.data[self.assignments == k]
            if len(members) > 0: #leave a centroid unchanged if no points are currently assigned to it
                self.Centroids[k] = members.mean(axis=0)

    def runKMeans(self, tolerance = 0.00001):
        #When the improvement between two successive evaluations of our error function is less than tolerance, we stop
        change = 1000 #Initialise change to be a big number
        numIterations = 0
        self.CentroidStore = [np.copy(self.Centroids)] #We want to be able to keep track of how the centroids evolved over time
        previousError = np.inf
        while change > tolerance:
            self.assignToCentroid()  #Step 2: assign each point to its nearest centroid
            self.updateCentroids()   #Step 3: move each centroid to the mean of its assigned points
            self.CentroidStore.append(np.copy(self.Centroids))
            #Evaluate the error function E(C) for the current assignments and centroids
            currentError = np.sum((self.data - self.Centroids[self.assignments])**2)
            change = previousError - currentError
            previousError = currentError
            numIterations += 1
        print(f'K-means Algorithm converged in {numIterations} steps')

myKM = KMeans(X,3)
myKM.runKMeans()
```
## Let's plot the results
```
c = [0,1,2]*len(myKM.CentroidStore)
plt.figure(figsize=(10,6))
plt.scatter(np.array(myKM.CentroidStore).reshape(-1,2)[:,0], np.array(myKM.CentroidStore).reshape(-1,2)[:,1],c=np.array(c), s = 200, marker = '*')
plt.scatter(X[:,0], X[:,1], s = 12)
plt.title('Example data from a mixture of Gaussians - Cluster Centroid traces')
plt.show()
```
The stars of each colour above represent the trajectory of each cluster centroid as the algorithm progressed. Starting from a random initialisation, each centroid rapidly converged to a separate cluster, which is encouraging.
Now let's plot the data with the associated labels that we've assigned to them.
```
plt.figure(figsize=(10,6))
plt.scatter(X[:,0], X[:,1], s = 20, c = myKM.assignments)
plt.scatter(np.array(myKM.Centroids).reshape(-1,2)[:,0], np.array(myKM.Centroids).reshape(-1,2)[:,1], s = 200, marker = '*', c = 'red')
plt.title('Example data from a mixture of Gaussians - Including Cluster Centroids')
plt.show()
```
The plot above shows the final clusters (with red Cluster Centroids) assigned by the model, which should be pretty close to the 'true' clusters at the top of the page. Note: It's possible that although the clusters are the same the labels might be different - remember that K-means isn't supposed to identify the correct label, it's supposed to group the data in clusters which in reality share the same labels.
The data we've worked with in this notebook had an underlying structure that made it easy for K-means to identify distinct clusters. However, let's look at an example where K-means doesn't perform so well.
## The sting in the tail - A more complex data structure
```
theta = np.linspace(0, 2*np.pi, 100)
r = 15
x1 = r*np.cos(theta)
x2 = r*np.sin(theta)
#Perturb the values in the circle
x1 = x1 + np.random.normal(0,2,x1.shape[0])
x2 = x2 + np.random.normal(0,2,x2.shape[0])
z1 = np.random.normal(0,3,x1.shape[0])
z2 = np.random.normal(0,3,x2.shape[0])
x1 = np.array([x1,z1]).reshape(-1)
x2 = np.array([x2,z2]).reshape(-1)
plt.scatter(x1,x2)
plt.show()
```
It might be the case that the underlying generative structure that we want to capture is that the 'outer ring' in the plot corresponds to a certain kind of process and the 'inner circle' corresponds to another.
```
#Get data in the format we want
newX = []
for i in range(x1.shape[0]):
    newX.append([x1[i], x2[i]])
newX = np.array(newX)
#Run KMeans
myNewKM = KMeans(newX,2)
myNewKM.runKMeans()
plt.figure(figsize=(10,6))
plt.scatter(newX[:,0], newX[:,1], s = 20, c = np.array(myNewKM.assignments))
plt.scatter(np.array(myNewKM.Centroids).reshape(-1,2)[:,0], np.array(myNewKM.Centroids).reshape(-1,2)[:,1], s = 200, marker = '*', c = 'red')
plt.title('Assigned K-Means labels for Ring data ')
plt.show()
```
The above plot indicates that K-means isn't able to identify the ring-like structure that we mentioned above. The clustering it has performed is perfectly valid - remember, in K-means' world labels don't exist and this is a legitimate clustering of the data! However, if we were to use this clustering our subsequent analyses might be negatively impacted.
In a future post we'll implement a method which is capable of capturing non-linear relationships more effectively (the Gaussian Mixture Model).
<img src="NotebookAddons/blackboard-banner.png" width="100%" />
<font face="Calibri">
<br>
<font size="7"> <b> GEOS 657: Microwave Remote Sensing<b> </font>
<font size="5"> <b>Lab 9: InSAR Time Series Analysis using GIAnT within Jupyter Notebooks</b> </font>
<br>
<font size="4"> <b> Franz J Meyer & Joshua J C Knicely; University of Alaska Fairbanks</b> <br>
<img src="NotebookAddons/UAFLogo_A_647.png" width="170" align="right" /><font color='rgba(200,0,0,0.2)'> <b>Due Date: </b>NONE</font>
</font>
<font size="3"> This Lab is part of the UAF course <a href="https://radar.community.uaf.edu/" target="_blank">GEOS 657: Microwave Remote Sensing</a>. The primary goal of this lab is to demonstrate how to process InSAR data, specifically interferograms, using the Generic InSAR Analysis Toolbox (<a href="http://earthdef.caltech.edu/projects/giant/wiki" target="_blank">GIAnT</a>) in the framework of *Jupyter Notebooks*.<br>
<b>Our specific objectives for this lab are to:</b>
- Learn how to prepare data for GIAnT.
- Use GIAnT to create maps of surface deformation.
- Understand its capabilities.
- Understand its limitations.
</font>
<br>
<font face="Calibri">
<font size="5"> <b> Target Description </b> </font>
<font size="3"> In this lab, we will analyze the volcano Sierra Negra. This is a highly active volcano on the Galapagos hotspot. The most recent eruption occurred from 29 June to 23 August 2018. The previous eruption occurred in October 2005, prior to the launch of the Sentinel-1 satellites, which will be the source of data we use for this lab. We will be looking at the deformation that occurred prior to the volcano's 2018 eruption. </font>
<font size="4"> <font color='rgba(200,0,0,0.2)'> <b>THIS NOTEBOOK INCLUDES NO HOMEWORK ASSIGNMENTS.</b></font> <br>
Contact me at [email protected] should you run into any problems.
</font>
```
import url_widget as url_w
notebookUrl = url_w.URLWidget()
display(notebookUrl)
from IPython.display import Markdown
from IPython.display import display
notebookUrl = notebookUrl.value
user = !echo $JUPYTERHUB_USER
env = !echo $CONDA_PREFIX
if env[0] == '':
    env[0] = 'Python 3 (base)'
if env[0] != '/home/jovyan/.local/envs/insar_analysis':
    display(Markdown(f'<text style=color:red><strong>WARNING:</strong></text>'))
    display(Markdown(f'<text style=color:red>This notebook should be run using the "insar_analysis" conda environment.</text>'))
    display(Markdown(f'<text style=color:red>It is currently using the "{env[0].split("/")[-1]}" environment.</text>'))
    display(Markdown(f'<text style=color:red>Select "insar_analysis" from the "Change Kernel" submenu of the "Kernel" menu.</text>'))
    display(Markdown(f'<text style=color:red>If the "insar_analysis" environment is not present, use <a href="{notebookUrl.split("/user")[0]}/user/{user[0]}/notebooks/conda_environments/Create_OSL_Conda_Environments.ipynb"> Create_OSL_Conda_Environments.ipynb </a> to create it.</text>'))
    display(Markdown(f'<text style=color:red>Note that you must restart your server after creating a new environment before it is usable by notebooks.</text>'))
```
<font face='Calibri'><font size='5'><b>Overview</b></font>
<br>
<font size='3'><b>About GIAnT</b>
<br>
GIAnT is a Python framework that allows rapid time series analysis of low amplitude deformation signals. It supports multiple time series analysis techniques: Small Baseline Subset (SBAS), New Small Baseline Subset (N-SBAS), and Multiscale InSAR Time-Series (MInTS). As a part of this, it includes the ability to correct for atmospheric delays by assuming a spatially uniform stratified atmosphere.
<br><br>
<b>Limitations</b>
<br>
GIAnT has a number of limitations that are important to keep in mind as these can affect its effectiveness for certain applications. It implements the simplest time-series inversion methods. Its single coherence threshold is very conservative in terms of pixel selection. It does not include any consistency checks for unwrapping errors. It has a limited dictionary of temporal model functions. It cannot correct for atmospheric effects due to differing surface elevations.
<br><br>
<b>Steps to use GIAnT</b><br>
Although GIAnT is an incredibly powerful tool, it requires very specific input. Because of the input requirements, the majority of one's effort goes to getting the data into a form that GIAnT can manipulate and to creating files that tell GIAnT what to do. The general steps to use GIAnT are below.
- Download Data
- Identify Area of Interest
- Subset (Crop) Data to Area of Interest
- Prepare Data for GIAnT
- Adjust file names
- Remove potentially disruptive default values (optional)
- Convert data from '.tiff' to '.flt' format (a sketch of one way to do this appears after this list)
- Create Input Files for GIAnT
- Create 'ifg.list'
- Create 'date.mli.par'
- Make prepxml_SBAS.py
- Run prepxml_SBAS.py
- Make userfn.py
- Run GIAnT
- PrepIgramStack.py*
- ProcessStack.py
- SBASInvert.py
- SBASxval.py
- Data Visualization
<br>
The steps from PrepIgramStack.py and above have been completed for you in order to save disk space and computation time. This allows us to concentrate on the usage of GIAnT and data visualization. Some of the code to create the preparatory files (e.g., 'ifg.list', 'date.mli.par', etc.) has been included for your potential use. More information about GIAnT can be found here: (<a href="http://earthdef.caltech.edu/projects/giant/wiki" target="_blank">http://earthdef.caltech.edu/projects/giant/wiki</a>).
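As a reference for the '.tiff' to '.flt' conversion step mentioned above, GDAL can write a flat binary 32-bit float raster. The snippet below is only a sketch: the file names are placeholders, and it assumes that the ENVI-driver output (a raw float file plus a '.hdr' sidecar) is acceptable to GIAnT's GDAL-based reader.
```
from osgeo import gdal

# Convert a GeoTIFF (placeholder names) to a flat binary 32-bit float raster plus an ENVI header
gdal.Translate('20161119-20170106_unw_phase.flt', '20161119-20170106_unw_phase.tif',
               format='ENVI', outputType=gdal.GDT_Float32)
```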
<hr>
<font face="Calibri" size="5" color="darkred"> <b>Important Note about JupyterHub</b> </font>
<br><br>
<font face="Calibri" size="3"> <b>Your JupyterHub server will automatically shutdown when left idle for more than 1 hour. Your notebooks will not be lost but you will have to restart their kernels and re-run them from the beginning. You will not be able to seamlessly continue running a partially run notebook.</b> </font>
<font face='Calibri'><font size='5'><b>0. Import Python Libraries:</b></font><br><br>
<font size='3'><b>Import the Python libraries and modules we will need to run this lab:</b></font>
```
%%capture
from datetime import date
import glob
import h5py # for is_hdf5
import os
import shutil
from osgeo import gdal
import matplotlib.pyplot as plt
import matplotlib.animation
from matplotlib import rc
import numpy as np
from IPython.display import HTML
import opensarlab_lib as asfn
asfn.jupytertheme_matplotlib_format()
```
<font face='Calibri'><font size='5'><b>1. Transfer data to a local directory</b></font><br>
<font size='3'>The data cube (referred to as a stack in the GIAnT documentation and code) and several other needed files have been created and stored in the GIAnT server. We will download this data to a local directory and unzip it. </font></font>
<font face="Calibri" size="3"> Before we download anything, <b>create a working directory for this analysis and change into it:</b> </font>
```
path = f"{os.getcwd()}/2019/lab_9_data"
if not os.path.exists(path):
    os.makedirs(path)
os.chdir(path)
print(f"Current working directory: {os.getcwd()}")
```
<font face = 'Calibri' size='3'>The first step is to find the zip file and download it to a local directory. This zip file has been placed in the S3 bucket for this class.
<br><br>
<b>Display the contents of the S3 bucket:</b></font>
```
!aws s3 ls --region=us-west-2 --no-sign-request s3://asf-jupyter-data-west/
```
<font face = 'Calibri' size='3'><b>Copy the desired file ('Lab9Files.zip') to your data directory:</b></font>
```
!aws s3 cp --region=us-west-2 --no-sign-request s3://asf-jupyter-data-west/Lab9Files.zip .
```
<font face='Calibri'><font size='3'><b>Create the directories where we will perform the GIAnT analysis and store the data:</b></font>
```
stack_path = f"{os.getcwd()}/Stack" # directory GIAnT prefers to access and store data steps.
if not os.path.exists(stack_path):
    os.makedirs(stack_path)
```
<font face='Calibri'><font size='3'><b>Extract the zipped file to path and delete it:</b></font>
```
zipped = 'Lab9Files.zip'
asfn.asf_unzip(path, zipped)
if os.path.exists(zipped):
    os.remove(zipped)
```
<font face='Calibri' size='3'>The files have been extracted and placed in a folder called 'Lab9Files'. <b>Move the amplitude image, data.xml, date.mli.par, and sbas.xml files to path and RAW-STACK.h5 to stack_path:</b></font>
```
temp_dir = f"{path}/Lab9Files"
if not os.path.exists(f"{stack_path}/RAW-STACK.h5"):
    shutil.move(f"{temp_dir}/RAW-STACK.h5", stack_path)
files = glob.glob(f"{temp_dir}/*.*")
for file in files:
    if os.path.exists(file):
        shutil.move(file, path)
if os.path.exists(temp_dir):
    os.rmdir(temp_dir)
```
<font face='Calibri'><font size='5'><b>2. Create Input Files And Code for GIAnT</b></font>
<br>
<font size ='3'>The code below shows how to create the input files and specialty code that GIAnT requires. For this lab, 'ifg.list' is not needed, 'date.mli.par' has already been provided, 'prepxml_SBAS.py' is not needed as the 'sbas.xml' and 'data.xml' files it would create have already been provided, and 'userfn.py' is not needed as we are skipping the step in which it would be used. <br>The files that would be created are listed below.
<br>
- ifg.list
- List of the interferogram properties including master and slave date, perpendicular baseline, and sensor.
- date.mli.par
- File from which GIAnT pulls requisite information about the sensor.
- This is specifically for GAMMA files. When using other interferogram processing techniques, an alternate file is required.
- prepxml_SBAS.py
- Python function to create an xml file that specifies the processing options to GIAnT.
- This must be modified by the user for their particular application.
- userfn.py
- Python function to map the interferogram dates to a physical file on disk.
- This must be modified by the user for their particular application.
</font>
</font>
<font face='Calibri' size='4'> <b>2.1 Create 'ifg.list' File </b> </font> </font>
<br>
<font face='Calibri' size='3'> This creates a simple 4-column text file that communicates network information to GIAnT. It will be created within the <b>GIAnT</b> folder.
<br><br>
<b>This step has already been done, so we will not actually create the 'ifg.list' file. This code is displayed for your potential future use.</b></font>
```
"""
# Get one of each file name. This assumes the unwrapped phase geotiff has been converted to a '.flt' file
files = [f for f in os.listdir(datadirectory) if f.endswith('_unw_phase.flt')]
# Get all of the master and slave dates.
masterDates,slaveDates = [],[]
for file in files:
    masterDates.append(file[0:8])
    slaveDates.append(file[9:17])
# Sort the dates according to the master dates.
master_dates,sDates = (list(t) for t in zip(*sorted(zip(masterDates,slaveDates))))
with open( os.path.join('GIAnT', 'ifg.list'), 'w') as fid:
    for i in range(len(master_dates)):
        masterDate = master_dates[i] # pull out master Date (first set of numbers)
        slaveDate = sDates[i] # pull out slave Date (second set of numbers)
        bperp = '0.0' # according to JPL notebooks
        sensor = 'S1' # according to JPL notebooks
        fid.write(f'{masterDate} {slaveDate} {bperp} {sensor}\n') # write values to the 'ifg.list' file.
"""
```
<font face='Calibri'><font size='3'>You may notice that the code above sets the perpendicular baseline to a value of 0.0 m. This is not the true perpendicular baseline. That value can be found in the metadata file (titled '$<$master timestamp$>$_$<$slave timestamp$>$.txt') that comes with the original interferogram. Generally, we would want the true baseline for each interferogram. However, since Sentinel-1 has such a short baseline, a value of 0.0 m is sufficient for our purposes. </font></font>
<font face='Calibri' size='4'> <b>2.2 Create 'date.mli.par' File </b></font>
<br>
<font face='Calibri' size='3'>As we are using GAMMA products, we must create a 'date.mli.par' file from which GIAnT will pull necessary information. If another processing technique is used to create the interferograms, an alternate file name and file inputs are required.
<br><br>
<b>Again, this step has already been completed and the code is only displayed for your potential future use.</b></font>
```
"""
# Create file 'date.mli.par'
# Get file names
files = [f for f in os.listdir(datadirectory) if f.endswith('_unw_phase.flt')]
# Get WIDTH (xsize) and FILE_LENGTH (ysize) information
ds = gdal.Open(datadirectory+files[0], gdal.GA_ReadOnly)
type(ds)
nLines = ds.RasterYSize
nPixels = ds.RasterXSize
trans = ds.GetGeoTransform()
ds = None
# Get the center line UTC time stamp; can also be found inside <date>_<date>.txt file and hard coded
dirName = os.listdir('ingrams')[0] # get original file name (any file can be used; the timestamps are different by a few seconds)
vals = dirName.split('-') # break file name into parts using the separator '-'
tstamp = vals[2][9:16] # extract the time stamp from the 2nd datetime (could be the first)
c_l_utc = int(tstamp[0:2])*3600 + int(tstamp[2:4])*60 + int(tstamp[4:6])
rfreq = 299792458.0 / 0.055465763 # radar frequency; speed of light divided by radar wavelength of Sentinel1 in meters
# write the 'date.mli.par' file
with open(os.path.join(path, 'date.mli.par'), 'w') as fid:
    # Method 1
    fid.write(f'radar_frequency: {rfreq} \n') # when using GAMMA products, GIAnT requires the radar frequency. Everything else is in wavelength (m)
    fid.write(f'center_time: {c_l_utc} \n') # Method from Tom Logan's prepGIAnT code; can also be found inside <date>_<date>.txt file and hard coded
    fid.write( 'heading: -11.9617913 \n') # inside <date>_<date>.txt file; can be hardcoded or set up so code finds it.
    fid.write(f'azimuth_lines: {nLines} \n') # number of lines in direction of the satellite's flight path
    fid.write(f'range_samples: {nPixels} \n') # number of pixels in direction perpendicular to satellite's flight path
    fid.close() # close the file (redundant inside a 'with' block, but harmless)
"""
```
<font face='Calibri'><font size='4'><b>2.3 Make prepxml_SBAS.py</b> </font>
<br>
<font size='3'>We will create a prepxml_SBAS.py script and put it into our GIAnT working directory. Again, this is shown for anyone who may want to use GIAnT on their own.<br>If we do wish to change 'sbas.xml' or 'data.xml', this can be done by creating and running a new 'prepxml_SBAS.py'. </font>
</font>
<font face='Calibri'> <font size='3'><b>2.3.1 Necessary prepxml_SBAS.py edits</b></font>
<br>
<font size='3'> GIAnT comes with an example prepxml_SBAS.py, but it requires significant edits for our purposes. These alterations have already been made, so we don't have to do anything now, but it is good to know the kinds of things that have to be altered. The details of some of these options can be found in the GIAnT documentation. The rest must be found in the GIAnT processing files themselves, most notably the tsxml.py and tsio.py modules. <br>The following alterations were made:
<br>
- Changed 'example' ► 'date.mli.par'
- Removed 'xlim', 'ylim', 'ref_x_lim', and 'ref_y_lim'
- These are used for clipping the files in GIAnT. As we have already done this, it is not necessary.
- Removed latfile='lat.map' and lonfile='lon.map'
- These are optional inputs for the latitude and longitude maps.
- Removed hgtfile='hgt.map'
- This is an optional altitude file for the sensor.
- Removed inc=21.
- This is the optional incidence angle information.
- It can be a constant float value or incidence angle file.
- For Sentinel1, it varies from 29.1-46.0°.
- Removed masktype='f4'
- This is the mask designation.
- We are not using any masks for this.
- Changed unwfmt='RMG' ► unwfmt='GRD'
- Read data using GDAL.
- Removed demfmt='RMG'
- Changed corfmt='RMG' ► corfmt='GRD'
- Read data using GDAL.
- Changed nvalid=30 ► nvalid=1
- This is the minimum number of interferograms in which a pixel must be coherent. A particular pixel will be included only if its coherence is above the coherence threshold, cohth, in more than nvalid number of interferograms.
- Removed atmos='ECMWF'
- This is an atmospheric correction command. It depends on a library called 'pyaps' developed for GIAnT. This library has not been installed yet.
- Changed masterdate='19920604' ► masterdate='20161119'
- Use our actual masterdate.
- I simply selected the earliest date as the masterdate.
</font>
<font face='Calibri' size='3'>Defining a reference region is a potentially important step. This is a region at which there should be no deformation. For a volcano, this should be some significant distance away from the volcano. GIAnT has the ability to automatically select a reference region, which we will use for this exercise. <br>Below is an example of how the reference region would be defined. If we look at the prepxml_SBAS.py code below, ref_x_lim and ref_y_lim, the pixel-based location of the reference region, are within the code, but have been commented out.
<br><br>
<b>Define reference region:</b></font>
```
ref_x_lim, ref_y_lim = [0, 10], [95, 105]
```
<font face='Calibri' size='3'>Note that ref_x_lim and ref_y_lim (the pixel-based location of the reference region) appear, commented out, in the prepxml_SBAS.py code below.
<br><br>
<b>This has already been completed but the code is here as an example script for creating XML files for use with the SBAS processing chain.</b></font>
```
'''
#!/usr/bin/env python
import tsinsar as ts
import argparse
import numpy as np
def parse():
    parser = argparse.ArgumentParser(description='Preparation of XML files for setting up the processing chain. Check tsinsar/tsxml.py for details on the parameters.')
    parser.parse_args()
parse()
g = ts.TSXML('data')
g.prepare_data_xml(
    'date.mli.par', proc='GAMMA',
    #ref_x_lim = [{1},{2}], ref_y_lim=[{3},{4}],
    inc = 21., cohth=0.10,
    unwfmt='GRD', corfmt='GRD', chgendian='True', endianlist=['UNW','COR'])
g.writexml('data.xml')
g = ts.TSXML('params')
g.prepare_sbas_xml(nvalid=1, netramp=True, demerr=False, uwcheck=False, regu=True, masterdate='{5}', filt=1.0)
g.writexml('sbas.xml')
############################################################
# Program is part of GIAnT v1.0 #
# Copyright 2012, by the California Institute of Technology#
# Contact: [email protected] #
############################################################
'''
```
<font face='Calibri' size='3'><b>Set the master date and create a script for creating XML files for use with the SBAS processing chain: </b></font>
```
#files = [f for f in os.listdir(datadirectory) if f.endswith('_unw_phase.flt')]
#master_date = min([files[i][0:8] for i in range(len(files))], key=int)
master_date = '20161119'
prepxml_SBAS_Template = '''
#!/usr/bin/env python
"""Example script for creating XML files for use with the SBAS processing chain. This script is supposed to be copied to the working directory and modified as needed."""
import tsinsar as ts
import argparse
import numpy as np
def parse():
    parser = argparse.ArgumentParser(description='Preparation of XML files for setting up the processing chain. Check tsinsar/tsxml.py for details on the parameters.')
    parser.parse_args()
parse()
g = ts.TSXML('data')
g.prepare_data_xml(
    'date.mli.par', proc='GAMMA',
    #ref_x_lim = [{1},{2}], ref_y_lim=[{3},{4}],
    inc = 21., cohth=0.10,
    unwfmt='GRD', corfmt='GRD', chgendian='True', endianlist=['UNW','COR'])
g.writexml('data.xml')
g = ts.TSXML('params')
g.prepare_sbas_xml(nvalid=1, netramp=True, demerr=False, uwcheck=False, regu=True, masterdate='{5}', filt=1.0)
g.writexml('sbas.xml')
############################################################
# Program is part of GIAnT v1.0 #
# Copyright 2012, by the California Institute of Technology#
# Contact: [email protected] #
############################################################
'''
with open(os.path.join(path,'prepxml_SBAS.py'), 'w') as fid:
    fid.write(prepxml_SBAS_Template.format(path,ref_x_lim[0],ref_x_lim[1],ref_y_lim[0],ref_y_lim[1],master_date))
```
<font face='Calibri'><font size='3'>To create a new 'sbas.xml' and 'data.xml' file, we would modify the above code to give new parameters and to write to the appropriate folder (e.g., to change the time filter from 1 year to none and to write to the directory in which we are working; 'filt=1.0' -> 'filt=0.0'; and 'os.path.join(path,'prepxml_SBAS.py') -> 'prepxml_SBAS.py' OR '%cd ~' into your home directory). Then we would run it below. </font></font>
<font face='Calibri' size='4'> <b>2.4 Run prepxml_SBAS.py </b> </font>
<br>
<font face='Calibri' size='3'> Here we run <b>prepxml_SBAS.py</b> to create the 2 needed files</font>
- data.xml
- sbas.xml
<font face='Calibri' size='3'> To use MinTS, we would run <b>prepxml_MinTS.py</b> to create</font>
- data.xml
- mints.xml
<font face='Calibri' size='3'> These files are needed by <b>PrepIgramStack.py</b>.
<br>
We must first switch to the GIAnT folder in which <b>prepxml_SBAS.py</b> is contained, then call it. Otherwise, <b>prepxml_SBAS.py</b> will not be able to find the file 'date.mli.par', which holds necessary processing information.
<br><br>
<b>Create a variable holding the general path to the GIAnT code base and download GIAnT from the `asf-jupyter-data-west` S3 bucket, if not present.</b>
<br>
GIAnT is no longer supported (Python 2). This unofficial version of GIAnT has been partially ported to Python 3 to run this notebook. Only the portions of GIAnT used in this notebook have been tested.
</font>
```
giant_path = "/home/jovyan/.local/GIAnT/SCR"
if not os.path.exists("/home/jovyan/.local/GIAnT"):
    download_path = 's3://asf-jupyter-data-west/GIAnT_5_21.zip'
    output_path = f"/home/jovyan/.local/{os.path.basename(download_path)}"
    !aws --region=us-west-2 --no-sign-request s3 cp $download_path $output_path
    if os.path.isfile(output_path):
        !unzip $output_path -d /home/jovyan/.local/
        os.remove(output_path)
```
<font face='Calibri' size='3'><b>Run prepxml_SBAS.py and check the output to confirm that your input values are correct:</b></font>
```
# !python $giant_path/prepxml_SBAS.py # this has already been done. data.xml and sbas.xml already exist
```
<font face='Calibri' size='3'><b>Make sure the two requisite xml files (data.xml and sbas.xml) were produced after running prepxml_SBAS.py.</b></font>
<br><br>
<font face='Calibri' size='3'><b>Display the contents of data.xml:</b></font>
```
if os.path.exists('data.xml'):
    !cat data.xml
```
<font face='Calibri' size='3'><b>Display the contents of sbas.xml:</b></font>
```
if os.path.exists('sbas.xml'):
    !cat sbas.xml
```
<font face='Calibri'><font size='4'><b>2.5 Create userfn.py</b></font>
<br>
<font size='3'>Before running the next piece of code, <b>PrepIgramStack.py</b>, we must create a python file called <b>userfn.py</b>. This file maps the interferogram dates to a physical file on disk. This python file must be in our working directory, <b>/GIAnT</b>. We can create this file from within the notebook using python.
<br><br>
<b>Again, this step has already been performed and is unnecessary, but the code is provided as an example.</b></font>
```
userfnTemplate = """
#!/usr/bin/env python
import os
def makefnames(dates1, dates2, sensor):
    dirname = '{0}'
    root = os.path.join(dirname, dates1+'-'+dates2)
    #unwname = root+'_unw_phase.flt' # for potentially disruptive default values kept.
    unwname = root+'_unw_phase_no_default.flt' # for potentially disruptive default values removed.
    corname = root+'_corr.flt'
    return unwname, corname
"""
with open('userfn.py', 'w') as fid:
    fid.write(userfnTemplate.format(path))
```
<font face='Calibri'><font size='5'><b>3. Run GIAnT</b></font>
<br>
<font size='3'>We have now created all of the necessary files to run GIAnT. The full GIAnT process requires 3 function calls.
- PrepIgramStack.py
- After PrepIgramStack.py, we will actually start running GIAnT.
- ProcessStack.py
- SBASInvert.py
- SBASxval.py
- This 4th function call is not necessary and we will skip it, but provides some error estimation that can be useful.
<font face='Calibri' size='4'> <b>3.1 Run PrepIgramStack.py </b> </font>
<br>
<font face='Calibri' size='3'> Here we would run <b>PrepIgramStack.py</b> to create the files for GIAnT. This would read in the input data and the files we previously created and output an HDF5 file. As we do not actually need to call this, it is currently set up to display some help information.<br>
Inputs:
- ifg.list
- data.xml
- sbas.xml
- interferograms
- coherence files
Outputs:
- RAW-STACK.h5
- PNG previews under 'GIAnT/Figs/Igrams'
</font>
<br>
<font size='3'><b>Display some help information for PrepIgramStack.py:</b></font>
```
!python $giant_path/PrepIgramStack.py -h
```
<font size='3'><b>Run PrepIgramStack.py (in our case, this has already been done):</b></font>
```
#!python $giant_path/PrepIgramStack.py
```
<hr>
<font face='Calibri'><font size='3'>PrepIgramStack.py creates a file called 'RAW-STACK.h5'.
<br><br>
<b>Verify that RAW-STACK.h5 is an HDF5 file as required by the rest of GIAnT.</b></font>
```
raw_h5 = f"{stack_path}/RAW-STACK.h5"
if not h5py.is_hdf5(raw_h5):
    print(f"Not an HDF5 file: {raw_h5}")
else:
    print(f"Confirmed: {raw_h5} is an HDF5 file.")
```
<font face='Calibri' size='4'> <b>3.2 Run ProcessStack.py </b> </font>
<br>
<font face='Calibri' size='3'> This seems to be an optional step; it applies atmospheric corrections and estimates orbit residuals. <br>
Inputs:
- HDF5 files from PrepIgramStack.py, RAW-STACK.h5
- data.xml
- sbas.xml
- GPS Data (optional; we don't have this)
- Weather models (downloaded automatically)
Outputs:
- HDF5 files, PROC-STACK.h5
These files are then fed into SBAS.
</font>
<br><br>
<font face='Calibri' size='3'><b>Display the help information for ProcessStack.py:</b></font>
```
!python $giant_path/ProcessStack.py -h
```
<font face='Calibri' size='3'><b>Run ProcessStack.py:</b></font>
```
!python $giant_path/ProcessStack.py
```
<hr>
<font face='Calibri'><font size='3'>ProcessStack.py creates a file called 'PROC-STACK.h5'.
<br><br>
<b>Verify that PROC-STACK.h5 is an HDF5 file as required by the rest of GIAnT:</b></font>
```
proc_h5 = f"{stack_path}/PROC-STACK.h5"
if not h5py.is_hdf5(proc_h5):
    print(f"Not an HDF5 file: {proc_h5}")
else:
    print(f"Confirmed: {proc_h5} is an HDF5 file.")
```
<font face='Calibri' size='4'> <b>3.3 Run SBASInvert.py </b></font>
<br>
<font face='Calibri' size='3'> This step performs the actual time-series inversion.
Inputs:
- HDF5 file, PROC-STACK.h5
- data.xml
- sbas.xml
Outputs:
- HDF5 file: LS-PARAMS.h5
<b>Display the help information for SBASInvert.py:</b>
</font>
```
!python $giant_path/SBASInvert.py -h
```
<font face='Calibri' size='3'><b>Run SBASInvert.py:</b></font>
```
!python $giant_path/SBASInvert.py
```
<hr>
<font face='Calibri'><font size='3'>SBASInvert.py creates a file called 'LS-PARAMS.h5'.
<br><br>
<b>Verify that LS-PARAMS.h5 is an HDF5 file as required by the rest of GIAnT:</b></font>
```
params_h5 = f"{stack_path}/LS-PARAMS.h5"
if not h5py.is_hdf5(params_h5):
    print(f"Not an HDF5 file: {params_h5}")
else:
    print(f"Confirmed: {params_h5} is an HDF5 file.")
```
<font face='Calibri' size='4'> <b>3.4 Run SBASxval.py </b></font>
<br>
<font face='Calibri' size='3'> Get an uncertainty estimate for each pixel and epoch using a Jackknife test. We are skipping this function as we won't be doing anything with its output and it takes a significant amount of time to run relative to the other GIAnT functions.
Inputs:
- HDF5 files, PROC-STACK.h5
- data.xml
- sbas.xml
Outputs:
- HDF5 file, LS-xval.h5
<br>
<b>Display the help information for SBASxval.py:</b></font>
```
#!python $giant_path/SBASxval.py -h
```
<font face='Calibri' size='3'><b>Run SBASxval.py:</b></font>
```
#!python $giant_path/SBASxval.py
```
<hr>
<font face='Calibri'><font size='3'>SBASxval.py creates a file called 'LS-xval.h5'.
<br><br>
<b>Verify that LS-xval.h5 is an HDF5 file as required by the rest of GIAnT:</b></font>
```
'''
xval_h5 = f"{stack_path}/LS-xval.h5"
if not h5py.is_hdf5(xval_h5):
    print(f"Not an HDF5 file: {xval_h5}")
else:
    print(f"Confirmed: {xval_h5} is an HDF5 file.")
'''
```
<font face='Calibri' size='5'><b>4. Data Visualization</b></font>
<br>
<font face='Calibri' size='3'>Now we visualize the data. This is largely copied from Lab 4.
<br><br>
<b>Create a directory in which to store our plots and move into it:</b></font>
```
plot_dir = f"{path}/plots"
if not os.path.exists(plot_dir):
    os.makedirs(plot_dir)
if os.path.exists(plot_dir):
    os.chdir(plot_dir)
    print(f"Current Working Directory: {os.getcwd()}")
```
<font face='Calibri' size='3'><b>Load the stack produced by GIAnT and read it into an array so we can manipulate and display it:</b></font>
```
f = h5py.File(params_h5, 'r')
```
<font face='Calibri' size='3'><b>List all groups (keys) within the HDF5 file that has been loaded into the object 'f':</b></font>
```
print("Keys: %s" %f.keys())
```
<font face='Calibri' size='3'>Details on what each of these keys means can be found in the GIAnT documentation. For now, the only keys with which we are concerned are <b>'recons'</b> (the filtered time series of each pixel) and <b>'dates'</b> (the dates of acquisition). It is important to note that the dates are given in a type of Julian Day number called Rata Die number. This will have to be converted later, but this can easily be done via one of several different methods in Python.</font>
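For example, Python's standard library `datetime.date` uses the same day-numbering convention (day 1 = 0001-01-01), so the conversion is a one-liner:
```
from datetime import date

rd = date(2016, 11, 19).toordinal()  # Rata Die number of the master date used in this lab
print(rd, date.fromordinal(rd))      # round-trips back to 2016-11-19
```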
<br><br>
<font face='Calibri' size='3'><b>Get our data from the stack:</b></font>
```
data_cube = f['recons'][()]
```
<font face='Calibri' size='3'><b>Get the dates for each raster from the stack:</b></font>
```
dates = list(f['dates']) # these dates appear to be given in Rata Die style: floor(Julian Day Number - 1721424.5).
if data_cube.shape[0] != len(dates):
    print('Problem:')
    print('Number of rasters in data_cube: ',data_cube.shape[0])
    print('Number of dates: ',len(dates))
```
<font face='Calibri' size='3'><b>Plot and save amplitude image with transparency determined by alpha (SierraNegra-dBScaled-AmplitudeImage.png):</b></font>
```
plt.rcParams.update({'font.size': 14})
radar_tiff = f"{path}/20161119-20170106_amp.tiff"
radar=gdal.Open(radar_tiff)
im_radar = radar.GetRasterBand(1).ReadAsArray()
radar = None
dbplot = np.ma.log10(im_radar)
vmin=np.percentile(dbplot,3)
vmax=np.percentile(dbplot,97)
fig = plt.figure(figsize=(18,10)) # Initialize figure with a size
ax1 = fig.add_subplot(111) # 111 determines: 1 row, 1 column, first plot
ax1.imshow(dbplot, cmap='gray',vmin=vmin,vmax=vmax,alpha=1);
plt.title('Example dB-scaled SAR Image for Ifgrm 20161119-20170106')
plt.grid()
plt.savefig('SierraNegra-dBScaled-AmplitudeImage.png',dpi=200,transparent=False)
```
<font face='Calibri' size='3'><b>Display and save an overlay of the clipped deformation map and amplitude image (SierraNegra-DeformationComposite.png):</b></font>
```
# We will define a short function that can plot an overlay of our radar image and deformation map.
def defNradar_plot(deformation, radar):
fig = plt.figure(figsize=(18, 10))
ax = fig.add_subplot(111)
vmin = np.percentile(radar, 3)
vmax = np.percentile(radar, 97)
ax.imshow(radar, cmap='gray', vmin=vmin, vmax=vmax)
fin_plot = ax.imshow(deformation, cmap='RdBu', vmin=-50.0, vmax=50.0, alpha=0.75)
fig.colorbar(fin_plot, fraction=0.24, pad=0.02)
ax.set(title="Integrated Defo [mm] Overlain on Clipped db-Scaled Amplitude Image")
plt.grid()
# Get deformation map and radar image we wish to plot
deformation = data_cube[data_cube.shape[0]-1]
# Call function to plot an overlay of our deformation map and radar image.
defNradar_plot(deformation, dbplot)
plt.savefig('SierraNegra-DeformationComposite.png', dpi=200, transparent=False)
```
<font face='Calibri' size='3'><b>Convert from Rata Die number (similar to Julian Day number) contained in 'dates' to Gregorian date:</b></font>
```
tindex = []
for d in dates:
tindex.append(date.fromordinal(int(d)))
```
<font face='Calibri' size='3'><b>Create an animation of the deformation</b></font>
```
%%capture
fig = plt.figure(figsize=(14, 8))
ax = fig.add_subplot(111)
ax.axis('off')
vmin=np.percentile(data_cube.flatten(), 5)
vmax=np.percentile(data_cube.flatten(), 95)
im = ax.imshow(data_cube[0], cmap='RdBu', vmin=-50.0, vmax=50.0)
ax.set_title("Animation of Deformation Time Series - Sierra Negra, Galapagos")
fig.colorbar(im)
plt.grid()
def animate(i):
ax.set_title("Date: {}".format(tindex[i]))
im.set_data(data_cube[i])
ani = matplotlib.animation.FuncAnimation(fig, animate, frames=data_cube.shape[0], interval=400)
```
<font face="Calibri" size="3"><b>Configure matplotlib's RC settings for the animation:</b></font>
```
rc('animation', embed_limit=10.0**9)
```
<font face="Calibri" size="3"><b>Create a javascript animation of the time-series running inline in the notebook:</b></font>
```
HTML(ani.to_jshtml())
```
<font face="Calibri" size="3"><b>Save the animation as a 'gif' file (SierraNegraDeformationTS.gif):</b></font>
```
ani.save('SierraNegraDeformationTS.gif', writer='pillow', fps=2)
```
<font face='Calibri'><font size='5'><b>5. Alter the time filter parameter</b></font><br>
<font size='3'>Looking at the video above, you may notice that the deformation has a very smoothed appearance. This may be because of our time filter which is currently set to 1 year ('filt=1.0' in the prepxml_SBAS.py code). Let's repeat the lab from there with 2 different time filters. <br>First, using no time filter ('filt=0.0') and then using a 1 month time filter ('filt=0.082'). Change the output file name for anything you want saved (e.g., 'SierraNegraDeformationTS.gif' to 'YourDesiredFileName.gif'). Otherwise, it will be overwritten. <br><br>How did these changes affect the output time series?<br>How might we figure out the right filter length?<br>What does this say about the parameters we select?
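<font face='Calibri' size='3'>Since 'filt' is expressed in years, a desired filter length converts directly; a quick sanity check of the values quoted above:</font>
```
# 'filt' is given in years: 1 year -> 1.0, roughly 1 month -> 30/365.25
print("no filter      : filt = 0.0")
print(f"1-month filter : filt = {30/365.25:.3f}")  # ~0.082
print("1-year filter  : filt = 1.0")
```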
<font face='Calibri'><font size='5'><b>6. Clear data (optional)</b></font>
<br>
<font size='3'>This lab has produced a large quantity of data. If you look at this notebook in your home directory, it should now be ~13 MB, which can take a long time to load in a Jupyter Notebook, so it may be useful to clear the cell outputs. <br>To clear the cell outputs, go to Cell -> All Output -> Clear. This will clear the outputs of the Jupyter Notebook and restore it to its original size of ~60 kB. It will not delete any of the files we have created. </font>
</font>
<font face="Calibri" size="2"> <i>GEOS 657-Lab9-InSARTimeSeriesAnalysis.ipynb - Version 1.2.0 - April 2021
<br>
<b>Version Changes:</b>
<ul>
<li>from osgeo import gdal</li>
<li>namespace asf_notebook</li>
</ul>
</i>
</font>
|
github_jupyter
|
# RDF graph processing against the integrated POIs
#### Auxiliary function to format SPARQL query results as a data frame:
```
import pandas as pds
def sparql_results_frame(qres):
cols = qres.vars
out = []
for row in qres:
item = []
for c in cols:
item.append(row[c])
out.append(item)
pds.set_option('display.max_colwidth', 0)
return pds.DataFrame(out, columns=cols)
```
#### Create an **RDF graph** with the triples resulting from data integration:
```
from rdflib import Graph,URIRef
g = Graph()
g.parse('./output/integrated.nt', format="nt")
# Get graph size (in number of statements)
len(g)
```
#### Number of statements per predicate:
```
# SPARQL query is used to retrieve the results from the graph
qres = g.query(
"""SELECT ?p (COUNT(*) AS ?cnt) {
?s ?p ?o .
} GROUP BY ?p ORDER BY DESC(?cnt)""")
# display unformatted query results
#for row in qres:
# print("%s %s" % row)
# display formatted query results
sparql_results_frame(qres)
```
#### Identify POIs having _**name**_ similar to a user-specified one:
```
# SPARQL query is used to retrieve the results from the graph
qres = g.query(
"""PREFIX slipo: <http://slipo.eu/def#>
PREFIX provo: <http://www.w3.org/ns/prov#>
SELECT DISTINCT ?poiURI ?title
WHERE { ?poiURI slipo:name ?n .
?n slipo:nameValue ?title .
FILTER regex(?title, "^Achilleio", "i")
}
""")
# display query results
sparql_results_frame(qres)
```
#### **Fusion action** regarding a specific POI:
```
# SPARQL query is used to retrieve the results from the graph
qres = g.query(
"""PREFIX slipo: <http://slipo.eu/def#>
PREFIX provo: <http://www.w3.org/ns/prov#>
SELECT ?prov ?defaultAction ?conf
WHERE { ?poiURI provo:wasDerivedFrom ?prov .
?poiURI slipo:name ?n .
?n slipo:nameValue ?title .
?poiURI slipo:address ?a .
?a slipo:street ?s .
?prov provo:default-fusion-action ?defaultAction .
?prov provo:fusion-confidence ?conf .
FILTER regex(?title, "Achilleio", "i")
}
""")
print("Query returned %d results." % len(qres) )
# display query results
sparql_results_frame(qres)
```
#### **Pair of original POIs** involved in this fusion:
```
# SPARQL query is used to retrieve the results from the graph
qres = g.query(
"""PREFIX slipo: <http://slipo.eu/def#>
PREFIX provo: <http://www.w3.org/ns/prov#>
SELECT ?leftURI ?rightURI ?conf
WHERE { <http://www.provbook.org/d494ddbd-9a98-39b0-bec9-0477636c42f7> provo:left-uri ?leftURI .
<http://www.provbook.org/d494ddbd-9a98-39b0-bec9-0477636c42f7> provo:right-uri ?rightURI .
<http://www.provbook.org/d494ddbd-9a98-39b0-bec9-0477636c42f7> provo:fusion-confidence ?conf .
}
""")
print("Query returned %d results." % len(qres))
# display pair of POI URIs along with the fusion confidence
sparql_results_frame(qres)
```
#### Values per attribute **before and after fusion** regarding this POI:
```
# SPARQL query is used to retrieve the results from the graph
qres = g.query(
"""PREFIX slipo: <http://slipo.eu/def#>
PREFIX provo: <http://www.w3.org/ns/prov#>
SELECT DISTINCT ?valLeft ?valRight ?valFused
WHERE { ?poiURI provo:wasDerivedFrom <http://www.provbook.org/d494ddbd-9a98-39b0-bec9-0477636c42f7> .
?poiURI provo:appliedAction ?action .
?action provo:attribute ?attr .
?action provo:left-value ?valLeft .
?action provo:right-value ?valRight .
?action provo:fused-value ?valFused .
}
""")
print("Query returned %d results." % len(qres))
# print query results
sparql_results_frame(qres)
```
# POI Analytics
#### Once the integrated POI data has been saved locally, analysis can be performed using tools like **pandas** _DataFrames_, **geopandas** _GeoDataFrames_ or other libraries.
#### Unzip exported CSV file with the results of data integration:
```
import os
import zipfile
with zipfile.ZipFile('./output/corfu-integrated-pois.zip','r') as zip_ref:
zip_ref.extractall("./output/")
os.rename('./output/points.csv', './output/corfu_pois.csv')
```
#### Load CSV data in a _DataFrame_:
```
import pandas as pd
pois = pd.read_csv('./output/corfu_pois.csv', delimiter='|', error_bad_lines=False)
# Geometries in the exported CSV file are listed in Extended Well-Known Text (EWKT)
# Since shapely does not support EWKT, update the geometry by removing the SRID value from EWKT
pois['the_geom'] = pois['the_geom'].apply(lambda x: x.split(';')[1])
pois.head()
```
#### Create a _GeoDataFrame_:
```
import geopandas
from shapely import wkt
pois['the_geom'] = pois['the_geom'].apply(wkt.loads)
gdf = geopandas.GeoDataFrame(pois, geometry='the_geom')
```
#### Display the location of the exported POIs on a **simplified plot** using _matplotlib_:
```
%matplotlib inline
import matplotlib.pyplot as plt
world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))
# Restrict focus to Greece:
ax = world[world.name == 'Greece'].plot(
color='white', edgecolor='black')
# Plot the contents of the GeoDataFrame in blue dots:
gdf.plot(ax=ax, color='blue')
plt.show()
```
|
github_jupyter
|
# Keras Intro: Shallow Models
Keras Documentation: https://keras.io
In this notebook we explore how to use Keras to implement 2 traditional Machine Learning models:
- **Linear Regression** to predict continuous data
- **Logistic Regression** to predict categorical data
## Linear Regression
```
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
```
### 0. Load data
```
df = pd.read_csv('../data/weight-height.csv')
df.head()
df.plot(kind='scatter',
x='Height',
y='Weight',
title='Weight and Height in adults')
```
### 1. Create Train/Test split
```
from sklearn.model_selection import train_test_split
X = df[['Height']].values
y = df['Weight'].values
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size = 0.3, random_state=0)
```
### 2. Train Linear Regression Model
```
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam, SGD
model = Sequential()
model.add(Dense(1, input_shape=(1,)))
model.summary()
model.compile(Adam(lr=0.9), 'mean_squared_error')
model.fit(X_train, y_train, epochs=40)
```
### 3. Evaluate Model Performance
```
from sklearn.metrics import r2_score
y_train_pred = model.predict(X_train).ravel()
y_test_pred = model.predict(X_test).ravel()
print("The R2 score on the Train set is:\t{:0.3f}".format(r2_score(y_train, y_train_pred)))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test, y_test_pred)))
df.plot(kind='scatter',
x='Height',
y='Weight',
title='Weight and Height in adults')
plt.plot(X_test, y_test_pred, color='red')
W, B = model.get_weights()
W
B
```
# Classification
### 0. Load Data
```
df = pd.read_csv('../data/user_visit_duration.csv')
df.head()
df.plot(kind='scatter', x='Time (min)', y='Buy')
```
### 1. Create Train/Test split
```
X = df[['Time (min)']].values
y = df['Buy'].values
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size = 0.3, random_state=0)
```
### 2. Train Logistic Regression Model
```
model = Sequential()
model.add(Dense(1, input_shape=(1,), activation='sigmoid'))
model.summary()
model.compile(SGD(lr=0.5), 'binary_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=40)
ax = df.plot(kind='scatter', x='Time (min)', y='Buy',
title='Purchase behavior VS time spent on site')
t = np.linspace(0, 4)
ax.plot(t, model.predict(t), color='orange')
plt.legend(['model', 'data'])
```
### 3. Evaluate Model Performance
#### Accuracy
```
from sklearn.metrics import accuracy_score
y_train_pred = model.predict_classes(X_train)
y_test_pred = model.predict_classes(X_test)
print("The train accuracy score is {:0.3f}".format(accuracy_score(y_train, y_train_pred)))
print("The test accuracy score is {:0.3f}".format(accuracy_score(y_test, y_test_pred)))
```
#### Confusion Matrix & Classification Report
```
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, y_test_pred)
def pretty_confusion_matrix(y_true, y_pred, labels=["False", "True"]):
cm = confusion_matrix(y_true, y_pred)
pred_labels = ['Predicted '+ l for l in labels]
df = pd.DataFrame(cm, index=labels, columns=pred_labels)
return df
pretty_confusion_matrix(y_test, y_test_pred, ['Not Buy', 'Buy'])
from sklearn.metrics import classification_report
print(classification_report(y_test, y_test_pred))
```
## Exercise
You've just been hired at a real estate investment firm and they would like you to build a model for pricing houses. You are given a dataset that contains data for house prices and a few features like number of bedrooms, size in square feet and age of the house. Let's see if you can build a model that is able to predict the price. In this exercise we extend what we have learned about linear regression to a dataset with more than one feature. Here are the steps to complete it:
1. Load the dataset ../data/housing-data.csv
- create 2 variables called X and y: X shall be a matrix with 3 columns (sqft,bdrms,age) and y shall be a vector with 1 column (price)
- create a linear regression model in Keras with the appropriate number of inputs and output
- split the data into train and test with a 20% test size, use `random_state=0` for consistency with classmates
- train the model on the training set and check its accuracy on training and test set
- how's your model doing? Is the loss decreasing?
- try to improve your model with these experiments:
- normalize the input features:
- divide sqft by 1000
- divide age by 10
- divide price by 100000
- use a different value for the learning rate of your model
- use a different optimizer
- once you're satisfied with training, check the R2 score on the test set
```
# Load the dataset ../data/housing-data.csv
df = pd.read_csv('../data/housing-data.csv')
df.head()
df.columns
# create 2 variables called X and y:
# X shall be a matrix with 3 columns (sqft,bdrms,age)
# and y shall be a vector with 1 column (price)
X = df[['sqft', 'bdrms', 'age']].values
y = df['price'].values
# create a linear regression model in Keras
# with the appropriate number of inputs and output
model = Sequential()
model.add(Dense(1, input_shape=(3,)))
model.compile(Adam(lr=0.8), 'mean_squared_error')
# split the data into train and test with a 20% test size
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# train the model on the training set and check its accuracy on training and test set
# how's your model doing? Is the loss decreasing?
model.fit(X_train, y_train, epochs=50)
# check the R2 score on training and test set (probably very bad)
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
print("The R2 score on the Train set is:\t{:0.3f}".format(r2_score(y_train, y_train_pred)))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test, y_test_pred)))
# try to improve your model with these experiments:
# - normalize the input features with one of the rescaling techniques mentioned above
# - use a different value for the learning rate of your model
# - use a different optimizer
df['sqft1000'] = df['sqft']/1000.0
df['age10'] = df['age']/10.0
df['price100k'] = df['price']/1e5
X = df[['sqft1000', 'bdrms', 'age10']].values
y = df['price100k'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = Sequential()
model.add(Dense(1, input_dim=3))
model.compile(Adam(lr=0.1), 'mean_squared_error')
model.fit(X_train, y_train, epochs=50)
# once you're satisfied with training, check the R2 score on the test set
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
print("The R2 score on the Train set is:\t{:0.3f}".format(r2_score(y_train, y_train_pred)))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test, y_test_pred)))
```
|
github_jupyter
|
```
import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
from scipy import stats
import tensorflow as tf
import seaborn as sns
from pylab import rcParams
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
%matplotlib inline
sns.set(style='whitegrid', palette='muted', font_scale=1.5)
rcParams['figure.figsize'] = 14, 8
RANDOM_SEED = 42
df=pd.read_csv('TreeData.csv')
# df.head(22)
df.info()
N_TIME_STEPS = 250
N_FEATURES = 128 #128
step = 10 # 20
segments = []
for i in range(0, len(df) - N_TIME_STEPS, step):
ch = []
for j in range(0, N_FEATURES):
ch.append(df.iloc[:, j].values[i: i + N_TIME_STEPS])
segments.append(ch)
labels = []
for i in range(0, len(df) - N_TIME_STEPS, step):
label = stats.mode(df['Label'][i: i + N_TIME_STEPS])[0][0]
labels.append(label)
labelsl = np.asarray(pd.get_dummies(labels), dtype = np.float32)
#print(labelsl)
reshaped_segments = np.asarray(segments, dtype= np.float32).reshape(-1, N_TIME_STEPS, N_FEATURES)
X_train, X_test, y_train, y_test = train_test_split(
reshaped_segments, labelsl, test_size=0.2, random_state=RANDOM_SEED)
print(np.array(segments).shape, reshaped_segments.shape, labelsl[0], len(X_train), len(X_test))
```
# Building the model
```
N_CLASSES = 2
N_HIDDEN_UNITS = 64
# https://medium.com/@curiousily/human-activity-recognition-using-lstms-on-android-tensorflow-for-hackers-part-vi-492da5adef64
def create_LSTM_model(inputs):
W = {
'hidden': tf.Variable(tf.random_normal([N_FEATURES, N_HIDDEN_UNITS])),
'output': tf.Variable(tf.random_normal([N_HIDDEN_UNITS, N_CLASSES]))
}
biases = {
'hidden': tf.Variable(tf.random_normal([N_HIDDEN_UNITS], mean=1.0)),
'output': tf.Variable(tf.random_normal([N_CLASSES]))
}
X = tf.transpose(inputs, [1, 0, 2])
X = tf.reshape(X, [-1, N_FEATURES])
hidden = tf.nn.relu(tf.matmul(X, W['hidden']) + biases['hidden'])
hidden = tf.split(hidden, N_TIME_STEPS, 0)
# Stack 2 LSTM layers
lstm_layers = [tf.contrib.rnn.BasicLSTMCell(N_HIDDEN_UNITS, forget_bias=1.0) for _ in range(2)]
lstm_layers = tf.contrib.rnn.MultiRNNCell(lstm_layers)
outputs, _ = tf.contrib.rnn.static_rnn(lstm_layers, hidden, dtype=tf.float32)
# Get output for the last time step
lstm_last_output = outputs[-1]
return tf.matmul(lstm_last_output, W['output']) + biases['output']
tf.reset_default_graph()
X = tf.placeholder(tf.float32, [None, N_TIME_STEPS, N_FEATURES], name="input")
Y = tf.placeholder(tf.float32, [None, N_CLASSES])
pred_Y = create_LSTM_model(X)
pred_softmax = tf.nn.softmax(pred_Y, name="y_")
L2_LOSS = 0.0015
l2 = L2_LOSS * \
sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits = pred_Y, labels = Y)) + l2
LEARNING_RATE = 0.0025
optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE).minimize(loss)
correct_pred = tf.equal(tf.argmax(pred_softmax, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, dtype=tf.float32))
```
# Training
```
N_EPOCHS = 50 # 50
BATCH_SIZE = 1024 # 1024
# https://medium.com/@curiousily/human-activity-recognition-using-lstms-on-android-tensorflow-for-hackers-part-vi-492da5adef64
saver = tf.train.Saver()
history = dict(train_loss=[],
train_acc=[],
test_loss=[],
test_acc=[])
sess=tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
train_count = len(X_train)
for i in range(1, N_EPOCHS + 1):
for start, end in zip(range(0, train_count, BATCH_SIZE),
range(BATCH_SIZE, train_count + 1,BATCH_SIZE)):
sess.run(optimizer, feed_dict={X: X_train[start:end],
Y: y_train[start:end]})
_, acc_train, loss_train = sess.run([pred_softmax, accuracy, loss], feed_dict={
X: X_train, Y: y_train})
_, acc_test, loss_test = sess.run([pred_softmax, accuracy, loss], feed_dict={
X: X_test, Y: y_test})
history['train_loss'].append(loss_train)
history['train_acc'].append(acc_train)
history['test_loss'].append(loss_test)
history['test_acc'].append(acc_test)
# if i != 1 and i % 10 != 0:
# continue
print(f'epoch: {i} test accuracy: {acc_test} loss: {loss_test}')
predictions, acc_final, loss_final = sess.run([pred_softmax, accuracy, loss], feed_dict={X: X_test, Y: y_test})
print()
print(f'final results: accuracy: {acc_final} loss: {loss_final}')
```
# Evaluation
```
# https://medium.com/@curiousily/human-activity-recognition-using-lstms-on-android-tensorflow-for-hackers-part-vi-492da5adef64
plt.figure(figsize=(12, 8))
plt.plot(np.array(history['train_loss']), "r--", label="Train loss")
plt.plot(np.array(history['train_acc']), "g--", label="Train accuracy")
plt.plot(np.array(history['test_loss']), "r-", label="Test loss")
plt.plot(np.array(history['test_acc']), "g-", label="Test accuracy")
plt.title("Training session's progress over iterations")
plt.legend(loc='upper right', shadow=True)
plt.ylabel('Training Progress (Loss or Accuracy values)')
plt.xlabel('Training Epoch')
plt.ylim(0)
plt.show()
```
# Saving Model
```
import os
file_info = [N_HIDDEN_UNITS, BATCH_SIZE, N_EPOCHS]
dirname = os.path.dirname("nhid-{}_bat-{}_nepoc-{}/dumps/".format(*file_info))
if not os.path.exists(dirname):
os.makedirs(dirname)
dirname = os.path.dirname("nhid-{}_bat-{}_nepoc-{}/logs/".format(*file_info))
if not os.path.exists(dirname):
os.makedirs(dirname)
pickle.dump(predictions, open("nhid-{}_bat-{}_nepoc-{}/dumps/predictions.p".format(*file_info), "wb"))
pickle.dump(history, open("nhid-{}_bat-{}_nepoc-{}/dumps/history.p".format(*file_info), "wb"))
tf.train.write_graph(sess.graph, "nhid-{}_bat-{}_nepoc-{}/logs".format(*file_info), 'har.pbtxt')
saver.save(sess, 'nhid-{}_bat-{}_nepoc-{}/logs/har.ckpt'.format(*file_info))
writer = tf.summary.FileWriter('nhid-{}_bat-{}_nepoc-{}/logs'.format(*file_info))
writer.add_graph(sess.graph)
```
|
github_jupyter
|
# VAST 2017 MC-1
## Task
The Boonsong Lekagul Nature Preserve is used by local residents and tourists for day trips, overnight camping, and sometimes simply to reach the main highways on the opposite sides of the preserve.
The preserve's entrance booths are monitored both to collect revenue and to track usage. Vehicles entering and leaving the preserve must pay a toll that depends on their number of axles (passenger car, recreational trailer, semi-trailer, etc.).
This produces a data stream with entry/exit timestamps and the vehicle type. There are also other locations in the preserve that record passing traffic. While travelling through different parts of the preserve, Mitch noticed strange vehicle behaviour that, in his opinion, does not match the kinds of park visitors he expected. If Mitch could somehow analyse how vehicles behave in the park over time, it could help him in his investigation.
### Sample of the source data
### Required imports
```
import sqlite3
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from SOM import SOM
import seaborn as sb
```
## Data preparation
Using Python, we parsed the data from the table and then grouped the sensor records by Car-Id, thereby obtaining, for each vehicle, the full sequence of sensors it passed, i.e. its path.
```
data_set = open("Data/Lekagul Sensor Data.csv", "r")
data = data_set.readlines()
data_set.close()
traces = []
gates = set()
for line in data:
args = line.split(";")
gates.add(args[3])
traces.append(args)
gates = sorted(gates)
groupedTraces = {}  # must be a dict keyed by Car-Id, not a list
for t in traces:
    if t[1] in groupedTraces:
        groupedTraces[t[1]].append(t)
    else:
        groupedTraces[t[1]] = [t]
```
Extract the vehicle type for each vehicle from its list of traces.
```
target = []
for x in groupedTraces:
    target.append(groupedTraces[x][0][2])  # vehicle type is column 2 of the first record in each group
targets = []
for rec in target:
if rec == '2P':
targets.append('7')
else:
targets.append(str(rec))
print(targets)
```
We normalized the sensor counts by the total number of sensors passed (if a vehicle never interacted with a sensor, it is assigned the value 0). The result is a vector whose coordinates are the sensors, with values in [0, 1].
```
vectors = []
for name, gt in groupedTraces.items():
groupGates = np.zeros(len(gates))
for t in gt:
groupGates[gates.index(t[3])] += 1
vector = []
for rec in groupGates:
vector.append(str(rec))
# vectors.write(";")
vectors.append(vector)
for vector in vectors:
print(vector)
```
## Conclusion
### SOM
Working with the SOM algorithm revealed the following pros and cons.
Pros:
1. The algorithm is both a dimensionality-reduction and a data-visualization tool;
2. Clusters are clearly visible on the map of activated neurons displayed together with the data points;
3. Anomalies are clearly visible for any choice of parameters, which let us quickly answer the main question of the challenge;
4. Comparing an intuitive partition with the k-means partition, we found only a small discrepancy, which suggests that the SOM map is well suited for identifying clusters in the data (a k-means sketch is given below).
Minor drawbacks: the algorithm is resource-intensive, which means long run times, and the choice of visualizations for its results is limited (the SOM map and the U-matrix).
The most representative visualization was obtained with a 20x20 map, 10000 generations, and PCA initialization.
We were able to detect the anomaly by noticing that a 4-axle truck travels along the ranger-truck routes even though it has no permission to do so. In the visualization, the colouring of the ranger trucks shows inclusions of the 4-axle-truck colour.
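The k-means partition mentioned in point 4 is not shown in this fragment; a minimal sketch of such a baseline on the per-vehicle gate-count vectors built above (the cluster count is an arbitrary, illustrative choice) could look like this:
```
# Illustrative k-means baseline on the gate-count vectors built above
import numpy as np
from sklearn.cluster import KMeans

X = np.array(vectors, dtype=float)             # one row per vehicle, one column per gate
kmeans = KMeans(n_clusters=7, random_state=0)  # 7 clusters is an arbitrary, illustrative choice
labels = kmeans.fit_predict(X)
print(np.bincount(labels))                     # cluster sizes
```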
|
github_jupyter
|
# Elevation indices
Here we assume that flow directions are known. We read the flow direction raster data, including meta-data, using [rasterio](https://rasterio.readthedocs.io/en/latest/) and parse it to a pyflwdir `FlwDirRaster` object, see earlier examples for more background.
```
# import pyflwdir, some dependencies and convenience methods
import numpy as np
import rasterio
import pyflwdir
# local convenience methods (see utils.py script in notebooks folder)
from utils import quickplot, plt # data specific quick plot method
# read and parse flow direction data
with rasterio.open("rhine_d8.tif", "r") as src:
flwdir = src.read(1)
crs = src.crs
extent = np.array(src.bounds)[[0, 2, 1, 3]]
flw = pyflwdir.from_array(
flwdir,
ftype="d8",
transform=src.transform,
latlon=crs.is_geographic,
cache=True,
)
# read elevation data
with rasterio.open("rhine_elv0.tif", "r") as src:
elevtn = src.read(1)
```
## height above nearest drain (HAND)
The [hand()](reference.rst#pyflwdir.FlwdirRaster.hand) method uses drainage-normalized topography and flowpaths to delineate the relative vertical distances (drop) to the nearest river (drain) as a proxy for the potential extent of flooding ([Nobre et al. 2016](https://doi.org/10.1002/hyp.10581)). The pyflwdir implementation requires stream mask `drain` and elevation raster `elevtn`. The stream mask is typically determined based on a threshold on [upstream_area()](reference.rst#pyflwdir.FlwdirRaster.upstream_area) or [stream_order()](reference.rst#pyflwdir.FlwdirRaster.stream_order), but can also be set from rasterizing a vector stream file.
```
# first we derive the upstream area map
uparea = flw.upstream_area("km2")
# HAND based on streams defined by a minimal upstream area of 1000 km2
hand = flw.hand(drain=uparea > 1000, elevtn=elevtn)
# plot
ax = quickplot(title="Height above nearest drain (HAND)")
im = ax.imshow(
np.ma.masked_equal(hand, -9999),
extent=extent,
cmap="gist_earth_r",
alpha=0.5,
vmin=0,
vmax=150,
)
fig = plt.gcf()
cax = fig.add_axes([0.82, 0.37, 0.02, 0.12])
fig.colorbar(im, cax=cax, orientation="vertical")
cax.set_ylabel("HAND [m]")
plt.savefig("hand.png")
```
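As noted above, the drains can also be defined from a Strahler stream order instead of an upstream-area threshold; a minimal variant (the order threshold of 7 is an arbitrary choice for illustration) could be:
```
# alternative drain definition based on Strahler stream order (threshold is illustrative)
strord = flw.stream_order()
hand_strord = flw.hand(drain=strord >= 7, elevtn=elevtn)
```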
## Floodplains
The [floodplains()](reference.rst#pyflwdir.FlwdirRaster.floodplains) method delineates geomorphic floodplain boundaries based on a power-law relation between upstream area and a maximum HAND contour as developed by [Nardi et al (2019)](http://www.doi.org/10.1038/sdata.2018.309). Here, streams are defined based on a minimum upstream area threshold `upa_min` and floodplains on the scaling parameter `b` of the power-law relationship.
```
floodplains = flw.floodplains(elevtn=elevtn, uparea=uparea, upa_min=1000)
# plot
floodmap = (floodplains, -1, dict(cmap="Blues", alpha=0.5, vmin=0))
ax = quickplot(
raster=floodmap, title="Geomorphic floodplains", filename="flw_floodplain"
)
```
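As a rough, illustrative sketch of the power-law idea itself (not a call into pyflwdir), the maximum HAND contour h grows with upstream area A as h = a * A**b; with hypothetical coefficients a and b:
```
# illustrative only: power-law scaling of the max HAND contour with upstream area
a, b = 1.0, 0.3  # hypothetical coefficients, not taken from this notebook
for A in [10, 100, 1000, 10000]:  # upstream area in km2
    print(f"A = {A:>6} km2 -> max HAND contour h = {a * A**b:.1f} m")
```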
|
github_jupyter
|
Note: range sliders and range selectors are available in version 1.9.7+
Run pip install plotly --upgrade to update your Plotly version
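From a notebook cell, the upgrade can be run directly:
```
!pip install plotly --upgrade
```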
```
import plotly
plotly.__version__
```
## Basic Range Slider and Range Selectors
```
from cswd import query_adjusted_pricing
OHLCV = ['open','high','low','close','volume']
df = query_adjusted_pricing('000001','2007-10-1','2009-4-1',OHLCV,True)
import plotly.plotly as py
import plotly.graph_objs as go
from datetime import datetime
trace = go.Scatter(x=df.index,
y=df.high)
data = [trace]
layout = dict(
title='Time series with range slider and selectors',
xaxis=dict(
rangeselector=dict(
buttons=list([
dict(count=1,
label='1w',
step='week',
stepmode='backward'),
dict(count=6,
label='6m',
step='month',
stepmode='backward'),
dict(count=1,
label='YTD',
step='year',
stepmode='todate'),
dict(count=1,
label='1y',
step='year',
stepmode='backward'),
dict(step='all')
])
),
rangeslider=dict(),
type='date'
)
)
fig = dict(data=data, layout=layout)
py.iplot(fig)
```
## Range Slider with Vertically Stacked Subplots
```
import plotly.plotly as py
import plotly.graph_objs as go
trace1 = go.Scatter(
x = ["2013-01-15", "2013-01-29", "2013-02-26", "2013-04-19", "2013-07-02", "2013-08-27",
"2013-10-22", "2014-01-20", "2014-05-05", "2014-07-01", "2015-02-09", "2015-04-13",
"2015-05-13", "2015-06-08", "2015-08-05", "2016-02-25"],
y = ["8", "3", "2", "10", "5", "5", "6", "8", "3", "3", "7", "5", "10", "10", "9", "14"],
name = "var0",
text = ["8", "3", "2", "10", "5", "5", "6", "8", "3", "3", "7", "5", "10", "10", "9", "14"],
yaxis = "y",
)
trace2 = go.Scatter(
x = ["2015-04-13", "2015-05-13", "2015-06-08", "2015-08-05", "2016-02-25"],
y = ["53.0", "69.0", "89.0", "41.0", "41.0"],
name = "var1",
text = ["53.0", "69.0", "89.0", "41.0", "41.0"],
yaxis = "y2",
)
trace3 = go.Scatter(
x = ["2013-01-29", "2013-02-26", "2013-04-19", "2013-07-02", "2013-08-27", "2013-10-22",
"2014-01-20", "2014-04-09", "2014-05-05", "2014-07-01", "2014-09-30", "2015-02-09",
"2015-04-13", "2015-06-08", "2016-02-25"],
y = ["9.6", "4.6", "2.7", "8.3", "18", "7.3", "3", "7.5", "1.0", "0.5", "2.8", "9.2",
"13", "5.8", "6.9"],
name = "var2",
text = ["9.6", "4.6", "2.7", "8.3", "18", "7.3", "3", "7.5", "1.0", "0.5", "2.8", "9.2",
"13", "5.8", "6.9"],
yaxis = "y3",
)
trace4 = go.Scatter(
x = ["2013-01-29", "2013-02-26", "2013-04-19", "2013-07-02", "2013-08-27", "2013-10-22",
"2014-01-20", "2014-04-09", "2014-05-05", "2014-07-01", "2014-09-30", "2015-02-09",
"2015-04-13", "2015-06-08", "2016-02-25"],
y = ["6.9", "7.5", "7.3", "7.3", "6.9", "7.1", "8", "7.8", "7.4", "7.9", "7.9", "7.6",
"7.2", "7.2", "8.0"],
name = "var3",
text = ["6.9", "7.5", "7.3", "7.3", "6.9", "7.1", "8", "7.8", "7.4", "7.9", "7.9", "7.6",
"7.2", "7.2", "8.0"],
yaxis = "y4",
)
trace5 = go.Scatter(
x = ["2013-02-26", "2013-07-02", "2013-09-26", "2013-10-22", "2013-12-04", "2014-01-02",
"2014-01-20", "2014-05-05", "2014-07-01", "2015-02-09", "2015-05-05"],
y = ["290", "1078", "263", "407", "660", "740", "33", "374", "95", "734", "3000"],
name = "var4",
text = ["290", "1078", "263", "407", "660", "740", "33", "374", "95", "734", "3000"],
yaxis = "y5",
)
data = go.Data([trace1, trace2, trace3, trace4, trace5])
# style all the traces
for k in range(len(data)):
data[k].update(
{
"type": "scatter",
"hoverinfo": "name+x+text",
"line": {"width": 0.5},
"marker": {"size": 8},
"mode": "lines+markers",
"showlegend": False
}
)
layout = {
"annotations": [
{
"x": "2013-06-01",
"y": 0,
"arrowcolor": "rgba(63, 81, 181, 0.2)",
"arrowsize": 0.3,
"ax": 0,
"ay": 30,
"text": "state1",
"xref": "x",
"yanchor": "bottom",
"yref": "y"
},
{
"x": "2014-09-13",
"y": 0,
"arrowcolor": "rgba(76, 175, 80, 0.1)",
"arrowsize": 0.3,
"ax": 0,
"ay": 30,
"text": "state2",
"xref": "x",
"yanchor": "bottom",
"yref": "y"
}
],
"dragmode": "zoom",
"hovermode": "x",
"legend": {"traceorder": "reversed"},
"margin": {
"t": 100,
"b": 100
},
"shapes": [
{
"fillcolor": "rgba(63, 81, 181, 0.2)",
"line": {"width": 0},
"type": "rect",
"x0": "2013-01-15",
"x1": "2013-10-17",
"xref": "x",
"y0": 0,
"y1": 0.95,
"yref": "paper"
},
{
"fillcolor": "rgba(76, 175, 80, 0.1)",
"line": {"width": 0},
"type": "rect",
"x0": "2013-10-22",
"x1": "2015-08-05",
"xref": "x",
"y0": 0,
"y1": 0.95,
"yref": "paper"
}
],
"xaxis": {
"autorange": True,
"range": ["2012-10-31 18:36:37.3129", "2016-05-10 05:23:22.6871"],
"rangeslider": {
"autorange": True,
"range": ["2012-10-31 18:36:37.3129", "2016-05-10 05:23:22.6871"]
},
"type": "date"
},
"yaxis": {
"anchor": "x",
"autorange": True,
"domain": [0, 0.2],
"linecolor": "#673ab7",
"mirror": True,
"range": [-60.0858369099, 28.4406294707],
"showline": True,
"side": "right",
"tickfont": {"color": "#673ab7"},
"tickmode": "auto",
"ticks": "",
"titlefont": {"color": "#673ab7"},
"type": "linear",
"zeroline": False
},
"yaxis2": {
"anchor": "x",
"autorange": True,
"domain": [0.2, 0.4],
"linecolor": "#E91E63",
"mirror": True,
"range": [29.3787777032, 100.621222297],
"showline": True,
"side": "right",
"tickfont": {"color": "#E91E63"},
"tickmode": "auto",
"ticks": "",
"titlefont": {"color": "#E91E63"},
"type": "linear",
"zeroline": False
},
"yaxis3": {
"anchor": "x",
"autorange": True,
"domain": [0.4, 0.6],
"linecolor": "#795548",
"mirror": True,
"range": [-3.73690396239, 22.2369039624],
"showline": True,
"side": "right",
"tickfont": {"color": "#795548"},
"tickmode": "auto",
"ticks": "",
"title": "mg/L",
"titlefont": {"color": "#795548"},
"type": "linear",
"zeroline": False
},
"yaxis4": {
"anchor": "x",
"autorange": True,
"domain": [0.6, 0.8],
"linecolor": "#607d8b",
"mirror": True,
"range": [6.63368032236, 8.26631967764],
"showline": True,
"side": "right",
"tickfont": {"color": "#607d8b"},
"tickmode": "auto",
"ticks": "",
"title": "mmol/L",
"titlefont": {"color": "#607d8b"},
"type": "linear",
"zeroline": False
},
"yaxis5": {
"anchor": "x",
"autorange": True,
"domain": [0.8, 1],
"linecolor": "#2196F3",
"mirror": True,
"range": [-685.336803224, 3718.33680322],
"showline": True,
"side": "right",
"tickfont": {"color": "#2196F3"},
"tickmode": "auto",
"ticks": "",
"title": "mg/Kg",
"titlefont": {"color": "#2196F3"},
"type": "linear",
"zeroline": False
}
}
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
```
|
github_jupyter
|
# Kernel density estimation
```
# Import all libraries needed for the exploration
# General syntax to import specific functions in a library:
##from (library) import (specific library function)
from pandas import DataFrame, read_csv
# General syntax to import a library but no functions:
##import (library) as (give the library a nickname/alias)
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd #this is how we usually import pandas
import numpy as np #this is how we usually import numpy
import sys #only needed to determine Python version number
import matplotlib #only needed to determine Matplotlib version number
import tables # pytables is needed to read and write hdf5 files
import openpyxl # is used to read and write MS Excel files
import xgboost
import math
from scipy.stats import pearsonr
from sklearn.linear_model import LinearRegression
from sklearn import tree, linear_model
from sklearn.model_selection import cross_validate, cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import explained_variance_score
from sklearn.neighbors import KernelDensity
from scipy.stats import gaussian_kde
from statsmodels.nonparametric.kde import KDEUnivariate
from statsmodels.nonparametric.kernel_density import KDEMultivariate
# Enable inline plotting
%matplotlib inline
# Supress some warnings:
import warnings
warnings.filterwarnings('ignore')
print('Python version ' + sys.version)
print('Pandas version ' + pd.__version__)
print('Numpy version ' + np.__version__)
print('Matplotlib version ' + matplotlib.__version__)
print('Seaborn version ' + sns.__version__)
```
## Training data
```
data = pd.read_csv('../data/train.csv')
```
### Explore the data
```
# Check the number of data points in the data set
print('No observations:', len(data))
# Check the number of features in the data set
print('No variables:', len(data.columns))
# Check the data types
print(data.dtypes.unique())
data.shape
data.columns
for i, col in enumerate(data.columns, start=0):
print(i, col)
# We may have some categorical features, let's check them
data.select_dtypes(include=['O']).columns.tolist()
# Check any number of columns with NaN
print(data.isnull().any().sum(), ' / ', len(data.columns))
# Check number of data points with any NaN
print(data.isnull().any(axis=1).sum(), ' / ', len(data))
```
### Select features and targets
```
features = data.iloc[:,9:-1].columns.tolist()
target = data.iloc[:,-1].name
all_lh_features = [
'CSF', 'CC_Posterior', 'CC_Mid_Posterior', 'CC_Central', 'CC_Mid_Anterior', 'CC_Anterior', 'EstimatedTotalIntraCranialVol',
'Left-Lateral-Ventricle',
'Left-Inf-Lat-Vent',
'Left-Cerebellum-White-Matter',
'Left-Cerebellum-Cortex',
'Left-Thalamus-Proper',
'Left-Caudate',
'Left-Putamen',
'Left-Pallidum',
'Left-Hippocampus',
'Left-Amygdala',
'Left-Accumbens-area',
'Left-VentralDC',
'Left-vessel',
'Left-choroid-plexus',
'Left-WM-hypointensities',
'Left-non-WM-hypointensities',
'lhCortexVol',
'lhCerebralWhiteMatterVol',
'lhSurfaceHoles',
'lh.aparc.thickness',
'lh_bankssts_thickness',
'lh_caudalanteriorcingulate_thickness',
'lh_caudalmiddlefrontal_thickness',
'lh_cuneus_thickness',
'lh_entorhinal_thickness',
'lh_fusiform_thickness',
'lh_inferiorparietal_thickness',
'lh_inferiortemporal_thickness',
'lh_isthmuscingulate_thickness',
'lh_lateraloccipital_thickness',
'lh_lateralorbitofrontal_thickness',
'lh_lingual_thickness',
'lh_medialorbitofrontal_thickness',
'lh_middletemporal_thickness',
'lh_parahippocampal_thickness',
'lh_paracentral_thickness',
'lh_parsopercularis_thickness',
'lh_parsorbitalis_thickness',
'lh_parstriangularis_thickness',
'lh_pericalcarine_thickness',
'lh_postcentral_thickness',
'lh_posteriorcingulate_thickness',
'lh_precentral_thickness',
'lh_precuneus_thickness',
'lh_rostralanteriorcingulate_thickness',
'lh_rostralmiddlefrontal_thickness',
'lh_superiorfrontal_thickness',
'lh_superiorparietal_thickness',
'lh_superiortemporal_thickness',
'lh_supramarginal_thickness',
'lh_frontalpole_thickness',
'lh_temporalpole_thickness',
'lh_transversetemporal_thickness',
'lh_insula_thickness',
'lh_MeanThickness_thickness'
]
data_lh = data[all_lh_features]
data_lh.describe().T
dropcolumns = [
'EstimatedTotalIntraCranialVol',
'CSF',
'CC_Posterior',
'CC_Mid_Posterior',
'CC_Central',
'CC_Mid_Anterior',
'CC_Anterior'
]
df_lh = data_lh.drop(dropcolumns, axis=1)
df_lh
target
from sklearn.neighbors import KernelDensity
from scipy.stats import gaussian_kde
from statsmodels.nonparametric.kde import KDEUnivariate
from statsmodels.nonparametric.kernel_density import KDEMultivariate
def kde_scipy(x, x_grid, bandwidth=0.2, **kwargs):
"""Kernel Density Estimation with Scipy"""
# Note that scipy weights its bandwidth by the covariance of the
# input data. To make the results comparable to the other methods,
# we divide the bandwidth by the sample standard deviation here.
kde = gaussian_kde(x, bw_method=bandwidth / x.std(ddof=1), **kwargs)
return kde.evaluate(x_grid)
def kde_statsmodels_u(x, x_grid, bandwidth=0.2, **kwargs):
"""Univariate Kernel Density Estimation with Statsmodels"""
kde = KDEUnivariate(x)
kde.fit(bw=bandwidth, **kwargs)
return kde.evaluate(x_grid)
def kde_statsmodels_m(x, x_grid, bandwidth=0.2, **kwargs):
"""Multivariate Kernel Density Estimation with Statsmodels"""
kde = KDEMultivariate(x, bw=bandwidth * np.ones_like(x),
var_type='c', **kwargs)
return kde.pdf(x_grid)
def kde_sklearn(x, x_grid, bandwidth=0.2, **kwargs):
"""Kernel Density Estimation with Scikit-learn"""
kde_skl = KernelDensity(bandwidth=bandwidth, **kwargs)
kde_skl.fit(x[:, np.newaxis])
# score_samples() returns the log-likelihood of the samples
log_pdf = kde_skl.score_samples(x_grid[:, np.newaxis])
return np.exp(log_pdf)
kde_funcs = [kde_statsmodels_u, kde_statsmodels_m, kde_scipy, kde_sklearn]
kde_funcnames = ['Statsmodels-U', 'Statsmodels-M', 'Scipy', 'Scikit-learn']
print("Package Versions:")
import sklearn; print("  scikit-learn:", sklearn.__version__)
import scipy; print("  scipy:", scipy.__version__)
import statsmodels; print("  statsmodels:", statsmodels.__version__)
```
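The four estimators defined above are not called later in this notebook fragment; as a minimal, hypothetical smoke test, two of them can be compared on the `Age` column (the bandwidth below is an arbitrary choice, and the Statsmodels variants can be swapped in the same way):
```
# Hypothetical comparison of two of the KDE implementations on the Age column
x = data['Age'].dropna().values.astype(float)
x_grid = np.linspace(x.min(), x.max(), 200)
fig, ax = plt.subplots(figsize=(8, 5))
ax.hist(x, bins=30, density=True, alpha=0.3, label='histogram')
ax.plot(x_grid, kde_scipy(x, x_grid, bandwidth=2.0), label='Scipy')
ax.plot(x_grid, kde_sklearn(x, x_grid, bandwidth=2.0), label='Scikit-learn')
ax.set_xlabel('Age')
ax.set_ylabel('density')
ax.legend()
plt.show()
```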
### Discretization of Age variable
Quantile-based discretization function. Discretize variable into equal-sized buckets based on rank or based on sample quantiles. For example 1000 values for 10 quantiles would produce a Categorical object indicating quantile membership for each data point.
```
pd.qcut(data['Age'], 8).head(1)
```
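As a tiny, self-contained illustration of the example mentioned above (1000 values split into 10 quantile bins):
```
# 1000 random values cut into 10 quantile bins; each bin holds ~100 observations
demo = pd.qcut(pd.Series(np.random.randn(1000)), 10)
demo.value_counts().sort_index()
```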
#### Columns with missing values
```
def missing(dff):
print (round((dff.isnull().sum() * 100/ len(dff)),4).sort_values(ascending=False))
missing(df_lh)
```
#### How to remove columns with too many missing values in Python
https://stackoverflow.com/questions/45515031/how-to-remove-columns-with-too-many-missing-values-in-python
```
def rmissingvaluecol(dff,threshold):
l = []
l = list(dff.drop(dff.loc[:,list((100*(dff.isnull().sum()/len(dff.index))>=threshold))].columns, 1).columns.values)
print("# Columns having more than %s percent missing values:"%threshold,(dff.shape[1] - len(l)))
print("Columns:\n",list(set(list((dff.columns.values))) - set(l)))
return l
#Here threshold is 10% which means we are going to drop columns having more than 10% of missing values
rmissingvaluecol(data,10)
# Now create new dataframe excluding these columns
l = rmissingvaluecol(data,10)
data1 = data[l]
# missing(data[features])
```
#### Correlations between features and target
```
correlations = {}
for f in features:
data_temp = data1[[f,target]]
x1 = data_temp[f].values
x2 = data_temp[target].values
key = f + ' vs ' + target
correlations[key] = pearsonr(x1,x2)[0]
data_correlations = pd.DataFrame(correlations, index=['Value']).T
data_correlations.loc[data_correlations['Value'].abs().sort_values(ascending=False).index]
```
#### We can see that the top 5 features are the most correlated features with the target "Age"
```
y = data.loc[:,['lh_insula_thickness','rh_insula_thickness',target]].sort_values(target, ascending=True).values
x = np.arange(y.shape[0])
%matplotlib inline
plt.subplot(3,1,1)
plt.plot(x,y[:,0])
plt.title('lh_insula_thickness and rh_insula_thickness vs Age')
plt.ylabel('lh_insula_thickness')
plt.subplot(3,1,2)
plt.plot(x,y[:,1])
plt.ylabel('rh_insula_thickness')
plt.subplot(3,1,3)
plt.plot(x,y[:,2],'r')
plt.ylabel("Age")
plt.show()
```
### Predicting Age
```
# Train a simple linear regression model
regr = linear_model.LinearRegression()
new_data = data[features]
X = new_data.values
y = data.Age.values
X_train, X_test, y_train, y_test = train_test_split(X, y ,test_size=0.2)
regr.fit(X_train, y_train)
print(regr.predict(X_test))
regr.score(X_test,y_test)
# Calculate the Root Mean Squared Error
print("RMSE: %.2f"
% math.sqrt(np.mean((regr.predict(X_test) - y_test) ** 2)))
# Let's try XGboost algorithm to see if we can get better results
xgb = xgboost.XGBRegressor(n_estimators=100, learning_rate=0.08, gamma=0, subsample=0.75,
colsample_bytree=1, max_depth=7)
traindf, testdf = train_test_split(X_train, test_size = 0.3)
xgb.fit(X_train,y_train)
predictions = xgb.predict(X_test)
print(explained_variance_score(predictions,y_test))
```
### This is worse than a simple regression model
We can use `.describe()` to calculate simple **descriptive statistics** for the dataset (rounding to 3 decimals):
```
new_data.describe().round(3).T
```
Computing the **pairwise correlation of columns** (features). Method could be ‘pearson’ (default), ‘kendall’, or ‘spearman’.
```
new_data.corr().round(2)
new_data.describe()
```
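The non-default methods mentioned above are passed via the `method` argument, e.g.:
```
# rank-based alternatives to the default Pearson correlation
new_data.corr(method='spearman').round(2)
new_data.corr(method='kendall').round(2)
```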
Splitting the DataFrame **into groups** (here by `Sex`)
```
grouped = data.groupby('Sex')
grouped.groups
```
Describe the group-wise `Age` summary statistics
```
print('Age:')
grouped['Age'].describe()
```
Iterating through the grouped data is very natural
```
for name, group in grouped:
print(name,':')
print(group.describe().round(2).head(3))
```
**Group-wise feature correlations**
```
data.groupby('Age').corr().round(3)
```
DataFrame has an `assign()` method that allows you to easily create new columns that are potentially derived from existing columns.
```
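# NOTE: `iris` is not defined earlier in this notebook; the cells below assume an iris
# DataFrame with columns SepalLength, SepalWidth, PetalLength and PetalWidth has been
# loaded beforehand (e.g. via pd.read_csv), similar to the pandas documentation examples.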
iris.assign(sepal_ratio = iris['SepalWidth'] / iris['SepalLength']).head().round(3)
```
In the example above, we inserted a precomputed value. <br>
We can also pass in a function of one argument to be evaluated on the DataFrame being assigned to.
```
iris.assign(sepal_ratio = lambda x: (x['SepalWidth'] /
x['SepalLength'])).head().round(3)
```
`assign` always returns a copy of the data, leaving the original DataFrame untouched, e.g.
```
iris.head(2)
```
Passing a callable, as opposed to an actual value to be inserted, is useful when you don't have a reference to the DataFrame at hand. This is common when using `assign` in a chain of operations. For example, we can limit the DataFrame to just those observations with a Sepal Length greater than 5, calculate the ratio, and plot:
```
(iris.query('SepalLength > 5')
.assign(SepalRatio = lambda x: x.SepalWidth / x.SepalLength,
PetalRatio = lambda x: x.PetalWidth / x.PetalLength)
.plot(kind='scatter', x='SepalRatio', y='PetalRatio'))
```
### Classification
*Organizing data as X and y before classification*
```
from sklearn.preprocessing import LabelEncoder
# dfX5Y = pd.read_csv('../results/02_X5Y.csv', sep=',')
# print(dfX5Y.info())
# print(dfX5Y.describe())
# dfX5Y
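# NOTE: `df` (holding the *_w3 features and the Stroop_* outcome columns used below) is not
# constructed in this notebook fragment; it is assumed to come from earlier preprocessing.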
# Feature importance XGBoost:
# X = df.loc[:, ['CC_Mid_Anterior_w3', 'BrainSegVol-to-eTIV_w3', 'CSF_w2']] # Top three important features
# Feature importance RF (Stroop_3):
X = df.loc[:, ['BrainSegVol-to-eTIV_w3', 'CC_Mid_Anterior_w3', 'ic04-ic02']] # Top three important features
# Feature importance RF predicting Stroop_1_R_3:
# X = df.loc[:, ['ic09-ic06', 'ic10-ic01', 'ic05-ic03']] # Top three important features
# Feature importance RF predicting Stroop_2_R_3:
# X = df.loc[:, ['WM-hypointensities_w3', 'ic17-ic04', 'Left-vessel_w3']] # Top three important features
# X = df.loc[:, ['BrainSegVol-to-eTIV_w3', 'ic04-ic02']] # Two important features
# X = df.loc[:, ['BrainSegVol-to-eTIV_w3', 'CC_Mid_Anterior_w3']] # Top two important features
Y = df.loc[:, ['Stroop_3_cat']]
y = Y.values.ravel()
np.unique(y)
X.columns
from sklearn.ensemble import VotingClassifier
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn import preprocessing
# X = dfX5Y.loc[:, dfX5Y.columns != 'grp'] # Top five important connections
# X = dfX5Y.loc[:, ['ic09-ic02', 'ic04-ic01']] # Top two important connections
# X = df.loc[:, ['LatVent_w2', 'LatVent_w3', 'ic09-ic02', 'ic04-ic01']]
# X = df.loc[:, ['LatVent_w3', 'ic09-ic02']]
# X = df.loc[:, ['LatVent_w2', 'LatVent_w3']]
# Y = df.loc[:, ['Stroop_3_cat']]
# X = df.loc[:, ['BrainSegVol-to-eTIV_w3', 'CC_Mid_Anterior_w3', 'ic04-ic02']]
# Y = df.loc[:, ['Stroop_3_cat']]
# y = Y.as_matrix().ravel()
rs = 42 # random_state (42)
hls = 3 # MLP hidden layer size (3 or 4)
# https://stackoverflow.com/questions/37659970/how-does-sklearn-compute-the-precision-score-metric
myaverage = 'weighted' # For multilabel classification 'micro', 'macro', 'samples', 'weighted'
# see: https://stackoverflow.com/questions/37659970/how-does-sklearn-compute-the-precision-score-metric
# http://scikit-learn.org/stable/modules/neural_networks_supervised.html
# Class MLPClassifier implements a multi-layer perceptron (MLP) algorithm that
# trains using Backpropagation.
# So what about size of the hidden layer(s)--how many neurons?
# There are some empirically-derived rules-of-thumb, of these, the most
# commonly relied on is 'the optimal size of the hidden layer is usually
# between the size of the input and size of the output layers'.
# Jeff Heaton, author of Introduction to Neural Networks in Java offers a few more.
#
# In sum, for most problems, one could probably get decent performance (even without
# a second optimization step) by setting the hidden layer configuration using j
# ust two rules:
# (i) number of hidden layers equals one; and
# (ii) the number of neurons in that layer is the mean of the neurons in the
# input and output layers.
# Compute the precision
# The precision is the ratio tp / (tp + fp) where tp is the number of true positives and
# fp the number of false positives. The precision is intuitively the ability of the
# classifier not to label as positive a sample that is negative.
# Compute the recall
# The recall is the ratio tp / (tp + fn) where tp is the number of true positives and
# fn the number of false negatives. The recall is intuitively the ability of the
# classifier to find all the positive samples.
# Compute the F1 score, also known as balanced F-score or F-measure
# The F1 score can be interpreted as a weighted average of the precision and recall,
# where an F1 score reaches its best value at 1 and worst score at 0.
# The relative contribution of precision and recall to the F1 score are equal.
# The formula for the F1 score is:
# F1 = 2 * (precision * recall) / (precision + recall)
# In the multi-class and multi-label case, this is the weighted average of the F1 score of each class.
pipe_clf1 = Pipeline([
('scl', StandardScaler()),
#('pca', PCA(n_components=2)),
('clf1', LogisticRegression(C=1., solver='saga', n_jobs=1,
multi_class='multinomial', random_state=rs))])
pipe_clf2 = Pipeline([
('scl', StandardScaler()),
#('pca', PCA(n_components=2)),
('clf2', MLPClassifier(hidden_layer_sizes=(hls, ), # =(100, ) ; =(4, )
activation='relu', solver='adam',
alpha=0.0001, batch_size='auto', learning_rate='constant',
learning_rate_init=0.001, power_t=0.5, max_iter=5000,
shuffle=True, random_state=rs, tol=0.0001, verbose=False,
warm_start=False, momentum=0.9, nesterovs_momentum=True,
early_stopping=False, validation_fraction=0.1,
beta_1=0.9, beta_2=0.999, epsilon=1e-08))])
# pipe_clf3 = Pipeline([
# ('scl', StandardScaler()),
# #('pca', PCA(n_components=2)),
# ('clf3', RandomForestClassifier(n_estimators=100, criterion='gini', max_depth=None,
# min_samples_split=2, min_samples_leaf=1,
# min_weight_fraction_leaf=0.0, max_features='auto',
# max_leaf_nodes=None, # min_impurity_split=1e-07,
# bootstrap=True, oob_score=False, n_jobs=1,
# random_state=rs, verbose=0, warm_start=False,
# class_weight=None))])
# pipe_clf3 = Pipeline([
# ('scl', StandardScaler()),
# #('pca', PCA(n_components=2)),
# ('clf3', GradientBoostingClassifier(init=None, learning_rate=0.05, loss='deviance',
# max_depth=None, max_features=None, max_leaf_nodes=None,
# min_samples_leaf=1, min_samples_split=2,
# min_weight_fraction_leaf=0.0, n_estimators=100,
# presort='auto', random_state=rs, subsample=1.0, verbose=0,
# warm_start=False)
pipe_clf3 = Pipeline([
('scl', StandardScaler()),
#('pca', PCA(n_components=2)),
('clf3', XGBClassifier(base_score=0.5, colsample_bylevel=1, colsample_bytree=1,
gamma=0, learning_rate=0.1, max_delta_step=0, max_depth=3,
min_child_weight=1, missing=None, n_estimators=1000, nthread=-1,
objective='multi:softprob', reg_alpha=0, reg_lambda=1,
scale_pos_weight=1, seed=rs, silent=True, subsample=1))])
pipe_clf4 = Pipeline([
('scl', StandardScaler()),
#('pca', PCA(n_components=2)),
('clf4', SVC(C=1.0, probability=True, random_state=rs))])
# ('clf4', SVC(C=1.0, random_state=rs))])
pipe_clf5 = Pipeline([
('scl', StandardScaler()),
#('pca', PCA(n_components=2)),
('clf5', KNeighborsClassifier(n_neighbors=5, weights='uniform', algorithm='kd_tree',
leaf_size=30, p=2, metric='minkowski',
metric_params=None, n_jobs=1))])
pipe_clf_vote = Pipeline([
# ('scl', StandardScaler()),
('clf_vote', VotingClassifier(
estimators=[('lr', pipe_clf1),
('mlp', pipe_clf2),
('rf', pipe_clf3),
('svc', pipe_clf4),
('knn', pipe_clf5)],
voting = 'soft'))])
# voting = 'hard'))])
scores1_acc, scores2_acc, scores3_acc, scores4_acc, scores5_acc, scores_vote_acc = [], [], [], [], [], []
scores1_pre, scores2_pre, scores3_pre, scores4_pre, scores5_pre, scores_vote_pre = [], [], [], [], [], []
scores1_rec, scores2_rec, scores3_rec, scores4_rec, scores5_rec, scores_vote_rec = [], [], [], [], [], []
scores1_f1, scores2_f1, scores3_f1, scores4_f1, scores5_f1, scores_vote_f1 = [], [], [], [], [], []
n_splits = 10 # k=10
# n_splits = X.shape[0] # i.e. Leave One Out strategy
# for train_index, test_index in LeaveOneOut.split(X):
k=1
for train_index, test_index in \
StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=rs).split(X,y):
print("Fold number:", k)
#print("\nTRUE class:\n", list(y[test_index]))
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y[train_index], y[test_index]
#clf1 = LogisticRegression
print(" - LogisticRegression")
pipe_clf1.fit(X_train, y_train)
scores1_acc.append(accuracy_score(y_test, pipe_clf1.predict(X_test)))
scores1_pre.append(precision_score(y_test, pipe_clf1.predict(X_test), average=myaverage))
scores1_rec.append(recall_score(y_test, pipe_clf1.predict(X_test), average=myaverage))
scores1_f1.append(f1_score(y_test, pipe_clf1.predict(X_test), average=myaverage))
print(' Precision: %.2f' % (precision_score(y_test, pipe_clf1.predict(X_test), average=myaverage)))
print(' Recall: %.2f' % (recall_score(y_test, pipe_clf1.predict(X_test), average=myaverage)))
#print("LR predicted:\n", list(pipe_clf1.predict(X_test)))
#clf2 = MLPClassifier
print(" - MLPClassifier")
pipe_clf2.fit(X_train, y_train)
scores2_acc.append(accuracy_score(y_test, pipe_clf2.predict(X_test)))
scores2_pre.append(precision_score(y_test, pipe_clf2.predict(X_test), average=myaverage))
scores2_rec.append(recall_score(y_test, pipe_clf2.predict(X_test), average=myaverage))
scores2_f1.append(f1_score(y_test, pipe_clf2.predict(X_test), average=myaverage))
print(' Precision: %.2f' % (precision_score(y_test, pipe_clf2.predict(X_test), average=myaverage)))
print(' Recall: %.2f' % (recall_score(y_test, pipe_clf2.predict(X_test), average=myaverage)))
#print("MLP predicted:\n", list(pipe_clf2.predict(X_test)))
#clf3 = RandomForestClassifier
#print(" - RandomForestClassifier")
#clf3 = XGBoost
print(" - XGBoost")
pipe_clf3.fit(X_train, y_train)
scores3_acc.append(accuracy_score(y_test, pipe_clf3.predict(X_test)))
scores3_pre.append(precision_score(y_test, pipe_clf3.predict(X_test), average=myaverage))
scores3_rec.append(recall_score(y_test, pipe_clf3.predict(X_test), average=myaverage))
scores3_f1.append(f1_score(y_test, pipe_clf3.predict(X_test), average=myaverage))
print(' Precision: %.2f' % (precision_score(y_test, pipe_clf3.predict(X_test), average=myaverage)))
print(' Recall: %.2f' % (recall_score(y_test, pipe_clf3.predict(X_test), average=myaverage)))
#print("RF predicted:\n", list(pipe_clf3.predict(X_test)))
#print("XGB predicted:\n", list(pipe_clf3.predict(X_test)))
#clf4 = svm.SVC()
print(" - svm/SVC")
pipe_clf4.fit(X_train, y_train)
scores4_acc.append(accuracy_score(y_test, pipe_clf4.predict(X_test)))
scores4_pre.append(precision_score(y_test, pipe_clf4.predict(X_test), average=myaverage))
scores4_rec.append(recall_score(y_test, pipe_clf4.predict(X_test), average=myaverage))
scores4_f1.append(f1_score(y_test, pipe_clf4.predict(X_test), average=myaverage))
print(' Precision: %.2f' % (precision_score(y_test, pipe_clf4.predict(X_test), average=myaverage)))
print(' Recall: %.2f' % (recall_score(y_test, pipe_clf4.predict(X_test), average=myaverage)))
#print("SVM predicted:\n", list(pipe_clf4.predict(X_test)))
#clf5 = KNeighborsClassifier
print(" - KNN")
pipe_clf5.fit(X_train, y_train)
scores5_acc.append(accuracy_score(y_test, pipe_clf5.predict(X_test)))
scores5_pre.append(precision_score(y_test, pipe_clf5.predict(X_test), average=myaverage))
scores5_rec.append(recall_score(y_test, pipe_clf5.predict(X_test), average=myaverage))
scores5_f1.append(f1_score(y_test, pipe_clf5.predict(X_test), average=myaverage))
#print("KNN predicted:\n", list(pipe_clf5.predict(X_test)))
#clf_vote = VotingClassifier
print(" - VotingClassifier")
pipe_clf_vote.fit(X_train, y_train)
scores_vote_acc.append(accuracy_score(y_test, pipe_clf_vote.predict(X_test)))
scores_vote_pre.append(precision_score(y_test, pipe_clf_vote.predict(X_test), average=myaverage))
scores_vote_rec.append(recall_score(y_test, pipe_clf_vote.predict(X_test), average=myaverage))
scores_vote_f1.append(f1_score(y_test, pipe_clf_vote.predict(X_test), average=myaverage))
print(' Precision: %.2f' % (precision_score(y_test, pipe_clf_vote.predict(X_test), average=myaverage)))
print(' Recall: %.2f' % (recall_score(y_test, pipe_clf_vote.predict(X_test), average=myaverage)))
k=k+1
print('\nPredictors:')
print('X.columns = %s' % list(X.columns))
print('\nOutcome:')
print(pd.qcut(df['Stroop_3_R_3'], 3).head(0))
print(np.unique(y))
print('\nSome hyperparameters:')
print("MLP hidden_layer_size = %d" % (hls))
print("random_state = %d" % (rs))
print("score average = '%s'" % (myaverage))
print("\nLR : CV accuracy = %.3f +-%.3f (k=%d)" % (np.mean(scores1_acc), np.std(scores1_acc), n_splits))
print("MLP: CV accuracy = %.3f +-%.3f (k=%d)" % (np.mean(scores2_acc), np.std(scores2_acc), n_splits))
# print("RF : CV accuracy = %.3f +-%.3f (k=%d)" % (np.mean(scores3_acc), np.std(scores3_acc), n_splits))
print("XGB : CV accuracy = %.3f +-%.3f (k=%d)" % (np.mean(scores3_acc), np.std(scores3_acc), n_splits))
print("SVM: CV accuracy = %.3f +-%.3f (k=%d)" % (np.mean(scores4_acc), np.std(scores4_acc), n_splits))
print("KNN: CV accuracy = %.3f +-%.3f (k=%d)" % (np.mean(scores5_acc), np.std(scores5_acc), n_splits))
print("Voting: CV accuracy = %.3f +-%.3f (k=%d)" % (np.mean(scores_vote_acc), np.std(scores_vote_acc), n_splits))
print("\nLR : CV precision = %.3f +-%.3f (k=%d)" % (np.mean(scores1_pre), np.std(scores1_pre), n_splits))
print("MLP: CV precision = %.3f +-%.3f (k=%d)" % (np.mean(scores2_pre), np.std(scores2_pre), n_splits))
print("XGB : CV precision = %.3f +-%.3f (k=%d)" % (np.mean(scores3_pre), np.std(scores3_pre), n_splits))
print("SVM: CV precision = %.3f +-%.3f (k=%d)" % (np.mean(scores4_pre), np.std(scores4_pre), n_splits))
print("KNN: CV precision = %.3f +-%.3f (k=%d)" % (np.mean(scores5_pre), np.std(scores5_pre), n_splits))
print("Voting: CV precision = %.3f +-%.3f (k=%d)" % (np.mean(scores_vote_pre), np.std(scores_vote_pre), n_splits))
print("\nLR : CV recall = %.3f +-%.3f (k=%d)" % (np.mean(scores1_rec), np.std(scores1_rec), n_splits))
print("MLP: CV recall = %.3f +-%.3f (k=%d)" % (np.mean(scores2_rec), np.std(scores2_rec), n_splits))
print("XGB : CV recall = %.3f +-%.3f (k=%d)" % (np.mean(scores3_rec), np.std(scores3_rec), n_splits))
print("SVM: CV recall = %.3f +-%.3f (k=%d)" % (np.mean(scores4_rec), np.std(scores4_rec), n_splits))
print("KNN: CV recall = %.3f +-%.3f (k=%d)" % (np.mean(scores5_rec), np.std(scores5_rec), n_splits))
print("Voting: CV recall = %.3f +-%.3f (k=%d)" % (np.mean(scores_vote_rec), np.std(scores_vote_rec), n_splits))
print("\nLR : CV F1-score = %.3f +-%.3f (k=%d)" % (np.mean(scores1_f1), np.std(scores1_f1), n_splits))
print("MLP: CV F1-score = %.3f +-%.3f (k=%d)" % (np.mean(scores2_f1), np.std(scores2_f1), n_splits))
print("XGB : CV F1-score = %.3f +-%.3f (k=%d)" % (np.mean(scores3_f1), np.std(scores3_f1), n_splits))
print("SVM: CV F1-score = %.3f +-%.3f (k=%d)" % (np.mean(scores4_f1), np.std(scores4_f1), n_splits))
print("KNN: CV F1-score = %.3f +-%.3f (k=%d)" % (np.mean(scores5_f1), np.std(scores5_f1), n_splits))
print("Voting: CV F1-score = %.3f +-%.3f (k=%d)" % (np.mean(scores_vote_f1), np.std(scores_vote_f1), n_splits))
```
```
import gevent
import random
import pandas as pd
import numpy as np
import math
import time
import functools as ft
import glob, os, sys
import operator as op
import shelve
import ipywidgets as widgets
from ipywidgets import interact, interact_manual
#from pandas.api.types import is_numeric_dtype
from pathlib import Path
from itertools import combinations, product, permutations
from sqlalchemy.engine import create_engine
from datetime import datetime
from ast import literal_eval
from scipy import stats
from scipy.stats.mstats import gmean
from pythonds.basic.stack import Stack
from pythonds.trees.binaryTree import BinaryTree
from collections import defaultdict
import collections
from typing import List, Set, Tuple
from sklearn.metrics import classification_report, confusion_matrix
from scipy import sparse
#!pip install pythonds
```
```
# STEP-1: CHOOSE YOUR CORPUS
# TODO: get working with list of corpora
#corpora = ['mipacq','i2b2','fairview'] #options for concept extraction include 'fairview', 'mipacq' OR 'i2b2'
# cross-system semantic union merge filter for cross system aggregations using custom system annotations file with corpus name and system name using 'ray_test':
# need to add semantic type filtering when reading in sys_data
#corpus = 'ray_test'
#corpus = 'clinical_trial2'
corpus = 'fairview'
#corpora = ['i2b2','fairview']
# STEP-2: CHOOSE YOUR DATA DIRECTORY; this is where output data will be saved on your machine
data_directory = '/mnt/DataResearch/gsilver1/output/'
# STEP-3: CHOOSE WHICH SYSTEMS YOU'D LIKE TO EVALUATE AGAINST THE CORPUS REFERENCE SET
#systems = ['biomedicus', 'clamp', 'ctakes', 'metamap', 'quick_umls']
#systems = ['biomedicus', 'clamp', 'metamap', 'quick_umls']
#systems = ['biomedicus', 'quick_umls']
#systems = ['biomedicus', 'ctakes', 'quick_umls']
systems = ['biomedicus', 'clamp', 'ctakes', 'metamap']
#systems = ['biomedicus', 'clamp']
#systems = ['ctakes', 'quick_umls', 'biomedicus', 'metamap']
#systems = ['biomedicus', 'metamap']
#systems = ['ray_test']
#systems = ['metamap']
# STEP-4: CHOOSE TYPE OF RUN
rtype = 6 # OPTIONS INCLUDE: 1->Single systems; 2->Ensemble; 3->Tests; 4 -> majority vote
# The Ensemble can include the max system set ['ctakes','biomedicus','clamp','metamap','quick_umls']
# STEP-5: CHOOSE WHAT TYPE OF ANALYSIS YOU'D LIKE TO RUN ON THE CORPUS
analysis_type = 'full' #options include 'entity', 'cui' OR 'full'
# STEP-(6A): ENTER DETAILS FOR ACCESSING MANUAL ANNOTATION DATA
database_type = 'postgresql+psycopg2' # We use mysql+pymysql as default
database_username = 'gsilver1'
database_password = 'nej123'
database_url = 'd0pconcourse001' # HINT: use localhost if you're running database on your local machine
#database_name = 'clinical_trial' # Enter database name
database_name = 'covid-19' # Enter database name
def ref_data(corpus):
return corpus + '_all' # Enter the table within the database where your reference data is stored
table_name = ref_data(corpus)
# STEP-(6B): ENTER DETAILS FOR ACCESSING SYSTEM ANNOTATION DATA
def sys_data(corpus, analysis_type):
if analysis_type == 'entity':
return 'analytical_'+corpus+'.csv' # OPTIONS include 'analytical_cui_mipacq_concepts.csv' OR 'analytical_cui_i2b2_concepts.csv'
elif analysis_type in ('cui', 'full'):
return 'analytical_'+corpus+'_cui.csv' # OPTIONS include 'analytical_cui_mipacq_concepts.csv' OR 'analytical_cui_i2b2_concepts.csv'
system_annotation = sys_data(corpus, analysis_type)
# STEP-7: CREATE A DB CONNECTION POOL
engine_request = str(database_type)+'://'+database_username+':'+database_password+"@"+database_url+'/'+database_name
engine = create_engine(engine_request, pool_pre_ping=True, pool_size=20, max_overflow=30)
# STEP-(8A): FILTER BY SEMTYPE
filter_semtype = True #False
# STEP-(8B): IF STEP-(8A) == True -> GET REFERENCE SEMTYPES
def ref_semtypes(filter_semtype, corpus):
if filter_semtype:
if corpus == 'fairview':
semtypes = ['Disorders']
else: pass
return semtypes
semtypes = ref_semtypes(filter_semtype, corpus)
# STEP-9: Set data directory/table for source documents for vectorization
src_table = 'sofa'
# STEP-10: Specify match type from {'exact', 'overlap', 'cui' -> kludge for majority}
run_type = 'overlap'
# for clinical trial, measurement/temporal are single system since no overlap for intersect
# STEP-11: Specify expression type for run (TODO: run all at once; make less kludgey)
expression_type = 'nested' #'nested_with_singleton' # type of merge expression: nested ((A&B)|C), paired ((A&B)|(C&D)), nested_with_singleton ((A&B)|((C&D)|E))
# -> NB: len(systems) for pair must be >= 4, and for nested_with_singleton == 5; single-> skip merges
# STEP-12: Specify type of ensemble: merge or vote
ensemble_type = 'merge'
# STEP-13: run on negation modifier (TODO: negated entity)
modification = None #'negation'
```
****** TODO
-> add majority vote to union for analysis_type = 'full'
-> case for multiple labels on same/overlapping span/same system; disambiguate (order by score if exists and select random for ties): done!
-> port to command line
----------------------->
-> still need to validate that all semtypes in corpus!
-> handle case where intersect merges are empty/any confusion matrix values are 0; specifically on empty df in evaluate method: done!
-> case when system annotations empty from semtype filter; print as 0: done!
-> trim whitespace on CSV import -> done for semtypes
-> eliminate rtype = 1 for expression_type = 'single'
-> cross-system semantic union merge on aggregation
-> negation: testing
-> other modification, such as 'present'
-> clean up configuration process
-> allow iteration through all corpora and semtypes
-> optimize vectorization (remove confusion?)
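As a rough illustration of the merge expressions configured above (nested `((A&B)|C)`, paired `((A&B)|(C&D))`, and so on): each letter stands for one system's set of annotations, and `&`/`|` act like intersection/union over matched spans. The snippet below is only a toy sketch with made-up spans, not part of the pipeline:
```
# toy sketch (not from the pipeline): systems as sets of (note_id, begin, end) spans
A = {('doc1', 0, 5), ('doc1', 10, 14)}   # e.g. biomedicus
B = {('doc1', 0, 5)}                     # e.g. clamp
C = {('doc1', 20, 24)}                   # e.g. metamap
nested = (A & B) | C                     # mirrors the 'nested' form ((A&B)|C)
print(sorted(nested))                    # spans found by both A and B, plus everything from C
```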
```
# config class for analysis
class AnalysisConfig():
"""
Configuration object:
systems to use
notes by corpus
paths by output, gold and system location
"""
def __init__(self):
self = self
self.systems = systems
self.data_dir = data_directory
def corpus_config(self):
usys_data = system_annotation
ref_data = database_name+'.'+table_name
return usys_data, ref_data
analysisConf = AnalysisConfig()
#usys, ref = analysisConf.corpus_config()
class SemanticTypes(object):
'''
Filter semantic types based on: https://metamap.nlm.nih.gov/SemanticTypesAndGroups.shtml
:params: semtypes list from corpus, system to query
:return: list of equivalent system semtypes
'''
def __init__(self, semtypes, corpus):
self = self
# if corpus == 'clinical_trial2':
# corpus = 'clinical_trial' # kludge!!
# sql = "SELECT st.tui, abbreviation, clamp_name, ctakes_name, biomedicus_name FROM clinical_trial.semantic_groups sg join semantic_types st on sg.tui = st.tui where " + corpus + "_name in ({})"\
# .format(', '.join(['%s' for _ in semtypes]))
sql = "SELECT st.tui, abbreviation, clamp_name, ctakes_name FROM semantic_groups sg join semantic_types st on sg.tui = st.tui where group_name in ({})"\
.format(', '.join(['%s' for _ in semtypes]))
stypes = pd.read_sql(sql, params=[semtypes], con=engine)
if len(stypes['tui'].tolist()) > 0:
self.biomedicus_types = set(stypes['tui'].tolist())
self.qumls_types = set(stypes['tui'].tolist())
else:
self.biomedicus_types = None
self.qumls_types = None
# dropna into a copy: the clamp/ctakes name columns may be empty or all-NaN for some groups
clamp_names = stypes['clamp_name'].dropna()
if len(clamp_names) == 0:
self.clamp_types = None
else:
self.clamp_types = set(clamp_names.tolist()[0].split(','))
ctakes_names = stypes['ctakes_name'].dropna()
if len(ctakes_names) == 0:
self.ctakes_types = None
else:
self.ctakes_types = set(ctakes_names.tolist()[0].split(','))
# # # Kludge for b9 temporal
# if stypes['biomedicus_name'].dropna(inplace=True) or len(stypes['biomedicus_name']) > 0:
# self.biomedicus_types.update(set(stypes['biomedicus_name'].tolist()[0].split(',')))
# #else:
# # self.biomedicus_type = None
if len(stypes['abbreviation'].tolist()) > 0:
self.metamap_types = set(stypes['abbreviation'].tolist())
else:
self.metamap_types = None
self.reference_types = set(semtypes)
def get_system_type(self, system):
if system == 'biomedicus':
semtypes = self.biomedicus_types
elif system == 'ctakes':
semtypes = self.ctakes_types
elif system == 'clamp':
semtypes = self.clamp_types
elif system == 'metamap':
semtypes = self.metamap_types
elif system == 'quick_umls':
semtypes = self.qumls_types
elif system == 'reference':
semtypes = self.reference_types
return semtypes
# print(SemanticTypes(['Drug'], corpus).get_system_type('biomedicus'))
#print(SemanticTypes(['Drug'], corpus).get_system_type('quick_umls'))
#print(SemanticTypes(['drug'], corpus).get_system_type('clamp'))
#print(SemanticTypes(['Disorders'], 'fairview').get_system_type('clamp'))
#semtypes = ['test,treatment']
#semtypes = 'drug,drug::drug_name,drug::drug_dose,dietary_supplement::dietary_supplement_name,dietary_supplement::dietary_supplement_dose'
#semtypes = 'demographics::age,demographics::sex,demographics::race_ethnicity,demographics::bmi,demographics::weight'
#corpus = 'clinical_trial'
#sys = 'quick_umls'
# is semantic type in particular system
def system_semtype_check(sys, semtype, corpus):
st = SemanticTypes([semtype], corpus).get_system_type(sys)
if st:
return sys
else:
return None
#print(system_semtype_check(sys, semtypes, corpus))
# annotation class for systems
class AnnotationSystems():
"""
System annotations of interest for UMLS concept extraction
NB: ctakes combines all "mentions" annotation types
"""
def __init__(self):
"""
annotation base types
"""
self.biomedicus_types = ["biomedicus.v2.UmlsConcept"]
self.clamp_types = ["edu.uth.clamp.nlp.typesystem.ClampNameEntityUIMA"]
self.ctakes_types = ["ctakes_mentions"]
self.metamap_types = ["org.metamap.uima.ts.Candidate"]
self.qumls_types = ["concept_jaccard_score_False"]
def get_system_type(self, system):
"""
return system types
"""
if system == "biomedicus":
view = "Analysis"
else:
view = "_InitialView"
if system == 'biomedicus':
types = self.biomedicus_types
elif system == 'clamp':
types = self.clamp_types
elif system == 'ctakes':
types = self.ctakes_types
elif system == 'metamap':
types = self.metamap_types
elif system == "quick_umls":
types = self.qumls_types
return types, view
annSys = AnnotationSystems()
%reload_ext Cython
#%%cython
#import numpy as np # access to Numpy from Python layer
#import math
class Metrics(object):
"""
metrics class:
returns an instance with confusion matrix metrics
"""
def __init__(self, system_only, gold_only, gold_system_match, system_n, neither = 0): # neither: no sys or manual annotation
self = self
self.system_only = system_only
self.gold_only = gold_only
self.gold_system_match = gold_system_match
self.system_n = system_n
self.neither = neither
def get_confusion_metrics(self, corpus = None, test = False):
"""
compute confusion matrix measures, as per
https://stats.stackexchange.com/questions/51296/how-do-you-calculate-precision-and-recall-for-multiclass-classification-using-co
"""
# cdef:
# int TP, FP, FN
# double TM
TP = self.gold_system_match
FP = self.system_only
FN = self.gold_only
TM = TP/math.sqrt(self.system_n) # TigMetric
if not test:
if corpus == 'casi':
recall = TP/(TP + FN)
precision = TP/(TP + FP)
F = 2*(precision*recall)/(precision + recall)
else:
if self.neither == 0:
confusion = [[0, self.system_only],[self.gold_only,self.gold_system_match]]
else:
confusion = [[self.neither, self.system_only],[self.gold_only,self.gold_system_match]]
c = np.asarray(confusion)
if TP != 0 or FP != 0:
precision = TP/(TP+FP)
else:
precision = 0
if TP != 0 or FN != 0:
recall = TP/(TP+FN)
else:
recall = 0
if precision + recall != 0:
F = 2*(precision*recall)/(precision + recall)
else:
F = 0
# recall = np.diag(c) / np.sum(c, axis = 1)
# precision = np.diag(c) / np.sum(c, axis = 0)
# #print('Yo!', np.mean(precision), np.mean(recall))
# if np.mean(precision) != 0 and np.mean(recall) != 0:
# F = 2*(precision*recall)/(precision + recall)
# else:
# F = 0
else:
precision = TP/(TP+FP)
recall = TP/(TP+FN)
F = 2*(precision*recall)/(precision + recall)
# Tignanelli Metric
if FN == 0:
TP_FN_R = TP
elif FN > 0:
TP_FN_R = TP/FN
return F, recall, precision, TP, FP, FN, TP_FN_R, TM
def df_to_set(df, analysis_type = 'entity', df_type = 'sys', corpus = None):
# get values for creation of series of type tuple
if 'entity' in analysis_type:
if corpus == 'casi':
arg = df.case, df.overlap
else:
arg = df.begin, df.end, df.case
elif 'cui' in analysis_type:
arg = df.value, df.case
elif 'full' in analysis_type:
arg = df.begin, df.end, df.value, df.case
return set(list(zip(*arg)))
#%%cython
from __main__ import df_to_set, engine
import numpy as np
import pandas as pd
def get_cooccurences(ref, sys, analysis_type: str, corpus: str):
"""
get cooccurences between system and reference; exact match; TODO: add relaxed -> done in single system evals during ensemble run
"""
# cooccurences
class Cooccurences(object):
def __init__(self):
self.ref_system_match = 0
self.ref_only = 0
self.system_only = 0
self.system_n = 0
self.ref_n = 0
self.matches = set()
self.false_negatives = set()
self.corpus = corpus
c = Cooccurences()
if c.corpus != 'casi':
if analysis_type in ['cui', 'full']:
sys = sys.rename(index=str, columns={"note_id": "case", "cui": "value"})
# do not overestimate FP
sys = sys[~sys['value'].isnull()]
ref = ref[~ref['value'].isnull()]
if 'entity' in analysis_type:
sys = sys.rename(index=str, columns={"note_id": "case"})
cols_to_keep = ['begin', 'end', 'case']
elif 'cui' in analysis_type:
cols_to_keep = ['value', 'case']
elif 'full' in analysis_type:
cols_to_keep = ['begin', 'end', 'value', 'case']
sys = sys[cols_to_keep].drop_duplicates()
ref = ref[cols_to_keep].drop_duplicates()
# matches via inner join
tp = pd.merge(sys, ref, how = 'inner', left_on=cols_to_keep, right_on = cols_to_keep)
# reference-only via left outer join
fn = pd.merge(ref, sys, how = 'left', left_on=cols_to_keep, right_on = cols_to_keep, indicator=True)
fn = fn[fn["_merge"] == 'left_only']
tp = tp[cols_to_keep]
fn = fn[cols_to_keep]
# use for metrics
c.matches = c.matches.union(df_to_set(tp, analysis_type, 'ref'))
c.false_negatives = c.false_negatives.union(df_to_set(fn, analysis_type, 'ref'))
c.ref_system_match = len(c.matches)
c.system_only = len(sys) - len(c.matches) # fp
c.system_n = len(sys)
c.ref_n = len(ref)
c.ref_only = len(c.false_negatives)
else:
sql = "select `case` from test.amia_2019_analytical_v where overlap = 1 and `system` = %(sys.name)s"
tp = pd.read_sql(sql, params={"sys.name":sys.name}, con=engine)
sql = "select `case` from test.amia_2019_analytical_v where (overlap = 0 or overlap is null) and `system` = %(sys.name)s"
fn = pd.read_sql(sql, params={"sys.name":sys.name}, con=engine)
c.matches = df_to_set(tp, 'entity', 'sys', 'casi')
c.fn = df_to_set(fn, 'entity', 'sys', 'casi')
c.ref_system_match = len(c.matches)
c.system_only = len(sys) - len(c.matches)
c.system_n = len(tp) + len(fn)
c.ref_n = len(tp) + len(fn)
c.ref_only = len(fn)
# sanity check
if len(ref) - c.ref_system_match < 0:
print('Error: ref_system_match > len(ref)!')
if len(ref) != c.ref_system_match + c.ref_only:
print('Error: ref count mismatch!', len(ref), c.ref_system_match, c.ref_only)
return c
def label_vector(doc_len: int, ann: List, labels: List[str]) -> np.ndarray:
v = np.zeros(doc_len)
labels = list(labels)
for (i, lab) in enumerate(labels):
i += 1 # 0 is reserved for no label
idxs = [np.arange(a.begin, a.end) for a in ann if a.label == lab]
idxs = [j for mask in idxs for j in mask]
v[idxs] = i
return v
# test confusion matrix elements for vectorized annotation set; includes TN
# https://kawahara.ca/how-to-compute-truefalse-positives-and-truefalse-negatives-in-python-for-binary-classification-problems/
# def confused(sys1, ann1):
# TP = np.sum(np.logical_and(ann1 == 1, sys1 == 1))
# # True Negative (TN): we predict a label of 0 (negative), and the true label is 0.
# TN = np.sum(np.logical_and(ann1 == 0, sys1 == 0))
# # False Positive (FP): we predict a label of 1 (positive), but the true label is 0.
# FP = np.sum(np.logical_and(ann1 == 0, sys1 == 1))
# # False Negative (FN): we predict a label of 0 (negative), but the true label is 1.
# FN = np.sum(np.logical_and(ann1 == 1, sys1 == 0))
# return TP, TN, FP, FN
def confused(sys1, ann1):
TP = np.sum(np.logical_and(ann1 > 0, sys1 == ann1))
# True Negative (TN): we predict a label of 0 (negative), and the true label is 0.
TN = np.sum(np.logical_and(ann1 == 0, sys1 == ann1))
# False Positive (FP): we predict a label of 1 (positive), but the true label is 0.
FP = np.sum(np.logical_and(sys1 > 0, sys1 != ann1))
# False Negative (FN): we predict a label of 0 (negative), but the true label is 1.
FN = np.sum(np.logical_and(ann1 > 0, sys1 == 0))
return TP, TN, FP, FN
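# Worked example (added for illustration, not from the original notebook):
# for a 10-character doc with one gold "concept" span [2, 5) and one system span [3, 6),
# label_vector produces ann1 = [0,0,1,1,1,0,0,0,0,0] and sys1 = [0,0,0,1,1,1,0,0,0,0],
# so confused(sys1, ann1) returns TP=2 (positions 3-4), TN=6, FP=1 (position 5), FN=1 (position 2).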
@ft.lru_cache(maxsize=None)
def vectorized_cooccurences(r: object, analysis_type: str, corpus: str, filter_semtype, semtype = None) -> Tuple:
docs = get_docs(corpus)
if filter_semtype:
ann = get_ref_ann(analysis_type, corpus, filter_semtype, semtype)
else:
ann = get_ref_ann(analysis_type, corpus, filter_semtype)
sys = get_sys_ann(analysis_type, r)
#cvals = []
if analysis_type == 'entity':
labels = ["concept"]
elif analysis_type in ['cui', 'full']:
labels = list(set(ann["value"].tolist()))
sys2 = list()
ann2 = list()
s2 = list()
a2 = list()
for n in range(len(docs)):
if analysis_type != 'cui':
a1 = list(ann.loc[ann["case"] == docs[n][0]].itertuples(index=False))
s1 = list(sys.loc[sys["case"] == docs[n][0]].itertuples(index=False))
ann1 = label_vector(docs[n][1], a1, labels)
sys1 = label_vector(docs[n][1], s1, labels)
#TP, TN, FP, FN = confused(sys1, ann1)
#cvals.append([TP, TN, FP, FN])
sys2.append(list(sys1))
ann2.append(list(ann1))
else:
a = ann.loc[ann["case"] == docs[n][0]]['label'].tolist()
s = sys.loc[sys["case"] == docs[n][0]]['label'].tolist()
x = [1 if x in a else 0 for x in labels]
y = [1 if x in s else 0 for x in labels]
# x_sparse = sparse.csr_matrix(x)
# y_sparse = sparse.csr_matrix(y)
s2.append(y)
a2.append(x)
#a1 = list(ann.loc[ann["case"] == docs[n][0]].itertuples(index=False))
#s1 = list(sys.loc[sys["case"] == docs[n][0]].itertuples(index=False))
if analysis_type != 'cui':
a2 = [item for sublist in ann2 for item in sublist]
s2 = [item for sublist in sys2 for item in sublist]
report = classification_report(a2, s2, output_dict=True)
#print('classification:', report)
macro_precision = report['macro avg']['precision']
macro_recall = report['macro avg']['recall']
macro_f1 = report['macro avg']['f1-score']
TN, FP, FN, TP = confusion_matrix(a2, s2).ravel()
#return (np.sum(cvals, axis=0), (macro_precision, macro_recall, macro_f1))
return ((TP, TN, FP, FN), (macro_precision, macro_recall, macro_f1))
else:
x_sparse = sparse.csr_matrix(a2)
y_sparse = sparse.csr_matrix(s2)
report = classification_report(x_sparse, y_sparse, output_dict=True)
macro_precision = report['macro avg']['precision']
macro_recall = report['macro avg']['recall']
macro_f1 = report['macro avg']['f1-score']
#print((macro_precision, macro_recall, macro_f1))
return ((0, 0, 0, 0), (macro_precision, macro_recall, macro_f1))
def cm_dict(ref_only: int, system_only: int, ref_system_match: int, system_n: int, ref_n: int) -> dict:
"""
Generate dictionary of confusion matrix params and measures
:params: ref_only, system_only, reference_system_match -> sets
matches, system_n, reference_n -> counts
:return: dictionary object
"""
if ref_only + ref_system_match != ref_n:
print('ERROR!')
# get evaluation metrics
F, recall, precision, TP, FP, FN, TP_FN_R, TM = Metrics(system_only, ref_only, ref_system_match, system_n).get_confusion_metrics()
d = {
# 'F1': F[1],
# 'precision': precision[1],
# 'recall': recall[1],
'F1': F,
'precision': precision,
'recall': recall,
'TP': TP,
'FN': FN,
'FP': FP,
'TP/FN': TP_FN_R,
'n_gold': ref_n,
'n_sys': system_n,
'TM': TM
}
if system_n - FP != TP:
print('inconsistent system n!')
return d
@ft.lru_cache(maxsize=None)
def get_metric_data(analysis_type: str, corpus: str):
usys_file, ref_table = AnalysisConfig().corpus_config()
systems = AnalysisConfig().systems
sys_ann = pd.read_csv(analysisConf.data_dir + usys_file, dtype={'note_id': str})
# sql = "SELECT * FROM " + ref_table #+ " where semtype in('Anatomy', 'Chemicals_and_drugs')"
# ref_ann = pd.read_sql(sql, con=engine)
sys_ann = sys_ann.drop_duplicates()
ref_ann = None
return ref_ann, sys_ann
#%%cython
import pandas as pd
from scipy import stats
from scipy.stats.mstats import gmean
def geometric_mean(metrics):
"""
1. Get rank average of F1, TP/FN, TM
http://www.datasciencemadesimple.com/rank-dataframe-python-pandas-min-max-dense-rank-group/
https://stackoverflow.com/questions/46686315/in-pandas-how-to-create-a-new-column-with-a-rank-according-to-the-mean-values-o?rq=1
2. Take geomean of rank averages
https://stackoverflow.com/questions/42436577/geometric-mean-applied-on-row
"""
data = pd.DataFrame()
metrics['F1 rank']=metrics['F1'].rank(ascending=0,method='average')
metrics['TP/FN rank']=metrics['TP/FN'].rank(ascending=0,method='average')
metrics['TM rank']=metrics['TM'].rank(ascending=0,method='average')
metrics['Gmean'] = gmean(metrics.iloc[:,-3:],axis=1)
return metrics
def generate_metrics(analysis_type: str, corpus: str, filter_semtype, semtype = None):
start = time.time()
systems = AnalysisConfig().systems
metrics = pd.DataFrame()
__, sys_ann = get_metric_data(analysis_type, corpus)
c = None
for sys in systems:
if filter_semtype and semtype:
ref_ann = get_ref_ann(analysis_type, corpus, filter_semtype, semtype)
else:
ref_ann = get_ref_ann(analysis_type, corpus, filter_semtype)
system_annotations = sys_ann[sys_ann['system'] == sys].copy()
if filter_semtype:
st = SemanticTypes([semtype], corpus).get_system_type(sys)
if st:
system_annotations = sys_ann[sys_ann['semtypes'].isin(st)].copy()
else:
system_annotations = sys_ann.copy()
if (filter_semtype and st) or filter_semtype is False:
system = system_annotations.copy()
if sys == 'quick_umls':
system = system[system.score.astype(float) >= .8]
if sys == 'metamap' and modification == None:
system = system.fillna(0)
system = system[system.score.abs().astype(int) >= 800]
system = system.drop_duplicates()
ref_ann = ref_ann.rename(index=str, columns={"start": "begin", "file": "case"})
c = get_cooccurences(ref_ann, system, analysis_type, corpus) # get matches, FN, etc.
if c.ref_system_match > 0: # compute confusion matrix metrics and write to dictionary -> df
# get dictionary of confusion matrix metrics
d = cm_dict(c.ref_only, c.system_only, c.ref_system_match, c.system_n, c.ref_n)
d['system'] = sys
data = pd.DataFrame(d, index=[0])
metrics = pd.concat([metrics, data], ignore_index=True)
metrics.drop_duplicates(keep='last', inplace=True)
else:
print("NO EXACT MATCHES FOR", sys)
elapsed = (time.time() - start)
print("elapsed:", sys, elapsed)
if c:
elapsed = (time.time() - start)
print(geometric_mean(metrics))
now = datetime.now()
timestamp = datetime.timestamp(now)
file_name = 'metrics_'
metrics.to_csv(analysisConf.data_dir + corpus + '_' + file_name + analysis_type + '_' + str(timestamp) + '.csv')
print("total elapsed time:", elapsed)
@ft.lru_cache(maxsize=None)
def get_ref_n(analysis_type: str, corpus: str, filter_semtype: str) -> int:
ref_ann, _ = get_metric_data(analysis_type, corpus)
if filter_semtype:
ref_ann = ref_ann[ref_ann['semtype'].isin(SemanticTypes(semtypes, corpus).get_system_type('reference'))]
if corpus == 'casi':
return len(ref_ann)
else:
# do not overestimate fn
if 'entity' in analysis_type:
ref_ann = ref_ann[['start', 'end', 'file']].drop_duplicates()
elif 'cui' in analysis_type:
ref_ann = ref_ann[['value', 'file']].drop_duplicates()
elif 'full' in analysis_type:
ref_ann = ref_ann[['start', 'end', 'value', 'file']].drop_duplicates()
else:
pass
ref_n = len(ref_ann.drop_duplicates())
return ref_n
@ft.lru_cache(maxsize=None)
def get_sys_data(system: str, analysis_type: str, corpus: str, filter_semtype, semtype = None) -> pd.DataFrame:
_, data = get_metric_data(analysis_type, corpus)
out = data[data['system'] == system].copy()
if filter_semtype:
st = SemanticTypes([semtype], corpus).get_system_type(system)
print(system, 'st', st)
if corpus == 'casi':
cols_to_keep = ['case', 'overlap']
out = out[cols_to_keep].drop_duplicates()
return out
else:
if filter_semtype:
out = out[out['semtype'].isin(st)].copy()
else:
out = out[out['system']== system].copy()
if modification == 'negation':
out = out[out['modification'] == 'negation'].copy()
if system == 'quick_umls':
out = out[(out.score.astype(float) >= 0.8) & (out["type"] == 'concept_jaccard_score_False')]
# fix for leading space on semantic type field
out = out.apply(lambda x: x.str.strip() if x.dtype == "object" else x)
out['semtypes'] = out['semtypes'].str.strip()
if system == 'metamap' and modification == None:
out = out[out.score.abs().astype(int) >= 800]
if 'entity' in analysis_type:
cols_to_keep = ['begin', 'end', 'note_id']
elif 'cui' in analysis_type:
cols_to_keep = ['cui', 'note_id']
elif 'full' in analysis_type:
cols_to_keep = ['begin', 'end', 'cui', 'note_id', 'polarity']
out = out[cols_to_keep]
return out.drop_duplicates()
```
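For orientation, here is a minimal sketch of how the single-system evaluation above is typically driven; it simply mirrors the `rtype == 1` branch of `main()` defined further down and is not an extra cell from the original notebook.
```
# sketch only: evaluate each configured system against the reference set,
# per semantic group when filter_semtype is True
if filter_semtype:
    for semtype in semtypes:  # e.g. ['Disorders'] for the 'fairview' corpus
        generate_metrics(analysis_type, corpus, filter_semtype, semtype)
else:
    generate_metrics(analysis_type, corpus, filter_semtype)
```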
```
class SetTotals(object):
"""
returns an instance with merged match set numbers using either union or intersection of elements in set
"""
def __init__(self, ref_n, sys_n, match_set):
self = self
self.ref_ann = ref_n
self.sys_n = sys_n
self.match_set = match_set
def get_ref_sys(self):
ref_only = self.ref_ann - len(self.match_set)
sys_only = self.sys_n - len(self.match_set)
return ref_only, sys_only, len(self.match_set), self.match_set
def union_vote(arg):
arg['length'] = (arg.end - arg.begin).abs()
df = arg[['begin', 'end', 'note_id', 'cui', 'length', 'polarity']].copy()
df.sort_values(by=['note_id','begin'],inplace=True)
df = df.drop_duplicates(['begin', 'end', 'note_id', 'cui', 'polarity'])
cases = set(df['note_id'].tolist())
data = []
out = pd.DataFrame()
for case in cases:
print(case)
test = df[df['note_id']==case].copy()
for row in test.itertuples():
iix = pd.IntervalIndex.from_arrays(test.begin, test.end, closed='neither')
span_range = pd.Interval(row.begin, row.end)
fx = test[iix.overlaps(span_range)].copy()
maxLength = fx['length'].max()
minLength = fx['length'].min()
if len(fx) > 1:
#if longer span exists use as tie-breaker
if maxLength > minLength:
fx = fx[fx['length'] == fx['length'].max()]
data.append(fx)
out = pd.concat(data, axis=0)
# Remaining ties on span with same or different CUIs
# randomly reindex to keep random selected row when dropping duplicates: https://gist.github.com/cadrev/6b91985a1660f26c2742
out.reset_index(inplace=True)
out = out.reindex(np.random.permutation(out.index))
return out.drop_duplicates(['begin', 'end', 'note_id', 'polarity']) #out.drop('length', axis=1, inplace=True)
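# Example of the tie-breaking above (illustrative): if two systems emit overlapping spans
# (0, 5) and (0, 7) for the same note, only the longer span (0, 7) survives the length filter;
# exact duplicates left after that are resolved by the random reindex + drop_duplicates.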
@ft.lru_cache(maxsize=None)
def process_sentence(pt, sentence, analysis_type, corpus, filter_semtype, semtype = None):
"""
Recursively evaluate parse tree,
with check for existence before build
:param sentence: to process
:return class of merged annotations, boolean operated system df
"""
class Results(object):
def __init__(self):
self.results = set()
self.system_merges = pd.DataFrame()
r = Results()
if 'entity' in analysis_type and corpus != 'casi':
cols_to_keep = ['begin', 'end', 'note_id', 'polarity'] # entity only
elif 'full' in analysis_type:
cols_to_keep = ['cui', 'begin', 'end', 'note_id', 'polarity'] # entity only
join_cols = ['cui', 'begin', 'end', 'note_id']
elif 'cui' in analysis_type:
cols_to_keep = ['cui', 'note_id', 'polarity'] # entity only
elif corpus == 'casi':
cols_to_keep = ['case', 'overlap']
def evaluate(parseTree):
oper = {'&': op.and_, '|': op.or_}
if parseTree:
leftC = gevent.spawn(evaluate, parseTree.getLeftChild())
rightC = gevent.spawn(evaluate, parseTree.getRightChild())
if leftC.get() is not None and rightC.get() is not None:
system_query = pd.DataFrame()
fn = oper[parseTree.getRootVal()]
if isinstance(leftC.get(), str):
# get system as leaf node
if filter_semtype:
left_sys = get_sys_data(leftC.get(), analysis_type, corpus, filter_semtype, semtype)
else:
left_sys = get_sys_data(leftC.get(), analysis_type, corpus, filter_semtype)
elif isinstance(leftC.get(), pd.DataFrame):
l_sys = leftC.get()
if isinstance(rightC.get(), str):
# get system as leaf node
if filter_semtype:
right_sys = get_sys_data(rightC.get(), analysis_type, corpus, filter_semtype, semtype)
else:
right_sys = get_sys_data(rightC.get(), analysis_type, corpus, filter_semtype)
elif isinstance(rightC.get(), pd.DataFrame):
r_sys = rightC.get()
if fn == op.or_:
if isinstance(leftC.get(), str) and isinstance(rightC.get(), str):
frames = [left_sys, right_sys]
elif isinstance(leftC.get(), str) and isinstance(rightC.get(), pd.DataFrame):
frames = [left_sys, r_sys]
elif isinstance(leftC.get(), pd.DataFrame) and isinstance(rightC.get(), str):
frames = [l_sys, right_sys]
elif isinstance(leftC.get(), pd.DataFrame) and isinstance(rightC.get(), pd.DataFrame):
frames = [l_sys, r_sys]
df = pd.concat(frames, ignore_index=True)
if analysis_type == 'full':
df = union_vote(df)
if fn == op.and_:
if isinstance(leftC.get(), str) and isinstance(rightC.get(), str):
if not left_sys.empty and not right_sys.empty:
df = left_sys.merge(right_sys, on=join_cols, how='inner')
df = df[cols_to_keep].drop_duplicates(subset=cols_to_keep)
else:
df = pd.DataFrame(columns=cols_to_keep)
elif isinstance(leftC.get(), str) and isinstance(rightC.get(), pd.DataFrame):
if not left_sys.empty and not r_sys.empty:
df = left_sys.merge(r_sys, on=join_cols, how='inner')
df = df[cols_to_keep].drop_duplicates(subset=cols_to_keep)
else:
df = pd.DataFrame(columns=cols_to_keep)
elif isinstance(leftC.get(), pd.DataFrame) and isinstance(rightC.get(), str):
if not l_sys.empty and not right_sys.empty:
df = l_sys.merge(right_sys, on=join_cols, how='inner')
df = df[cols_to_keep].drop_duplicates(subset=cols_to_keep)
else:
df = pd.DataFrame(columns=cols_to_keep)
elif isinstance(leftC.get(), pd.DataFrame) and isinstance(rightC.get(), pd.DataFrame):
if not l_sys.empty and not r_sys.empty:
df = l_sys.merge(r_sys, on=join_cols, how='inner')
df = df[cols_to_keep].drop_duplicates(subset=cols_to_keep)
else:
df = pd.DataFrame(columns=cols_to_keep)
# get combined system results
r.system_merges = df
if len(df) > 0:
system_query = pd.concat([system_query, df])
else:
print('wtf!')
return system_query
else:
return parseTree.getRootVal()
if sentence.n_or > 0 or sentence.n_and > 0:
evaluate(pt)
# trivial case
elif sentence.n_or == 0 and sentence.n_and == 0:
if filter_semtype:
r.system_merges = get_sys_data(sentence.sentence, analysis_type, corpus, filter_semtype, semtype)
else:
r.system_merges = get_sys_data(sentence.sentence, analysis_type, corpus, filter_semtype)
return r
"""
Incoming Boolean sentences are parsed into a binary tree.
Test expressions to parse:
sentence = '((((A&B)|C)|D)&E)'
sentence = '(E&(D|(C|(A&B))))'
sentence = '(((A|(B&C))|(D&(E&F)))|(H&I))'
"""
# build parse tree from passed sentence using grammatical rules of Boolean logic
def buildParseTree(fpexp):
"""
Iteratively build parse tree from passed sentence using grammatical rules of Boolean logic
:param fpexp: sentence to parse
:return eTree: parse tree representation
Incoming Boolean sentences are parsed into a binary tree.
Test expressions to parse:
sentence = '(A&B)'
sentence = '(A|B)'
sentence = '((A|B)&C)'
"""
fplist = fpexp.split()
pStack = Stack()
eTree = BinaryTree('')
pStack.push(eTree)
currentTree = eTree
for i in fplist:
if i == '(':
currentTree.insertLeft('')
pStack.push(currentTree)
currentTree = currentTree.getLeftChild()
elif i not in ['&', '|', ')']:
currentTree.setRootVal(i)
parent = pStack.pop()
currentTree = parent
elif i in ['&', '|']:
currentTree.setRootVal(i)
currentTree.insertRight('')
pStack.push(currentTree)
currentTree = currentTree.getRightChild()
elif i == ')':
currentTree = pStack.pop()
else:
raise ValueError
return eTree
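# Example (illustrative, not from the original notebook): after preprocessing,
# '(ctakes&biomedicus)' becomes the token list ['(', 'ctakes', '&', 'biomedicus', ')'],
# which buildParseTree turns into a tree with '&' at the root and the two system names
# as leaves; evaluate() in process_sentence then merges their annotations with an inner join.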
def make_parse_tree(payload):
"""
Ensure data to create tree are in correct form
:param sentence: sentence to preprocess
:return pt, parse tree graph
sentence, processed sentence to build tree
a: order
"""
def preprocess_sentence(sentence):
# prepare statement for case when a boolean AND/OR is given
sentence = payload.replace('(', ' ( '). \
replace(')', ' ) '). \
replace('&', ' & '). \
replace('|', ' | '). \
replace(' ', ' ')
return sentence
sentence = preprocess_sentence(payload)
print('Processing sentence:', sentence)
pt = buildParseTree(sentence)
#pt.postorder()
return pt
class Sentence(object):
'''
Details about boolean expression -> number operators and expression
'''
def __init__(self, sentence):
self = self
self.n_and = sentence.count('&')
self.n_or = sentence.count('|')
self.sentence = sentence
@ft.lru_cache(maxsize=None)
def get_docs(corpus):
# KLUDGE!!!
if corpus == 'ray_test':
corpus = 'fairview'
sql = 'select distinct note_id, sofa from sofas where corpus = %(corpus)s order by note_id'
df = pd.read_sql(sql, params={"corpus":corpus}, con=engine)
df.drop_duplicates()
df['len_doc'] = df['sofa'].apply(len)
subset = df[['note_id', 'len_doc']]
docs = [tuple(x) for x in subset.to_numpy()]
return docs
@ft.lru_cache(maxsize=None)
def get_ref_ann(analysis_type, corpus, filter_semtype, semtype = None):
if filter_semtype:
if ',' in semtype:
semtype = semtype.split(',')
else:
semtype = [semtype]
ann, _ = get_metric_data(analysis_type, corpus)
ann = ann.rename(index=str, columns={"start": "begin", "file": "case"})
if filter_semtype:
ann = ann[ann['semtype'].isin(semtype)]
if analysis_type == 'entity':
ann["label"] = 'concept'
elif analysis_type in ['cui','full']:
ann["label"] = ann["value"]
if modification == 'negation':
ann = ann[ann['semtype'] == 'negation']
if analysis_type == 'entity':
cols_to_keep = ['begin', 'end', 'case', 'label']
elif analysis_type == 'cui':
cols_to_keep = ['value', 'case', 'label']
elif analysis_type == 'full':
cols_to_keep = ['begin', 'end', 'value', 'case', 'label']
ann = ann[cols_to_keep]
return ann
@ft.lru_cache(maxsize=None)
def get_sys_ann(analysis_type, r):
sys = r.system_merges
sys = sys.rename(index=str, columns={"note_id": "case"})
if analysis_type == 'entity':
sys["label"] = 'concept'
cols_to_keep = ['begin', 'end', 'case', 'label']
elif analysis_type == 'full':
sys["label"] = sys["cui"]
cols_to_keep = ['begin', 'end', 'case', 'value', 'label']
elif analysis_type == 'cui':
sys["label"] = sys["cui"]
cols_to_keep = ['case', 'cui', 'label']
sys = sys[cols_to_keep]
return sys
@ft.lru_cache(maxsize=None)
def get_metrics(boolean_expression: str, analysis_type: str, corpus: str, run_type: str, filter_semtype, semtype = None):
"""
Traverse binary parse tree representation of Boolean sentence
:params: boolean expression in form of '(<annotator_engine_name1><boolean operator><annotator_engine_name2>)'
analysis_type (string value of: 'entity', 'cui', 'full') used to filter set of reference and system annotations
:return: dictionary with values needed for confusion matrix
"""
sentence = Sentence(boolean_expression)
pt = make_parse_tree(sentence.sentence)
if filter_semtype:
r = process_sentence(pt, sentence, analysis_type, corpus, filter_semtype, semtype)
else:
r = process_sentence(pt, sentence, analysis_type, corpus, filter_semtype)
# vectorize merges using i-o labeling
if run_type == 'overlap':
if filter_semtype:
((TP, TN, FP, FN),(p,r,f1)) = vectorized_cooccurences(r, analysis_type, corpus, filter_semtype, semtype)
else:
((TP, TN, FP, FN),(p,r,f1)) = vectorized_cooccurences(r, analysis_type, corpus, filter_semtype)
print('results:',((TP, TN, FP, FN),(p,r,f1)))
# TODO: validate against ann1/sys1 where val = 1
# total by number chars
system_n = TP + FP
reference_n = TP + FN
if analysis_type != 'cui':
d = cm_dict(FN, FP, TP, system_n, reference_n)
else:
d = dict()
d['F1'] = 0
d['precision'] = 0
d['recall'] = 0
d['TP/FN'] = 0
d['TM'] = 0
d['TN'] = TN
d['macro_p'] = p
d['macro_r'] = r
d['macro_f1'] = f1
# return full metrics
return d
elif run_type == 'exact':
# total by number spans
if filter_semtype:
ann = get_ref_ann(analysis_type, corpus, filter_semtype, semtype)
else:
ann = get_ref_ann(analysis_type, corpus, filter_semtype)
c = get_cooccurences(ann, r.system_merges, analysis_type, corpus) # get matches, FN, etc.
if c.ref_system_match > 0: # compute confusion matrix metrics and write to dictionary -> df
# get dictionary of confusion matrix metrics
d = cm_dict(c.ref_only, c.system_only, c.ref_system_match, c.system_n, c.ref_n)
else:
d = None
return d
#get_valid_systems(['biomedicus'], 'Anatomy')
# generate all combinations of given list of annotators:
def partly_unordered_permutations(lst, k):
elems = set(lst)
for c in combinations(lst, k):
for d in permutations(elems - set(c)):
yield c + d
def expressions(l, n):
for (operations, *operands), operators in product(
combinations(l, n), product(('&', '|'), repeat=n - 1)):
for operation in zip(operators, operands):
operations = [operations, *operation]
yield operations
# get list of systems with a semantic type in grouping
def get_valid_systems(systems, semtype):
test = []
for sys in systems:
st = system_semtype_check(sys, semtype, corpus)
if st:
test.append(sys)
return test
# permute system combinations and evaluate system merges for performance
def run_ensemble(systems, analysis_type, corpus, filter_semtype, expression_type, semtype = None):
metrics = pd.DataFrame()
# pass single system to evaluate
if expression_type == 'single':
for system in systems:
if filter_semtype:
d = get_metrics(system, analysis_type, corpus, run_type, filter_semtype, semtype)
else:
d = get_metrics(system, analysis_type, corpus, run_type, filter_semtype)
d['merge'] = system
d['n_terms'] = 1
frames = [metrics, pd.DataFrame(d, index=[0])]
metrics = pd.concat(frames, ignore_index=True, sort=False)
elif expression_type == 'nested':
for l in partly_unordered_permutations(systems, 2):
print('processing merge combo:', l)
for i in range(1, len(l)+1):
test = list(expressions(l, i))
for t in test:
if i > 1:
# format Boolean sentence for parse tree
t = '(' + " ".join(str(x) for x in t).replace('[','(').replace(']',')').replace("'","").replace(",","").replace(" ","") + ')'
if filter_semtype:
d = get_metrics(t, analysis_type, corpus, run_type, filter_semtype, semtype)
else:
d = get_metrics(t, analysis_type, corpus, run_type, filter_semtype)
d['merge'] = t
d['n_terms'] = i
frames = [metrics, pd.DataFrame(d, index=[0])]
metrics = pd.concat(frames, ignore_index=True, sort=False)
elif expression_type == 'nested_with_singleton' and len(systems) == 5:
# form (((a&b)|c)&(d|e))
nested = list(expressions(systems, 3))
test = list(expressions(systems, 2))
to_do_terms = []
for n in nested:
# format Boolean sentence for parse tree
n = '(' + " ".join(str(x) for x in n).replace('[','(').replace(']',')').replace("'","").replace(",","").replace(" ","") + ')'
for t in test:
t = '(' + " ".join(str(x) for x in t).replace('[','(').replace(']',')').replace("'","").replace(",","").replace(" ","") + ')'
new_and = '(' + n +'&'+ t + ')'
new_or = '(' + n +'|'+ t + ')'
if new_and.count('biomedicus') != 2 and new_and.count('clamp') != 2 and new_and.count('ctakes') != 2 and new_and.count('metamap') != 2 and new_and.count('quick_umls') != 2:
if new_and.count('&') != 4 and new_or.count('|') != 4:
#print(new_and)
#print(new_or)
to_do_terms.append(new_or)
to_do_terms.append(new_and)
print('nested_with_singleton', len(to_do_terms))
for term in to_do_terms:
if filter_semtype:
d = get_metrics(term, analysis_type, corpus, run_type, filter_semtype, semtype)
else:
d = get_metrics(term, analysis_type, corpus, run_type, filter_semtype)
n = term.count('&')
m = term.count('|')
d['merge'] = term
d['n_terms'] = m + n + 1
frames = [metrics, pd.DataFrame(d, index=[0])]
metrics = pd.concat(frames, ignore_index=True, sort=False)
elif expression_type == 'paired':
m = list(expressions(systems, 2))
test = list(expressions(m, 2))
to_do_terms = []
for t in test:
# format Boolean sentence for parse tree
t = '(' + " ".join(str(x) for x in t).replace('[','(').replace(']',')').replace("'","").replace(",","").replace(" ","") + ')'
if t.count('biomedicus') != 2 and t.count('clamp') != 2 and t.count('ctakes') != 2 and t.count('metamap') != 2 and t.count('quick_umls') != 2:
if t.count('&') != 3 and t.count('|') != 3:
to_do_terms.append(t)
if len(systems) == 5:
for i in systems:
if i not in t:
#print('('+t+'&'+i+')')
#print('('+t+'|'+i+')')
new_and = '('+t+'&'+i+')'
new_or = '('+t+'|'+i+')'
to_do_terms.append(new_and)
to_do_terms.append(new_or)
print('paired', len(to_do_terms))
for term in to_do_terms:
if filter_semtype:
d = get_metrics(term, analysis_type, corpus, run_type, filter_semtype, semtype)
else:
d = get_metrics(term, analysis_type, corpus, run_type, filter_semtype)
n = term.count('&')
m = term.count('|')
d['merge'] = term
d['n_terms'] = m + n + 1
frames = [metrics, pd.DataFrame(d, index=[0])]
metrics = pd.concat(frames, ignore_index=True, sort=False)
return metrics
# write to file
def generate_ensemble_metrics(metrics, analysis_type, corpus, ensemble_type, filter_semtype, semtype = None):
now = datetime.now()
timestamp = datetime.timestamp(now)
file_name = corpus + '_all_'
# drop exact matches:
metrics = metrics.drop_duplicates()
if ensemble_type == 'merge':
metrics = metrics.sort_values(by=['n_terms', 'merge'])
file_name += 'merge_'
elif ensemble_type == 'vote':
file_name += '_'
#metrics = metrics.drop_duplicates(subset=['TP', 'FN', 'FP', 'n_sys', 'precision', 'recall', 'F', 'TM', 'TP/FN', 'TM', 'n_terms'])
file = file_name + analysis_type + '_' + run_type +'_'
if filter_semtype:
file += semtype
geometric_mean(metrics).to_csv(analysisConf.data_dir + file + str(timestamp) + '.csv')
print(geometric_mean(metrics))
# control ensemble run
def ensemble_control(systems, analysis_type, corpus, run_type, filter_semtype, semtypes = None):
if filter_semtype:
for semtype in semtypes:
test = get_valid_systems(systems, semtype)
print('SYSTEMS FOR SEMTYPE', semtype, 'ARE', test)
metrics = run_ensemble(test, analysis_type, corpus, filter_semtype, expression_type, semtype)
if (expression_type == 'nested_with_singleton' and len(test) == 5) or expression_type in ['nested', 'paired', 'single']:
generate_ensemble_metrics(metrics, analysis_type, corpus, ensemble_type, filter_semtype, semtype)
else:
metrics = run_ensemble(systems, analysis_type, corpus, filter_semtype, expression_type)
generate_ensemble_metrics(metrics, analysis_type, corpus, ensemble_type, filter_semtype)
# ad hoc query for performance evaluation
def get_merge_data(boolean_expression: str, analysis_type: str, corpus: str, run_type: str, filter_semtype, semtype = None):
"""
Traverse binary parse tree representation of Boolean sentence
:params: boolean expression in form of '(<annotator_engine_name1><boolean operator><annotator_engine_name2>)'
analysis_type (string value of: 'entity', 'cui', 'full') used to filter set of reference and system annotations
:return: dictionary with values needed for confusion matrix
"""
if filter_semtype:
ann = get_ref_ann(analysis_type, corpus, filter_semtype, semtype)
else:
ann = get_ref_ann(analysis_type, corpus, filter_semtype)
sentence = Sentence(boolean_expression)
pt = make_parse_tree(sentence.sentence)
r = process_sentence(pt, sentence, analysis_type, corpus, filter_semtype, semtype)
if run_type == 'overlap' and rtype != 6:
if filter_semtype:
((TP, TN, FP, FN),(p,r,f1)) = vectorized_cooccurences(r, analysis_type, corpus, filter_semtype, semtype)
else:
((TP, TN, FP, FN),(p,r,f1)) = vectorized_cooccurences(r, analysis_type, corpus, filter_semtype)
# TODO: validate against ann1/sys1 where val = 1
# total by number chars
system_n = TP + FP
reference_n = TP + FN
d = cm_dict(FN, FP, TP, system_n, reference_n)
print(d)
elif run_type == 'exact':
c = get_cooccurences(ann, r.system_merges, analysis_type, corpus) # get matches, FN, etc.
if c.ref_system_match > 0: # compute confusion matrix metrics and write to dictionary -> df
# get dictionary of confusion matrix metrics
d = cm_dict(c.ref_only, c.system_only, c.ref_system_match, c.system_n, c.ref_n)
print('cm', d)
else:
pass
# get matched data from merge
return r.system_merges # merge_eval(reference_only, system_only, reference_system_match, system_n, reference_n)
# ad hoc query for performance evaluation
def get_sys_merge(boolean_expression: str, analysis_type: str, corpus: str, run_type: str, filter_semtype, semtype = None):
"""
Traverse binary parse tree representation of Boolean sentence
:params: boolean expression in form of '(<annotator_engine_name1><boolean operator><annotator_engine_name2>)'
analysis_type (string value of: 'entity', 'cui', 'full') used to filter set of reference and system annotations
:return: dictionary with values needed for confusion matrix
"""
# if filter_semtype:
# ann = get_ref_ann(analysis_type, corpus, filter_semtype, semtype)
# else:
# ann = get_ref_ann(analysis_type, corpus, filter_semtype)
sentence = Sentence(boolean_expression)
pt = make_parse_tree(sentence.sentence)
for semtype in semtypes:
test = get_valid_systems(systems, semtype)
r = process_sentence(pt, sentence, analysis_type, corpus, filter_semtype, semtype)
# if run_type == 'overlap' and rtype != 6:
# if filter_semtype:
# ((TP, TN, FP, FN),(p,r,f1)) = vectorized_cooccurences(r, analysis_type, corpus, filter_semtype, semtype)
# else:
# ((TP, TN, FP, FN),(p,r,f1)) = vectorized_cooccurences(r, analysis_type, corpus, filter_semtype)
# # TODO: validate against ann1/sys1 where val = 1
# # total by number chars
# system_n = TP + FP
# reference_n = TP + FN
# d = cm_dict(FN, FP, TP, system_n, reference_n)
# print(d)
# elif run_type == 'exact':
# c = get_cooccurences(ann, r.system_merges, analysis_type, corpus) # get matches, FN, etc.
# if c.ref_system_match > 0: # compute confusion matrix metrics and write to dictionary -> df
# # get dictionary of confusion matrix metrics
# d = cm_dict(c.ref_only, c.system_only, c.ref_system_match, c.system_n, c.ref_n)
# print('cm', d)
# else:
# pass
# get matched data from merge
return r.system_merges # merge_eval(reference_only, system_only, reference_system_match, system_n, reference_n)
# majority vote
def vectorized_annotations(ann):
docs = get_docs(corpus)
labels = ["concept"]
out= []
for n in range(len(docs)):
a1 = list(ann.loc[ann["case"] == docs[n][0]].itertuples(index=False))
a = label_vector(docs[n][1], a1, labels)
out.append(a)
return out
def flatten_list(l):
return [item for sublist in l for item in sublist]
def get_reference_vector(analysis_type, corpus, filter_semtype, semtype = None):
ref_ann = get_ref_ann(analysis_type, corpus, filter_semtype, semtype)
df = ref_ann.copy()
df = df.drop_duplicates(subset=['begin','end','case'])
df['label'] = 'concept'
cols_to_keep = ['begin', 'end', 'case', 'label']
ref = df[cols_to_keep].copy()
test = vectorized_annotations(ref)
ref = np.asarray(flatten_list(test), dtype=np.int32)
return ref
def majority_overlap_sys(systems, analysis_type, corpus, filter_semtype, semtype = None):
d = {}
cols_to_keep = ['begin', 'end', 'case', 'label']
sys_test = []
for system in systems:
sys_ann = get_sys_data(system, analysis_type, corpus, filter_semtype, semtype)
df = sys_ann.copy()
df['label'] = 'concept'
df = df.rename(index=str, columns={"note_id": "case"})
sys = df[cols_to_keep].copy()  # get_sys_data already filtered to this system and dropped the 'system' column
test = vectorized_annotations(sys)
d[system] = flatten_list(test)
sys_test.append(d[system])
output = sum(np.array(sys_test))
n = int(len(systems) / 2)
#print(n)
if ((len(systems) % 2) != 0):
vote = np.where(output > n, 1, 0)
else:
vote = np.where(output > n, 1,
(np.where(output == n, random.randint(0, 1), 0)))
return vote
def majority_overlap_vote_out(ref, vote, corpus):
TP, TN, FP, FN = confused(ref, vote)
print(TP, TN, FP, FN)
system_n = TP + FP
reference_n = TP + FN
d = cm_dict(FN, FP, TP, system_n, reference_n)
d['TN'] = TN
d['corpus'] = corpus
print(d)
metrics = pd.DataFrame(d, index=[0])
return metrics
# control vote run
def majority_vote(systems, analysis_type, corpus, run_type, filter_semtype, semtypes = None):
print(semtypes, systems)
if filter_semtype:
for semtype in semtypes:
test = get_valid_systems(systems, semtype)
print('SYSTEMS FOR SEMTYPE', semtype, 'ARE', test)
if run_type == 'overlap':
ref = get_reference_vector(analysis_type, corpus, filter_semtype, semtype)
vote = majority_overlap_sys(test, analysis_type, corpus, filter_semtype, semtype)
metrics = majority_overlap_vote_out(ref, vote, corpus)
#generate_ensemble_metrics(metrics, analysis_type, corpus, ensemble_type, filter_semtype, semtype)
elif run_type == 'exact':
sys = majority_exact_sys(test, analysis_type, corpus, filter_semtype, semtype)
d = majority_exact_vote_out(sys, filter_semtype, semtype)
metrics = pd.DataFrame(d, index=[0])
elif run_type == 'cui':
sys = majority_cui_sys(test, analysis_type, corpus, filter_semtype, semtype)
d = majority_cui_vote_out(sys, filter_semtype, semtype)
metrics = pd.DataFrame(d, index=[0])
metrics['systems'] = ','.join(test)
generate_ensemble_metrics(metrics, analysis_type, corpus, ensemble_type, filter_semtype, semtype)
else:
if run_type == 'overlap':
ref = get_reference_vector(analysis_type, corpus, filter_semtype)
vote = majority_overlap_sys(systems, analysis_type, corpus, filter_semtype)
metrics = majority_overlap_vote_out(ref, vote, corpus)
elif run_type == 'exact':
sys = majority_exact_sys(systems, analysis_type, corpus, filter_semtype)
d = majority_exact_vote_out(sys, filter_semtype)
metrics = pd.DataFrame(d, index=[0])
elif run_type == 'cui':
sys = majority_cui_sys(systems, analysis_type, corpus, filter_semtype)
d = majority_cui_vote_out(sys, filter_semtype)
metrics = pd.DataFrame(d, index=[0])
metrics['systems'] = ','.join(systems)
generate_ensemble_metrics(metrics, analysis_type, corpus, ensemble_type, filter_semtype)
print(metrics)
def majority_cui_sys(systems, analysis_type, corpus, filter_semtype, semtype = None):
cols_to_keep = ['cui', 'note_id', 'system']
df = pd.DataFrame()
for system in systems:
if filter_semtype:
sys = get_sys_data(system, analysis_type, corpus, filter_semtype, semtype)
else:
sys = get_sys_data(system, analysis_type, corpus, filter_semtype)
sys = sys[sys['system'] == system][cols_to_keep].drop_duplicates()
frames = [df, sys]
df = pd.concat(frames)
return df
def majority_cui_vote_out(sys, filter_semtype, semtype = None):
sys = sys.astype(str)
sys['value_cui'] = list(zip(sys.cui, sys.note_id.astype(str)))
sys['count'] = sys.groupby(['value_cui'])['value_cui'].transform('count')
n = int(len(systems) / 2)
if ((len(systems) % 2) != 0):
sys = sys[sys['count'] > n]
else:
# https://stackoverflow.com/questions/23330654/update-a-dataframe-in-pandas-while-iterating-row-by-row
for i in sys.index:
if sys.at[i, 'count'] == n:
sys.at[i, 'count'] = random.choice([1,len(systems)])
sys = sys[sys['count'] > n]
sys = sys.drop_duplicates(subset=['value_cui', 'cui', 'note_id'])
ref = get_ref_ann(analysis_type, corpus, filter_semtype, semtype)
c = get_cooccurences(ref, sys, analysis_type, corpus) # get matches, FN, etc.
if c.ref_system_match > 0: # compute confusion matrix metrics and write to dictionary -> df
# get dictionary of confusion matrix metrics
print(cm_dict(c.ref_only, c.system_only, c.ref_system_match, c.system_n, c.ref_n))
return cm_dict(c.ref_only, c.system_only, c.ref_system_match, c.system_n, c.ref_n)
def majority_exact_sys(systems, analysis_type, corpus, filter_semtype, semtype = None):
cols_to_keep = ['begin', 'end', 'note_id', 'system']
df = pd.DataFrame()
for system in systems:
if filter_semtype:
sys = get_sys_data(system, analysis_type, corpus, filter_semtype, semtype)
else:
sys = get_sys_data(system, analysis_type, corpus, filter_semtype)
sys = sys[sys['system'] == system][cols_to_keep].drop_duplicates()
frames = [df, sys]
df = pd.concat(frames)
return df
def majority_exact_vote_out(sys, filter_semtype, semtype = None):
sys['span'] = list(zip(sys.begin, sys.end, sys.note_id.astype(str)))
sys['count'] = sys.groupby(['span'])['span'].transform('count')
n = int(len(systems) / 2)
if ((len(systems) % 2) != 0):
sys = sys[sys['count'] > n]
else:
# https://stackoverflow.com/questions/23330654/update-a-dataframe-in-pandas-while-iterating-row-by-row
for i in sys.index:
if sys.at[i, 'count'] == n:
sys.at[i, 'count'] = random.choice([1,len(systems)])
sys = sys[sys['count'] > n]
sys = sys.drop_duplicates(subset=['span', 'begin', 'end', 'note_id'])
ref = get_ref_ann(analysis_type, corpus, filter_semtype, semtype)
c = get_cooccurences(ref, sys, analysis_type, corpus) # get matches, FN, etc.
if c.ref_system_match > 0: # compute confusion matrix metrics and write to dictionary -> df
# get dictionary of confusion matrix metrics
print(cm_dict(c.ref_only, c.system_only, c.ref_system_match, c.system_n, c.ref_n))
return cm_dict(c.ref_only, c.system_only, c.ref_system_match, c.system_n, c.ref_n)
#ensemble_type = 'vote'
#filter_semtype = False
#majority_vote(systems, analysis_type, corpus, run_type, filter_semtype, semtypes)
#%%time
def main():
'''
corpora: i2b2, mipacq, fv017
analyses: entity only (exact span), cui by document, full (aka entity and cui on exact span/exact cui)
systems: ctakes, biomedicus, clamp, metamap, quick_umls
TODO -> Vectorization (entity only) -> done:
add switch for use of TN on single system performance evaluations -> done
add switch for overlap matching versus exact span -> done
-> Other tasks besides concept extraction
'''
analysisConf = AnalysisConfig()
print(analysisConf.systems, analysisConf.corpus_config())
if (rtype == 1):
print(semtypes, systems)
if filter_semtype:
for semtype in semtypes:
test = get_valid_systems(systems, semtype)
print('SYSTEMS FOR SEMTYPE', semtype, 'ARE', test)
generate_metrics(analysis_type, corpus, filter_semtype, semtype)
else:
generate_metrics(analysis_type, corpus, filter_semtype)
elif (rtype == 2):
print('run_type:', run_type)
if filter_semtype:
print(semtypes)
ensemble_control(analysisConf.systems, analysis_type, corpus, run_type, filter_semtype, semtypes)
else:
ensemble_control(analysisConf.systems, analysis_type, corpus, run_type, filter_semtype)
elif (rtype == 3):
t = ['concept_jaccard_score_false']
test_systems(analysis_type, analysisConf.systems, corpus)
test_count(analysis_type, corpus)
test_ensemble(analysis_type, corpus)
elif (rtype == 4):
if filter_semtype:
majority_vote(systems, analysis_type, corpus, run_type, filter_semtype, semtypes)
else:
majority_vote(systems, analysis_type, corpus, run_type, filter_semtype)
elif (rtype == 5):
# control filter_semtype in get_sys_data, get_ref_n and generate_metrics. TODO consolidate.
# # run single ad hoc statement
statement = '((ctakes&biomedicus)|metamap)'
def ad_hoc(analysis_type, corpus, statement):
sys = get_merge_data(statement, analysis_type, corpus, run_type, filter_semtype)
sys = sys.rename(index=str, columns={"note_id": "case"})
sys['label'] = 'concept'
ref = get_reference_vector(analysis_type, corpus, filter_semtype)
sys = vectorized_annotations(sys)
sys = np.asarray(flatten_list(list(sys)), dtype=np.int32)
return ref, sys
ref, sys = ad_hoc(analysis_type, corpus, statement)
elif (rtype == 6): # 5 w/o evaluation
statement = '(ctakes|biomedicus)' #((((A∧C)∧D)∧E)∨B)->for covid pipeline
def ad_hoc(analysis_type, corpus, statement):
print(semtypes)
for semtype in semtypes:
sys = get_sys_merge(statement, analysis_type, corpus, run_type, filter_semtype, semtype)
sys = sys.rename(index=str, columns={"note_id": "case"})
return sys
sys = ad_hoc(analysis_type, corpus, statement).sort_values(by=['case', 'begin'])
sys.drop_duplicates(['cui', 'case', 'polarity'],inplace=True)
sys.to_csv(data_directory + 'test_new.csv')
test = sys.copy()
test.drop(['begin','end','case','polarity'], axis=1, inplace=True)
test.to_csv(data_directory + 'test_dedup_new.csv')
if __name__ == '__main__':
#%prun main()
main()
print('done!')
pass
```
|
github_jupyter
|
```
import numpy as np
import scipy as sp
import pandas as pd
import urllib.request
import os
import shutil
import tarfile
import matplotlib.pyplot as plt
from sklearn import datasets, cross_validation, metrics
from sklearn.preprocessing import KernelCenterer
%matplotlib notebook
```
First we need to download the Caltech256 dataset.
```
DATASET_URL = r"http://homes.esat.kuleuven.be/~tuytelaa/"\
"unsup/unsup_caltech256_dense_sift_1000_bow.tar.gz"
DATASET_DIR = "../../../projects/weiyen/data"
filename = os.path.split(DATASET_URL)[1]
dest_path = os.path.join(DATASET_DIR, filename)
if os.path.exists(dest_path):
print("{} exists. Skipping download...".format(dest_path))
else:
with urllib.request.urlopen(DATASET_URL) as response, open(dest_path, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
print("Dataset downloaded. Extracting files...")
tar = tarfile.open(dest_path)
tar.extractall(path=DATASET_DIR)
print("Files extracted.")
tar.close()
path = os.path.join(DATASET_DIR, "bow_1000_dense/")
```
Calculate the multi-class KNFST model for multi-class novelty detection.
**Input**
* `K`: N x N kernel matrix containing similarities of the N training samples
* `labels`: N x 1 column vector containing the multi-class labels of the N training samples
**Output**
* `proj`: the KNFST projection
* `target_points`: the projections of the training data into the null space
Load the dataset into memory
```
ds = datasets.load_files(path)
ds.data = np.vstack([np.fromstring(txt, sep='\t') for txt in ds.data])
data = ds.data
target = ds.target
```
Select a few "known" classes
```
classes = np.unique(target)
num_class = len(classes)
num_known = 5
known = np.random.choice(classes, num_known)
mask = np.array([y in known for y in target])
X_train = data[mask]
y_train = target[mask]
idx = y_train.argsort()
X_train = X_train[idx]
y_train = y_train[idx]
print(X_train.shape)
print(y_train.shape)
def _hik(x, y):
'''
Implements the histogram intersection kernel.
'''
return np.minimum(x, y).sum()
from scipy.linalg import svd
def nullspace(A, eps=1e-12):
u, s, vh = svd(A)
null_mask = (s <= eps)
    null_space = np.compress(null_mask, vh, axis=0)
    return np.transpose(null_space)
A = np.array([[2,3,5],[-4,2,3],[0,0,0]])
np.array([-4,2,3]).dot(nullspace(A))
```
Train the model, and obtain the projection and class target points.
```
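# Comments added for clarity: learn() implements the KNFST training step described above.
# It centers the kernel matrix, eigendecomposes it to get a basis of the kernelized
# feature space, builds the within-class scatter t_sw = H H^T in that basis, and projects
# onto its null space; the per-class means of the projected training samples become the
# class "target points" used later for novelty scoring.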
def learn(K, labels):
classes = np.unique(labels)
if len(classes) < 2:
raise Exception("KNFST requires 2 or more classes")
n, m = K.shape
if n != m:
raise Exception("Kernel matrix must be quadratic")
centered_k = KernelCenterer().fit_transform(K)
basis_values, basis_vecs = np.linalg.eigh(centered_k)
basis_vecs = basis_vecs[:,basis_values > 1e-12]
basis_values = basis_values[basis_values > 1e-12]
basis_values = np.diag(1.0/np.sqrt(basis_values))
basis_vecs = basis_vecs.dot(basis_values)
L = np.zeros([n,n])
for cl in classes:
for idx1, x in enumerate(labels == cl):
for idx2, y in enumerate(labels == cl):
if x and y:
L[idx1, idx2] = 1.0/np.sum(labels==cl)
M = np.ones([m,m])/m
H = (((np.eye(m,m)-M).dot(basis_vecs)).T).dot(K).dot(np.eye(n,m)-L)
t_sw = H.dot(H.T)
eigenvecs = nullspace(t_sw)
if eigenvecs.shape[1] < 1:
eigenvals, eigenvecs = np.linalg.eigh(t_sw)
eigenvals = np.diag(eigenvals)
min_idx = eigenvals.argsort()[0]
eigenvecs = eigenvecs[:, min_idx]
proj = ((np.eye(m,m)-M).dot(basis_vecs)).dot(eigenvecs)
target_points = []
for cl in classes:
k_cl = K[labels==cl, :]
pt = np.mean(k_cl.dot(proj), axis=0)
target_points.append(pt)
return proj, np.array(target_points)
kernel_mat = metrics.pairwise_kernels(X_train, metric=_hik)
proj, target_points = learn(kernel_mat, y_train)
def squared_euclidean_distances(x, y):
n = np.shape(x)[0]
m = np.shape(y)[0]
distmat = np.zeros((n,m))
for i in range(n):
for j in range(m):
buff = x[i,:] - y[j,:]
distmat[i,j] = buff.dot(buff.T)
return distmat
def assign_score(proj, target_points, ks):
projection_vectors = ks.T.dot(proj)
sq_dist = squared_euclidean_distances(projection_vectors, target_points)
scores = np.sqrt(np.amin(sq_dist, 1))
return scores
auc_scores = []
classes = np.unique(target)
num_known = 5
for n in range(20):
num_class = len(classes)
known = np.random.choice(classes, num_known)
mask = np.array([y in known for y in target])
X_train = data[mask]
y_train = target[mask]
idx = y_train.argsort()
X_train = X_train[idx]
y_train = y_train[idx]
sample_idx = np.random.randint(0, len(data), size=1000)
X_test = data[sample_idx,:]
y_labels = target[sample_idx]
# Test labels are 1 if novel, otherwise 0.
y_test = np.array([1 if cl not in known else 0 for cl in y_labels])
# Train model
kernel_mat = metrics.pairwise_kernels(X_train, metric=_hik)
proj, target_points = learn(kernel_mat, y_train)
# Test
ks = metrics.pairwise_kernels(X_train, X_test, metric=_hik)
scores = assign_score(proj, target_points, ks)
auc = metrics.roc_auc_score(y_test, scores)
print("AUC:", auc)
auc_scores.append(auc)
fpr, tpr, thresholds = metrics.roc_curve(y_test, scores)
plt.figure()
plt.plot(fpr, tpr, label='ROC curve')
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve of the KNFST Novelty Classifier')
plt.legend(loc="lower right")
plt.show()
```
|
github_jupyter
|
```
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from random import randint
from numpy import array
from numpy import argmax
from numpy import array_equal
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from keras.models import Model
from keras.layers import Input
from keras.layers import LSTM
from keras.layers import Dense
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
# from google.colab import drive
# drive.mount('/content/drive')
# os.chdir("drive/My Drive/Colab Notebooks/Structural/Project")
```
Dataset Preparation and Split
```
dataset = pd.read_csv('./data/dataset.ultrafltr.csv')
print(dataset)
```
Lengths of sequences
```
data = dataset['sequence'].str.len()
counts, bins = np.histogram(data)
plt.hist(bins[:-1], bins, weights=counts)
df_filtered = dataset[dataset['sequence'].str.len() <= 1000]
print(df_filtered.shape)
data = df_filtered['sequence'].str.len()
counts, bins = np.histogram(data)
plt.hist(bins[:-1], bins, weights=counts)
dataset = df_filtered
measurer = np.vectorize(len)
res1 = measurer(dataset.values.astype(str)).max(axis=0)[0]
print(res1)
df, df_test = train_test_split(dataset, test_size=0.1)
print(df)
```
Encoding of Amino Acids
```
codes = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',
'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
def create_dict(codes):
char_dict = {}
for index, val in enumerate(codes):
char_dict[val] = index+1
return char_dict
char_dict = create_dict(codes)
def integer_encoding(data):
"""
- Encodes code sequence to integer values.
- 20 common amino acids are taken into consideration
and rest 4 are categorized as 0.
"""
row_encode = []
for code in list(data):
row_encode.append(char_dict.get(code, 0))
return row_encode
```
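As a quick sanity check of the encoder above (assuming the previous cell has been run; the sequences here are made up for illustration), each of the 20 standard amino-acid letters maps to an integer from 1 to 20 and any other character maps to 0:
```
print(integer_encoding("ACDWY"))   # [1, 2, 3, 19, 20]
print(integer_encoding("ACDX"))    # [1, 2, 3, 0] -> unknown code 'X' maps to 0
# one-hot encoding with 20 + 1 classes, as used in get_dataset() below
print(to_categorical(integer_encoding("ACDX"), num_classes=21).shape)   # (4, 21)
```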
Model
```
# prepare data for the LSTM
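# Note (added for clarity): the decoder is trained with teacher forcing. target_in is
# the target label sequence shifted right by one step (a 0 is prepended and the last
# label dropped), so at each position the decoder receives the previous true label
# and learns to predict the current one.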
def get_dataset(df):
X1, X2, y = list(), list(), list()
for index, row in df.iterrows():
# generate source sequence
source = row['sequence']
# source = source.ljust(res1, '0')
source = integer_encoding(source)
# define padded target sequence
target = row['opm_class']
# target = target.ljust(res1, '0')
target = list(map(int, target))
# create padded input target sequence
target_in = [0] + target[:-1]
# encode
src_encoded = to_categorical(source, num_classes=20+1)
tar_encoded = to_categorical(target, num_classes=2)
tar2_encoded = to_categorical(target_in, num_classes=2)
# store
X1.append(src_encoded)
X2.append(tar2_encoded)
y.append(tar_encoded)
return array(X1), array(X2), array(y)#, temp_df
# Creating the first Dataframe using dictionary
X1, X2, y = get_dataset(df)
X1 = pad_sequences(X1, maxlen=res1, padding='post', truncating='post')
X2 = pad_sequences(X2, maxlen=res1, padding='post', truncating='post')
y = pad_sequences(y, maxlen=res1, padding='post', truncating='post')
# returns train, inference_encoder and inference_decoder models
def define_models(n_input, n_output, n_units):
# define training encoder
encoder_inputs = Input(shape=(None, n_input))
encoder = LSTM(n_units, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
encoder_states = [state_h, state_c]
# define training decoder
decoder_inputs = Input(shape=(None, n_output))
decoder_lstm = LSTM(n_units, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)
decoder_dense = Dense(n_output, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
# define inference encoder
encoder_model = Model(encoder_inputs, encoder_states)
# define inference decoder
decoder_state_input_h = Input(shape=(n_units,))
decoder_state_input_c = Input(shape=(n_units,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model([decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states)
# return all models
return model, encoder_model, decoder_model
train, infenc, infdec = define_models(20+1, 2, 128)
train.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
train.summary()
# train model
train.fit([X1, X2], y, epochs=10)
```
Prediction
```
# decode a one hot encoded string
def one_hot_decode(encoded_seq):
return [argmax(vector) for vector in encoded_seq]
def compare_seqs(source, target):
correct = 0
for i in range(len(source)):
if source[i] == target[i]:
correct += 1
return correct
# generate target given source sequence
def predict_sequence(infenc, infdec, source, n_steps, cardinality):
# encode
state = infenc.predict(source)
# start of sequence input
target_seq = array([0.0 for _ in range(cardinality)]).reshape(1, 1, cardinality)
# collect predictions
output = list()
for t in range(n_steps):
# predict next char
yhat, h, c = infdec.predict([target_seq] + state)
# store prediction
output.append(yhat[0,0,:])
# update state
state = [h, c]
# update target sequence
target_seq = yhat
return array(output)
# evaluate LSTM
X1, X2, y = get_dataset(df_test)
X1 = pad_sequences(X1, maxlen=res1, padding='post', truncating='post')
X2 = pad_sequences(X2, maxlen=res1, padding='post', truncating='post')
y = pad_sequences(y, maxlen=res1, padding='post', truncating='post')
accuracies = []
for i in range(len(X1)):
row = X1[i]
row = row.reshape((1, row.shape[0], row.shape[1]))
target = predict_sequence(infenc, infdec, row, res1, 2)
curr_acc = compare_seqs(one_hot_decode(target), one_hot_decode(y[i]))/res1
accuracies.append(curr_acc)
print(f'Sequence{i} Accuracy: {curr_acc}')
total_acc = 0
for i in range(len(accuracies)):
total_acc += accuracies[i]
print('Total Accuracy: %.2f%%' % (float(total_acc)/float(len(X1))*100.0))
```
|
github_jupyter
|
# R Bootcamp Part 5
## stargazer, xtable, robust standard errors, and fixed effects regressions
This bootcamp will help us get more comfortable using **stargazer** and **xtable** to produce high-quality results and summary statistics tables, and using `felm()` from the **lfe** package for regressions (both fixed effects and regular OLS).
For today, let's load a few packages and read in a dataset on residential water use for residents in Alameda and Contra Costa Counties.
## Preamble
Here we'll load in our necessary packages and the data file
```
library(tidyverse)
library(haven)
library(lfe)
library(stargazer)
library(xtable)
library(lmtest)    # needed below for coeftest()
library(sandwich)  # needed below for vcovHC()
# load in wateruse data, add in measure of gallons per day "gpd"
waterdata <- read_dta("wateruse.dta") %>%
mutate(gpd = (unit*748)/num_days)
head(waterdata)
```
# Summary Statistics Tables with xtable
`xtable` is a useful package for producing custom summary statistics tables. Let's say we're interested in summarizing water use ($gpd$) and degree days ($degree\_days$) according to whether a lot is less than or greater than one acre ($lotsize\_1$) or more than 4 acres ($lotsize\_4$):
`homesize <- waterdata %>%
select(hh, billingcycle, gpd, degree_days, lotsize) %>%
drop_na() %>%
mutate(lotsize_1 = ifelse((lotsize < 1), "< 1", ">= 1"),
lotsize_4 = ifelse((lotsize > 4), "> 4", "<= 4"))
head(homesize)`
We know how to create summary statistics for these two variables for both levels of $lotsize\_1$ and $lotsize\_4$ using `summarise()`:
`sumstat_1 <- homesize %>%
group_by(lotsize_1) %>%
summarise(mean_gpd = mean(gpd),
mean_degdays = mean(degree_days))
sumstat_1`
`sumstat_4 <- homesize %>%
group_by(lotsize_4) %>%
summarise(mean_gpd = mean(gpd),
mean_degdays = mean(degree_days))
sumstat_4`
And now we can use `xtable()` to put them into the same table!
`full <- xtable(cbind(t(sumstat_1), t(sumstat_4)))
rownames(full)[1] <- "Lotsize Group"
colnames(full) <- c("lotsize_1 = 1", "lotsize_1 = 0", "lotsize_4 = 0", "lotsize_4 =1")
full`
We now have a table `full` that is an xtable object.
We can also spit this table out in html or latex form if needed using the `print.xtable()` function on our xtable `full`, specifying `type = "html"`:
`print.xtable(full, type = "html")`
Copy and paste the html code here to see how it appears
# Regression Tables in Stargazer
`stargazer` is a super useful package for producing professional-quality regression tables. While it defaults to producing LaTeX format tables (a typesetting language a lot of economists use), for use in our class we can also produce html code that can easily be copied into text cells and formatted perfectly.
If we run the following three regressions:
\begin{align*} GPD_{it} &= \beta_0 + \beta_1 degree\_days_{it} + \beta_2 precip_{it} ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~(1)\\
GPD_{it} &= \beta_0 + \beta_1 degree\_days_{it} + \beta_2 precip_{it} + \beta_3 lotsize_{i}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~(2)\\
GPD_{it} &= \beta_0 + \beta_1 degree\_days_{it} + \beta_2 precip_{it} + \beta_3 lotsize_{i} + \beta_4 Homeval_i~~~~~~~~~~~~~~~~~~(3)
\end{align*}
We might want to present the results side by side in the same table so that we can easily compare coefficients from one column to the other. To do that with `stargazer`, we can
1. Run each regression, storing them in memory
2. Run `stargazer(reg1, reg2, reg3, ..., type )` where the first arguments are all the regression objects we want in the table, and telling R what type of output we want
If we specify `type = "text"`, we'll get the table displayed directly in the output window:
`reg_a <- lm(gpd ~ degree_days + precip, waterdata)
reg_b <- lm(gpd ~ degree_days + precip + lotsize, waterdata)
reg_c <- lm(gpd ~ degree_days + precip + lotsize + homeval, waterdata)`
`stargazer(reg_a, reg_b, reg_c, type = "text")`
And if we specify `type = "html"`, we'll get html code that we need to copy and paste into a text/markdown cell:
`stargazer(reg_a, reg_b, reg_c, type = "html")`
Now all we need to do is copy and paste that html code from the output into a text cell and we've got our table!
(copy your code here)
And we get a nice looking regression table with all three models side by side! This makes it easy to see how the coefficient on lot size falls when we add in home value, letting us quickly figure out the sign of correlation between the two.
## Table Options
Stargazer has a ton of different options for customizing the look of our table with optional arguments, including
* `title` lets us add a custom title
* `column.labels` lets you add text labels to the columns
* `covariate.labels` lets us specify custom labels for all our variables other than the variable names. Specify each label in quotations in the form of a vector with `c()`
* `ci = TRUE` adds in confidence intervals (95\% intervals by default, but you can change this to 99\% intervals with `ci.level = 0.99`)
* `intercept.bottom = FALSE` will move the constant to the top of the table
* `digits` lets you choose the number of decimal places to display
* `notes` lets you add some notes at the bottom
For example, we could customize the above table as
`stargazer(reg_a, reg_b, reg_c, type = "text",
title = "Water Use, Weather, and Home Characteristics",
column.labels = c("Weather", "With Lotsize", "With HomeVal"),
covariate.labels = c("Intercept", "Degree Days", "Precipitation (mm)", "Lot Size (Acres)", "Home Value (USD)"),
intercept.bottom = FALSE,
digits = 2,
notes = "Isn't stargazer neat?"
)`
# Summary Statistics Tables in Stargazer
Can we use Stargazer for summary statistics tables too? You bet we can!
Stargazer especially comes in handy if we have a lot of variables we want to summarize and one or no variables we want to group them on. This approach works especially well with `across()` within `summarise()`.
For example, let's say we wanted to summarise the median and variance of `gpd`, `precip`, and `degree_days` by whether the home was built after 1980 or not. Rather than create separate tables for all of the variables and merge them together like with xtable, we can just summarise across with
`ss_acr <- mutate(waterdata, pre_80 = ifelse(yearbuilt < 1980, "1. Pre-1980", "2. 1980+")) %>%
group_by(pre_80) %>%
summarise(across(.cols = c(gpd, precip, degree_days),
.fns = list(Median = median, Variance = var)))
ss_acr`
Note that `ifelse()` is a function that follows the format
`ifelse(Condition, Value if True, Value if False)`
Here our condition is that the $yearbuilt$ variable is less than 1980. If it’s true, we want this
new variable to take on the label "1. Pre-1980", and otherwise
be "2. 1980+".
This table then contains everything we want, but having it displayed "wide" like this is a bit tough to see. If we wanted to display it "long" where there is one column for each of pre-1980 and post-1980 homes, we can just use the transpose function `t()`. Placing that within the `stargazer()` call and specifying that we want html code then gets us
`stargazer(t(ss_acr), type = "html")`
(copy your html code here)
## Heteroskedasticity-Robust Standard Errors
There are often times where you want to use heteroskedasticity-robust standard errors in place of the normal kind to account for situations where we might be worried about violating our homoskedasticity assumption. To add robust standard errors to our table, we'll take advantage of the `lmtest` and `sandwich` packages (that we already loaded in the preamble).
If we want to see the coefficient table from Regression B with robust standard errors, we can use the `coeftest()` function as specified below:
`coeftest(reg_b, vcov = vcovHC(reg_b, type = "HC1"))`
What the `vcovHC(reg_b, type = "HC1")` part is doing is telling R we want to calculate standard errors using the heteroskedasticity-robust approach (i.e. telling it a specific form of the variance-covariance matrix between our residuals). `coeftest()` then prints the nice output table.
While this is a nice way to view the robust standard errors in a summary-style table, sometimes we want to extract the robust standard errors so we can use them elsewhere - like in stargazer!
To get a vector of robust standard errors from Regression B, we can use the following:
`robust_b <- sqrt(diag(vcovHC(reg_b, type = "HC1")))`
`robust_b`
Which matches the robust standard errors using `coeftest()` earlier. But woah there, that's a function nested in a function nested in *another function*! Let's break this down step-by-step:
`vcov_b <- vcovHC(reg_b, type = "HC1")`
This first `vcov_b` object is getting the entire variance-covariance matrix for our regression coefficients. Since we again specified `type = "HC1"`, we ensure we get the heteroskedasticity-robust version of this matrix (if we had instead specified `type = "const"` we would be assuming homoskedasticity and would get our usual variance estimates).
What this looks like is
$$VCOV_b = \begin{bmatrix}
\widehat{Var}(\hat \beta_0) & \widehat{Cov}(\hat \beta_0, \hat\beta_1) & \widehat{Cov}(\hat \beta_0, \hat\beta_2) \\
\widehat{Cov}(\hat \beta_1, \hat\beta_0) & \widehat{Var}(\hat \beta_1) & \widehat{Cov}(\hat \beta_1, \hat\beta_2) \\
\widehat{Cov}(\hat \beta_2, \hat\beta_0) & \widehat{Cov}(\hat \beta_2, \hat\beta_1) & \widehat{Var}(\hat \beta_2)
\end{bmatrix}$$
Where the element in the $i$th row and $j$th column is the estimated covariance between $\hat\beta_i$ and $\hat\beta_j$. Note that when $i = j$ on the main diagonal, we get the variance estimate for $\hat \beta_i$!
You can check this by running the following lines:
`vcov_b <- vcovHC(reg_b, type = "HC1")
vcov_b`
`var_b <- diag(vcov_b)`
The `diag()` function extracts this main diagonal, giving us a vector of our robust estimated variances
`robust_b <- sqrt(var_b)`
And taking the square root gets us our standard error estimates for our $\hat\beta$'s!
See the process by running the following lines:
`var_b <- diag(vcov_b)
var_b`
`robust_b <- sqrt(var_b)
robust_b`
## Stargazer and Heteroskedasticity-Robust Standard Errors
Now that we know how to get our robust standard errors, we can grab them for all three of our regressions and add them to our beautiful stargazer table:
`robust_a <- sqrt(diag(vcovHC(reg_a, type = "HC1")))
robust_b <- sqrt(diag(vcovHC(reg_b, type = "HC1")))
robust_c <- sqrt(diag(vcovHC(reg_c, type = "HC1")))`
`stargazer(reg_a, reg_b, reg_c,
type = "html",
se = list(robust_a, robust_b, robust_c),
omit.stat = "f")`
Here we're adding the robust standard errors to `stargazer()` with the `se =` argument (combining them together in the right order as a list). I'm also omitting the overall F test at the bottom with `omit.stat = "f"` since we'd need to correct that too for heteroskedasticity.
Try running this code below to see how the standard errors change when we use robust standard errors:
Copy and paste the table code here and run the cell to see it formatted.
Now that looks pretty good, though note that the less than signs in the note for significance labels don't appear right. This is because html is reading the < symbol as a piece of code and not the math symbol. To get around this, you can add dollar signs around the < signs in the html code for the note to have the signs display properly:
`<sup>*</sup>p $<$ 0.1; <sup>**</sup>p $<$ 0.05; <sup>***</sup>p $<$ 0.01</td>`
# Fixed Effects Regression
Today we will practice with fixed effects regressions in __R__. We have two different ways to estimate the model, and we will see how to do both and the situations in which we might favor one versus the other.
Let's give this a try using the dataset `wateruse.dta`. The subset of households are high water users, people who used over 1,000 gallons per billing cycle. We have information on their water use, weather during the period, as well as information on the city and zipcode of where the home is located, and information on the size and value of the house.
Suppose we are interested in running the following panel regression of residential water use:
$$ GPD_{it} = \beta_0 + \beta_1 degree\_days_{it} + \beta_2 precip_{it} ~~~~~~~~~~~~~~~~~~~~~~~(1)$$
Where $GPD$ is the gallons used per day by household $i$ in billing cycle $t$, $degree\_days$ the count of degree days experienced by the household in that billing cycle (degree days are a measure of cumulative time spent above a certain temperature threshold), and $precip$ the amount of precipitation in millimeters.
`reg1 <- lm(gpd ~ degree_days + precip, data = waterdata)
summary(reg1)`
Here we obtain an estimate of $\hat\beta_1 = 0.777$, telling us that an additional degree day per billing cycle is associated with an additional $0.7769$ gallon used per day. These billing cycles are roughly two months long, so this suggests an increase of roughly 47 gallons per billing cycle. Our estimate is statistically significant at all conventional levels, suggesting residential water use does respond to increased exposure to high heat.
We estimate a statistically insignificant coefficient on additional precipitation, which tells us that on average household water use in our sample doesn't adjust to how much it rains.
We might think that characteristics of the home impact how much water is used there, so we add in some home controls:
$$ GPD_{it} = \beta_0 + \beta_1 degree\_days_{it} + \beta_2 precip_{it} + \beta_3 lotsize_{i} + \beta_4 homesize_i + \beta_5 num\_baths_i + \beta_6 num\_beds_i + \beta_7 homeval_i~~~~~~~~~~~~~~~~~~~~~~~(2)$$
`reg2 <- lm(gpd ~ degree_days + precip + lotsize + homesize + num_baths + num_beds + homeval, data = waterdata)
summary(reg2)`
Our coefficient on $degree\_days$ remains statistically significant and doesn't change much, so we find that $\hat\beta_1$ is robust to the addition of home characteristics. Of these characteristics, we obtain statistically significant coefficients on the size of the lot in acres ($lotsize$), the size of the home in square feet ($homesize$), and the number of bedrooms in the home ($num_beds$).
We get a curious result for $\hat\beta_6$: for each additional bedroom in the home we predict that water use will *fall* by 48 gallons per day.
### Discussion: what might be driving this effect?
Since there are likely a number of sources of omitted variable bias in the previous model, we think it might be worth including some fixed effects in our model. These will allow us to control for some of the unobserved sources of OVB without having to measure them directly!
## Method 1: Fixed Effects with lm()
Up to this point we have been running our regressions using the `lm()` function. We can still use `lm()` for our fixed effects models, but it takes some more work and gets increasingly time-intensive as datasets get large.
Recall that we can write our general panel fixed effects model as
$$ y_{it} = \beta x_{it} + \mathbf{a}_i + \mathbf{d}_t + u_{it} $$
* $y$ our outcome of interest, which varies in both the time and cross-sectional dimensions
* $x_{it}$ our set of time-varying unit characteristics
* $\mathbf{a}_i$ our set of unit fixed effects
* $\mathbf{d}_t$ our time fixed effects
We can estimate this model in `lm()` provided we have variables in our dataframe that correspond to each level of $a_i$ and $d_t$. This means we'll have to generate them before we can run any regression.
### Generating Dummy Variables
In order to include fixed effects for our regression, we can first generate the set of dummy variables that we want. For example, if we want to include a set of city fixed effects in our model, we need to generate them.
We can do this in a few ways.
1. First, we can use `mutate()` and add a separate line for each individual city:
`fe_1 <- waterdata %>%
mutate(city_1 = as.numeric((city==1)),
city_2 = as.numeric((city ==2)),
city_3 = as.numeric((city ==3))) %>%
select(n, hh, city, city_1, city_2, city_3)
head(fe_1)`
This can be super tedious though when we have a bunch of different levels of our variable that we want to make fixed effects for. In this case, we have 27 different cities.
2. Alternatively, we can use the `spread()` function to help us out. Here we add in a constant variable `v` that is equal to one in all rows, and a copy of city that adds "city_" to the front of the city number. Then we pass the data to `spread`, telling it to split the variable `cty` into dummy variables for all its levels, with all the "false" cases filled with zeros.
`fe_2 <- waterdata %>%
select(n, city, billingcycle)`
`fe_2 %>%
mutate(v = 1, cty = paste0("city_", city)) %>%
spread(cty, v, fill = 0)`
That is much easier!
This is a useful approach if you want to produce summary statistics for the fixed effects (i.e. what share of the sample lives in each city), but isn't truly necessary.
Alternatively, we can tell R to read our fixed effects variables as factors:
`lm(gpd ~ degree_days + precip + factor(city), data = waterdata)`
`factor()` around $city$ tells R to split city into dummy variables for each unique value it takes. R will then drop the first level when we run the regression - in our case making the first city our omitted group.
`reg3 <- lm(gpd ~ degree_days + precip + factor(city), data = waterdata)
summary(reg3)`
Now we have everything we need to run the regression
$$ GPD_{it} = \beta_0 + \beta_1 degree\_days_{it} + \beta_2 precip_{it} + \mathbf{a}_i + \mathbf{d}_t~~~~~~~~~~~~~~~~~~~~~~~(2)$$
Where $\mathbf{a}_i$ are our city fixed effects, and $\mathbf{d}_t$ our billing cycle fixed effects:
`fe_reg1 <- lm(gpd ~ degree_days + precip + factor(city) + factor(billingcycle), data = waterdata)
summary(fe_reg1)`
__R__ automatically chose the first dummy variable for each set of fixed effects (city 1 and billing cycle 1) to leave out as our omitted group.
Now that we account for which billing cycle we're in (i.e. whether we're in the winter or whether we're in the summer), we find that the coefficient on $degree\_days$ is now much smaller and statistically insignificant. This makes sense, as we were falsely attributing the extra water use that comes from seasonality to temperature on its own. Now that we control for the season we're in via billing cycle fixed effects, we find that deviations in temperature exposure during a billing cycle don't result in dramatically higher water use within the sample.
### Discussion: Why did we drop the home characteristics from our model?
## Method 2: Fixed Effects with felm()
Alternatively, we could do everything way faster using the `felm()` function from the package __lfe__. This package doesn't require us to produce all the dummy variables by hand. Further, it performs the background math way faster so will be much quicker to estimate models using large datasets and many variables.
The syntax we use is now
`felm(y ~ x1 + x2 + ... + xk | FE_1 + FE_2 + ..., data = df)`
* The first section $y \sim x1 + x2 +... xk$ is our formula, written the same way as with `lm()` - but omitting the fixed effects
* We now add a `|` and in the second section we specify our fixed effects. Here we say $FE\_1 + FE\_2$ which tells __R__ to include fixed effects for each level of the variables $FE\_1$ and $FE\_2$.
* we add the data source after the comma, as before.
Let's go ahead and try this now with our water data model:
`fe_reg2 <- felm(gpd ~ degree_days + precip | city + billingcycle, data = waterdata)
summary(fe_reg2)`
And we estimate the exact same coefficients on $degree\_days$ and $precip$ as in the case where we specified everything by hand! We didn't have to mutate our data or add any variables. The one potential downside is that this approach doesn't report the fixed effects themselves by default. The tradeoff is that `felm` runs a lot faster than `lm`, especially with large datasets.
We can also recover the fixed effects with `getfe()`:
`getfe(fe_reg2, se = TRUE, robust = TRUE)`
the argument `se = TRUE` tells it to produce standard errors too, and `robust = TRUE` further indicates that we want heteroskedasticity-robust standard errors.
Note that this approach doesn't give you the same reference groups as before, but we get the same relative values. Note that before the coefficient on $city2$ was 301.7 and now is 73.9. But the coefficient on $city1$ is -227.8, and if we subtract $city1$ from $city2$ to get the difference in averages for city 2 relative to city 1 we get $73.9 - (-227.8) = 301.7$, the same as before!
# Fixed Effects Practice Question #1
#### From a random sample of agricultural yields Y (1000 dollars per acre) for region $i$ in year $t$ for the US, we have estimated the following eqation:
\begin{align*} \widehat{\log(Y)}_{it} &= 0.49 + .01 GE_{it} ~~~~ R^2 = .32\\
&~~~~~(.11) ~~~~ (.01) ~~~~ n = 1526 \end{align*}
#### (a) Interpret the results on the Genetically engineered ($GE$) technology on yields. (follow SSS= Sign Size Significance)
#### (b) Suppose $GE$ is used more on the West Coast, where crop yields are also higher. How would the estimated effect of GE change if we include a West Coast region dummy variable in the equation? Justify your answer.
#### (c) If we include region fixed effects, would they control for the factors in (b)? Justify your answer.
#### (d) If yields have been generally improving over time and GE adoption was only recently introduced in the USA, what would happen to the coefficient of GE if we included year fixed effects?
# Fixed Effects Practice Question #2
#### A recent paper investigates whether advertisement for Viagra causes increases in birth rates in the USA. Apparently, advertising for products, including Viagra, happens on TV and reaches households that have a TV within a Marketing region and does not happen in areas outside a designated marketing region. What the authors do is look at hospital birth rates in regions inside and near the advertising region border and collect data on dollars per 100 people (Ads) for a certain time, and compare those to the birth rates in hospitals located outside and near the advertising region designated border. They conduct a panel data analysis and estimate the following model:
$$ Births_{it} = \beta_0 + \beta_1 Ads + \beta_2 Ads^2 + Z_i + M_t + u_{it}$$
#### Where $Z_i$ are zipcode fixed effects and $M_t$ monthly fixed effects.
#### (a) Why do the authors include Zip Code Fixed Effects? In particular, what would be a variable that they are controlling for when adding Zip Code fixed effects that could cause a problem when interpreting the marginal effect of ad spending on birth rates? What would that (solved) problem be?
#### (b) Why do they add month fixed effects?
|
github_jupyter
|
[](https://colab.research.google.com/github/eirasf/GCED-AA2/blob/main/lab4/lab4_parte1.ipynb)
# Lab 4: Neural Networks Using Keras with Regularization
## Part 1. Early Stopping
### Overfitting
The problem of overfitting is that the learned solution fits the training data very well but does not generalize adequately when new data appear.
# Regularization
Once overfitting has been diagnosed, it is time to try different techniques that attempt to reduce the variance without increasing the bias too much, so that the model generalizes better. The regularization techniques we will look at in this lab are:
1. *Early stopping*. Stops training the network when the error starts to increase.
1. Penalties based on the norm of the parameters (both the L1 and the L2 norm).
1. *Dropout*. Widely used in deep learning, it "switches off" some neurons to avoid overfitting.
In this first part of Lab 4 we will focus on **Early Stopping**.
## Prerequisites. Installing packages
For the first part of this Lab 4 we will need TensorFlow, TensorFlow-Datasets and other packages to set the random seed so that the results can be reproduced.
```
import tensorflow as tf
import tensorflow_datasets as tfds
import os
import numpy as np
import random
#Set the seed so that the results can be reproduced
seed=1234
os.environ['PYTHONHASHSEED']=str(seed)
tf.random.set_seed(seed)
np.random.seed(seed)
random.seed(seed)
```
We also load the APIs we are going to use so that the code is more readable.
```
#Keras API, the Sequential model and the layers we will use in our model
from tensorflow import keras
from keras.models import Sequential
from keras.layers import InputLayer
from keras.layers import Dense
#For plotting
from matplotlib import pyplot
#Needed for EarlyStopping
from keras.callbacks import EarlyStopping
```
## Loading the dataset
Once again we use the *german_credit_numeric* dataset from the previous labs, although this time we split it into a training subset, a validation subset (which will be used to stop training) and a test subset to evaluate the model's performance.
```
# Load the dataset
ds_train = tfds.load('german_credit_numeric', split='train[:40%]', as_supervised=True).batch(128)
ds_val = tfds.load('german_credit_numeric', split='train[40%:50%]', as_supervised=True).batch(128)
ds_test = tfds.load('german_credit_numeric', split='train[50%:]', as_supervised=True).batch(128)
```
We will also set the loss function, the optimization algorithm used for training and the metric used to evaluate the performance of the trained model.
```
#Specify the loss function, the optimization algorithm and the metric used to evaluate performance
fn_perdida = tf.keras.losses.BinaryCrossentropy()
optimizador = tf.keras.optimizers.Adam(0.001)
metrica = tf.keras.metrics.AUC()
```
## Creating a *Sequential* model
We create a *Sequential* model just as was done in Lab 3, Part 2.
```
tamano_entrada = 24
h0_size = 20
h1_size = 10
h2_size = 5
#TODO - define the Sequential model
model = ...
#TODO - include the input layer and the 4 Dense layers of the model
......
#Build the model and show its summary
model.build()
print(model.summary())
```
Complete the *compile* method.
```
#TODO - specify the parameters of the compile method
model.compile(loss=fn_perdida,
optimizer=optimizador,
metrics=[metrica])
```
We call the *fit* method using the training set as input, specifying the number of epochs and also passing the *validation_data* argument, which allows a subset of the data to be used for validation. The differences between training and validation can be seen in the plot.
**NOTE**: Observe the differences in results between training, validation and test.
```
#Set the number of epochs
num_epochs = 700
# Save the weights before training, so the model can be reset later for comparisons.
pesos_preentrenamiento = model.get_weights()
#TODO - train the model using the training set as input,
#indicating the number of epochs and the validation set
history = model.fit(....)
# plot training history
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='val')
pyplot.legend()
pyplot.show()
#TODO - call evaluate on the test set, storing the result
result = model.evaluate(.....)
print(model.metrics_names)
print(result)
```
## Using Early Stopping during training
Keras provides a *Callback* to perform early stopping (*keras.callbacks.EarlyStopping*). This way, we can stop training when a given measure (specified in the *monitor* argument) gets worse (the *mode* argument indicates whether that measure is expected to be minimized, *min*, or maximized, *max*). Optionally, the user can provide the *patience* argument to specify how many *epochs* the training should wait before stopping.
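To make the interface concrete, here is a generic illustration of constructing the callback (the monitored quantity and the values below are arbitrary choices for illustration, not necessarily the ones to use in the exercise):
```
# Illustration only: stop when the validation loss has not improved for 50 epochs
# and roll back to the best weights seen during training.
example_es = EarlyStopping(monitor='val_loss', mode='min', patience=50,
                           restore_best_weights=True)
# It would then be passed to fit() through the callbacks argument, e.g.:
# model.fit(ds_train, epochs=num_epochs, validation_data=ds_val, callbacks=[example_es])
```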
**TO-DO**: Run the training several times, changing the different parameters to see the differences in learning. Does it always stop at the same *epoch*? Check the performance on the test set.
```
# simple early stopping
#TODO - specify the measure to monitor, the mode and the patience
es = EarlyStopping(
monitor=....
mode=...
patience=...
)
# Before training, forget the previous training run by restoring the initial weights
model.set_weights(pesos_preentrenamiento)
#TODO - train the model using the training set as input,
#indicating the number of epochs, the validation set and the EarlyStopping callback
history = model.fit(...., callbacks=[es])
# plot training history
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='val')
pyplot.legend()
pyplot.show()
```
Evaluation on the test set (not used for training).
```
#TODO - call evaluate on the test set, storing the result
result = model.evaluate(....)
print(model.metrics_names)
print(result)
```
|
github_jupyter
|
# DSCI 525: Web and Cloud Computing
## Milestone 1: Tackling Big Data on Computer
### Group 13
Authors: Ivy Zhang, Mike Lynch, Selma Duric, William Xu
## Table of contents
- [Download the data](#1)
- [Combining data CSVs](#2)
- [Load the combined CSV to memory and perform a simple EDA](#3)
- [Perform a simple EDA in R](#4)
- [Reflection](#5)
### Imports
```
import re
import os
import glob
import zipfile
import requests
from urllib.request import urlretrieve
import json
import pandas as pd
import numpy as np
import pyarrow.feather as feather
from memory_profiler import memory_usage
import pyarrow.dataset as ds
import pyarrow as pa
import pyarrow.parquet as pq
import dask.dataframe as dd
%load_ext rpy2.ipython
%load_ext memory_profiler
```
## 1. Download the data <a name="1"></a>
1. Download the data from figshare to local computer using the figshare API.
2. Extract the zip file programmatically.
```
# Attribution: DSCI 525 lecture notebook
# Necessary metadata
article_id = 14096681 # unique identifier of the article on figshare
url = f"https://api.figshare.com/v2/articles/{article_id}"
headers = {"Content-Type": "application/json"}
output_directory = "figsharerainfall/"
response = requests.request("GET", url, headers=headers)
data = json.loads(response.text)
files = data["files"]
%%time
files_to_dl = ["data.zip"]
for file in files:
if file["name"] in files_to_dl:
os.makedirs(output_directory, exist_ok=True)
urlretrieve(file["download_url"], output_directory + file["name"])
%%time
with zipfile.ZipFile(os.path.join(output_directory, "data.zip"), 'r') as f:
f.extractall(output_directory)
```
## 2. Combining data CSVs <a name="2"></a>
1. Use one of the following options to combine data CSVs into a single CSV (Pandas, Dask). **We used the Pandas option.**
2. When combining the csv files, we added an extra column called "model" that identifies the model (this column is populated from the file name, e.g. for the file name "SAM0-UNICON_daily_rainfall_NSW.csv" the model name is SAM0-UNICON); see the small check after this list.
3. Compare run times and memory usages of these options on different machines within the team, and summarize observations.
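As a quick, self-contained check of the filename-to-model extraction used in the combining cell below (the path here is just an example built from the file name mentioned above):
```
import re

fname = "figsharerainfall/SAM0-UNICON_daily_rainfall_NSW.csv"
# same regex as in the cell below: the run of non-'/' characters immediately followed by "_d"
print(re.findall(r'[^\/]+(?=\_d)', fname)[0])   # -> SAM0-UNICON
```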
```
%%time
%memit
# Shows time that regular python takes to merge file
# Join all data together
## here we are using a normal python way of merging the data
# use_cols = ["time", "lat_min", "lat_max", "lon_min","lon_max","rain (mm/day)"]
files = glob.glob('figsharerainfall/*.csv')
df = pd.concat((pd.read_csv(file, index_col=0)
.assign(model=re.findall(r'[^\/]+(?=\_d)', file)[0])
for file in files)
)
df.to_csv("figsharerainfall/combined_data.csv")
%%time
df = pd.read_csv("figsharerainfall/combined_data.csv")
%%sh
du -sh figsharerainfall/combined_data.csv
print(df.shape)
df.head()
```
**Summary of run times and memory usages:**
***William***
- Combining files:
- peak memory: 95.41 MiB, increment: 0.26 MiB
- CPU times: user 7min 28s, sys: 31 s, total: 7min 59s
- Wall time: 9min 17s
- Reading the combined file:
- Wall time: 1min 51s
***Mike***
- Combining files:
- peak memory: 168.59 MiB, increment: 0.12 MiB
- CPU times: user 3min 29s, sys: 5.09 s, total: 3min 34s
- Wall time: 3min 34s
- Reading the combined file:
- Wall time: 37.1 s
***Selma***
- Combining files:
- peak memory: 150.54 MiB, increment: 0.23 MiB
- CPU times: user 6min 46s, sys: 23.1 s, total: 7min 9s
- Wall time: 7min 29s
- Reading the combined file:
- Wall time: 1min 19s
***Ivy***
- Combining files:
- peak memory: 156.23 MiB, increment: 0.00 MiB
- CPU times: user 5min 14s, sys: 18.2 s, total: 5min 32s
- Wall time: 5min 45s
- Reading the combined file:
- Wall time: 1min 30s
## 3. Load the combined CSV to memory and perform a simple EDA <a name="3"></a>
### Establish a baseline for memory usage
```
# First load in the dataset using default settings for dtypes
df_eda = pd.read_csv("figsharerainfall/combined_data.csv", parse_dates=True, index_col='time')
df_eda.head()
# As we can see below, dtypes are float64 and object
df_eda.dtypes
# Measure the memory usage when representing numbers using float64 dtype
print(f"Memory usage with float64: {df_eda.memory_usage().sum() / 1e6:.2f} MB")
%%time
%memit
# Now perform a simple EDA with pandas describe function
df_eda.describe()
```
Baseline memory and time data:
- Memory usage with float64: 3500.78 MB
- peak memory: 698.22 MiB, increment: 0.35 MiB
- CPU times: user 16.2 s, sys: 13.8 s, total: 30 s
- Wall time: 36.5 s
### Effects of changing dtypes on memory usage
```
# Now load in the dataset using float32 dtype to represent numbers
colum_dtypes = {'lat_min': np.float32, 'lat_max': np.float32, 'lon_min': np.float32, 'lon_max': np.float32, 'rain (mm/day)': np.float32, 'model': str}
df_eda = pd.read_csv("figsharerainfall/combined_data.csv",parse_dates=True, index_col='time', dtype=colum_dtypes)
df_eda.head()
# As we can see below, dtypes are float32 and object
df_eda.dtypes
print(f"Memory usage with float32: {df_eda.memory_usage().sum() / 1e6:.2f} MB")
%%time
%memit
# Now perform a simple EDA with pandas describe function
df_eda.describe()
```
Time and memory data when using different dtypes:
- Memory usage with float32: 2250.50 MB
- peak memory: 609.06 MiB, increment: 0.36 MiB
- CPU times: user 11.3 s, sys: 5.72 s, total: 17 s
- Wall time: 22.7 s
### Effects of loading a smaller subset of columns on memory usage
```
# Now load only a subset of columns from the dataset
df_eda = pd.read_csv("figsharerainfall/combined_data.csv",parse_dates=True, index_col='time', usecols=['time', 'lat_min', 'rain (mm/day)'])
df_eda.head()
# As we can see below, dtypes are float64 by default
df_eda.dtypes
print(f"Memory usage with reduced number of columns: {df_eda.memory_usage().sum() / 1e6:.2f} MB")
%%time
%memit
# Now perform a simple EDA with pandas describe function
df_eda.describe()
```
Time and memory data when using column subset:
- Memory usage with reduced number of columns: 1500.33 MB
- peak memory: 340.50 MiB, increment: 0.40 MiB
- CPU times: user 7.13 s, sys: 5.6 s, total: 12.7 s
- Wall time: 18.2 s
### Summary
#### Using float32 vs. baseline float64 dtype to perform a simple EDA:
- The memory usage decreased from 3500.78 MB to 2250.50 MB when representing numbers using float32 instead of float64
- When using the pandas describe function to perform a simple EDA, we found that the reported peak memory decreased (from 698.22 MiB to 609.06 MiB) when using the float32 dtype for the numerical columns, while the increment was essentially unchanged.
- The wall time taken to perform the EDA also decreased substantially to 22.7s from the baseline of 36.5s.
#### Using a reduced number of columns compared to the baseline to perform a simple EDA:
- The memory usage decreased from 3500.78 MB to 1500.33 MB when using a subset of columns from the dataset
- When using the pandas describe function to perform a simple EDA, we found that the reported peak memory decreased (from 698.22 MiB to 340.50 MiB) when using fewer columns.
- The wall time taken to perform the EDA also decreased substantially to 18.2s from the baseline of 36.5s.
## 4. Perform a simple EDA in R <a name="4"></a>
We will transform our dataframe into different formats before loading into R.
#### I. Default memory format + feather file format
```
%%time
feather.write_feather(df, "figsharerainfall/combined_data.feather")
```
#### II. dask + parquet file format
```
ddf = dd.read_csv("figsharerainfall/combined_data.csv")
%%time
dd.to_parquet(ddf, 'figsharerainfall/combined_data.parquet')
```
#### III. Arrow memory format + parquet file format
```
%%time
%%memit
dataset = ds.dataset("figsharerainfall/combined_data.csv", format="csv")
table = dataset.to_table()
%%time
pq.write_to_dataset(table, 'figsharerainfall/rainfall.parquet')
```
#### IV. Arrow memory format + feather file format
```
%%time
feather.write_feather(table, 'figsharerainfall/rainfall.feather')
%%sh
du -sh figsharerainfall/combined_data.csv
du -sh figsharerainfall/combined_data.parquet
du -sh figsharerainfall/rainfall.parquet
du -sh figsharerainfall/rainfall.feather
```
### Transfer different formats of data from Python to R
It is usually not efficient to directly transfer a Python dataframe to R due to the serialization and deserialization involved in the process. Also, we observe that the Arrow memory format performs better than the default memory format. Thus, our next step is to further compare the performance of transferring the Arrow-feather file and the Arrow-parquet file to R.
#### I. Read Arrow-parquet file to R
```python
%%time
%%R
library(arrow)
start_time <- Sys.time()
r_table <- arrow::read_parquet("figsharerainfall/rainfall.parquet/e5a0076fe71f4bdead893e20a935897b.parquet")
print(class(r_table))
library(dplyr)
result <- r_table %>% count(model)
end_time <- Sys.time()
print(result)
print(end_time - start_time)
```

*Note that the code above has been commented out to ensure the workbook is reproducible. Please check Reflection for more details.*
#### II. Read Arrow-feather file to R
```
%%time
%%R
library(arrow)
start_time <- Sys.time()
r_table <- arrow::read_feather("figsharerainfall/rainfall.feather")
print(class(r_table))
library(dplyr)
result <- r_table %>% count(model)
end_time <- Sys.time()
print(result)
print(end_time - start_time)
```
#### Summary of format selection
Based on the data storage and processing time comparison above, our preferred format among all is **parquet using the Arrow package**. Files in this format take much less space to store, and it takes less time to write them and to read them into R.
## Reflection <a name="5"></a>
After some trial and error, all team members were individually able to successfully run the analysis from start to finish, however during the process we did experience some problems which included the following:
- William had an issue with `%load_ext rpy2.ipython` despite the successful environment installation on his MacOS. After many hours of debugging, rpy2 finally worked after specifying the Python version in the course yml file. The solution is to add `python=3.8.6` to the 525.yml file under `dependencies:` and reinstall the environment.
- Even though the file sizes were only 5 GB, we actually required 10 GB of disk space since we needed to download and unzip the data.
- We got some confusing results by accidentally re-downloading the dataset without first deleting it since we were then combining twice as many files in the next step.
- We noticed that parquet file name under the parquet folder is generated differently every time we re-run the workbook. If we keep current file name and re-run all cells, `arrow::read_parquet` function would return an error message indicating that the file "e5a0076fe71f4bdead893e20a935897b.parquet" does not exist in the directory. For reproducibility reason, we decided to comment out the code but record the output for further comparison.
|
github_jupyter
|
## The 1cycle policy
```
from fastai.gen_doc.nbdoc import *
from fastai.vision import *
from fastai.callbacks import *
```
## What is 1cycle?
This Callback allows us to easily train a network using Leslie Smith's 1cycle policy. To learn more about the 1cycle technique for training neural networks check out [Leslie Smith's paper](https://arxiv.org/pdf/1803.09820.pdf) and for a more graphical and intuitive explanation check out [Sylvain Gugger's post](https://sgugger.github.io/the-1cycle-policy.html).
To use our 1cycle policy we will need an [optimum learning rate](https://sgugger.github.io/how-do-you-find-a-good-learning-rate.html). We can find this learning rate by using a learning rate finder which can be called by using [`lr_finder`](/callbacks.lr_finder.html#callbacks.lr_finder). It will do a mock training by going over a large range of learning rates, then plot them against the losses. We will pick a value a bit before the minimum, where the loss still improves. Our graph would look something like this:

Here anything between `3x10^-2` and `10^-2` is a good idea.
Next we will apply the 1cycle policy with the chosen learning rate as the maximum learning rate. The original 1cycle policy has three steps:
1. We progressively increase our learning rate from lr_max/div_factor to lr_max and at the same time we progressively decrease our momentum from mom_max to mom_min.
2. We do the exact opposite: we progressively decrease our learning rate from lr_max to lr_max/div_factor and at the same time we progressively increase our momentum from mom_min to mom_max.
3. We further decrease our learning rate from lr_max/div_factor to lr_max/(div_factor x 100) and we keep momentum steady at mom_max.
This gives the following form:
<img src="imgs/onecycle_params.png" alt="1cycle parameteres" width="500">
Unpublished work has shown even better results by using only two phases: the same phase 1, followed by a second phase where we do a cosine annealing from lr_max to 0. The momentum goes from mom_min to mom_max by following the symmetric cosine (see graph a bit below).
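To make the shape of this two-phase schedule concrete, here is a rough numpy sketch (an illustration, not fastai's actual implementation; `pct_start`, `div_factor` and the momentum values are placeholder choices):
```
import numpy as np

def one_cycle_sketch(n_iter, lr_max=5e-2, div_factor=25., moms=(0.95, 0.85), pct_start=0.3):
    # phase 1: linear warm-up of the learning rate, linear decrease of momentum
    n_up = int(n_iter * pct_start)
    lr_up = np.linspace(lr_max / div_factor, lr_max, n_up)
    mom_down = np.linspace(moms[0], moms[1], n_up)
    # phase 2: cosine annealing of the learning rate down to 0, momentum back up with the mirrored cosine
    t = np.linspace(0., np.pi, n_iter - n_up)
    lr_down = lr_max * (1 + np.cos(t)) / 2
    mom_up = moms[1] + (moms[0] - moms[1]) * (1 - np.cos(t)) / 2
    return np.concatenate([lr_up, lr_down]), np.concatenate([mom_down, mom_up])

lrs, moms = one_cycle_sketch(1000)
print(lrs[0], lrs.max(), lrs[-1])   # starts at lr_max/div_factor, peaks at lr_max, ends at 0
```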
## Basic Training
The one cycle policy allows us to train very quickly, a phenomenon termed [_superconvergence_](https://arxiv.org/abs/1708.07120). To see this in practice, we will first train a CNN and see how our results compare when we use the [`OneCycleScheduler`](/callbacks.one_cycle.html#OneCycleScheduler) with [`fit_one_cycle`](/train.html#fit_one_cycle).
```
path = untar_data(URLs.MNIST_SAMPLE)
data = ImageDataBunch.from_folder(path)
model = simple_cnn((3,16,16,2))
learn = Learner(data, model, metrics=[accuracy])
```
First lets find the optimum learning rate for our comparison by doing an LR range test.
```
learn.lr_find()
learn.recorder.plot()
```
Here 5e-2 looks like a good value, a tenth of the minimum of the curve. That's going to be the highest learning rate in 1cycle so let's try a constant training at that value.
```
learn.fit(2, 5e-2)
```
We can also see what happens when we train at a lower learning rate
```
model = simple_cnn((3,16,16,2))
learn = Learner(data, model, metrics=[accuracy])
learn.fit(2, 5e-3)
```
## Training with the 1cycle policy
Now to do the same thing with 1cycle, we use [`fit_one_cycle`](/train.html#fit_one_cycle).
```
model = simple_cnn((3,16,16,2))
learn = Learner(data, model, metrics=[accuracy])
learn.fit_one_cycle(2, 5e-2)
```
This gets the best of both worlds, and we can see how we get far better accuracy and a far lower loss in the same number of epochs. It's possible to reach similarly good results by training at constant learning rates that we progressively diminish, but it will take far longer.
Here is the schedule of the lrs (left) and momentum (right) that the new 1cycle policy uses.
```
learn.recorder.plot_lr(show_moms=True)
show_doc(OneCycleScheduler)
```
Create a [`Callback`](/callback.html#Callback) that handles the hyperparameters settings following the 1cycle policy for `learn`. `lr_max` should be picked with the [`lr_find`](/train.html#lr_find) test. In phase 1, the learning rates goes from `lr_max/div_factor` to `lr_max` linearly while the momentum goes from `moms[0]` to `moms[1]` linearly. In phase 2, the learning rates follows a cosine annealing from `lr_max` to 0, as the momentum goes from `moms[1]` to `moms[0]` with the same annealing.
```
show_doc(OneCycleScheduler.steps, doc_string=False)
```
Build the [`Scheduler`](/callback.html#Scheduler) for the [`Callback`](/callback.html#Callback) according to `steps_cfg`.
### Callback methods
You don't call these yourself - they're called by fastai's [`Callback`](/callback.html#Callback) system automatically to enable the class's functionality.
```
show_doc(OneCycleScheduler.on_train_begin, doc_string=False)
```
Initiate the parameters of a training for `n_epochs`.
```
show_doc(OneCycleScheduler.on_batch_end, doc_string=False)
```
Prepares the hyperparameters for the next batch.
## Undocumented Methods - Methods moved below this line will intentionally be hidden
## New Methods - Please document or move to the undocumented section
```
show_doc(OneCycleScheduler.on_epoch_end)
```
```
from keras.layers import Input, Dense, Activation
from keras.layers import Maximum, Concatenate
from keras.models import Model
from keras.optimizers import adam_v2
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.svm import SVC
from Ensemble_Classifiers import Ensemble_Classifier
from sklearn.model_selection import train_test_split
import numpy as np
global seed
seed = 0
class MalGAN():
def __init__(self, blackbox, X, Y, threshold):
self.apifeature_dims = 69
self.z_dims = 30
self.generator_layers = [self.apifeature_dims+self.z_dims, 32, 32, 64 , self.apifeature_dims]
# self.generator_layers = [self.apifeature_dims+self.z_dims, 64, 64, 128 , self.apifeature_dims]
self.substitute_detector_layers = [self.apifeature_dims, 64, 64, 1]
# self.substitute_detector_layers = [self.apifeature_dims, 128, 128, 1]
self.blackbox = blackbox
optimizer = adam_v2.Adam(learning_rate=0.0002, beta_1=0.5)
self.X = X
self.Y = Y
self.threshold = threshold
# Build and Train blackbox_detector
self.blackbox_detector = self.build_blackbox_detector()
# Build and compile the substitute_detector
self.substitute_detector = self.build_substitute_detector()
self.substitute_detector.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# Build the generator
self.generator = self.build_generator()
# The generator takes malware and noise as input and generates adversarial malware examples
example = Input(shape=(self.apifeature_dims,))
noise = Input(shape=(self.z_dims,))
input = [example, noise]
malware_examples = self.generator(input)
# The discriminator takes generated images as input and determines validity
validity = self.substitute_detector(malware_examples)
# The combined model (stacked generator and substitute_detector)
# Trains the generator to fool the discriminator
self.combined = Model(input, validity)
self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
# For the combined model we will only train the generator
self.substitute_detector.trainable = False
def build_blackbox_detector(self):
if self.blackbox in ['SVM']:
blackbox_detector = SVC(kernel = 'linear')
elif self.blackbox in ['GB']:
blackbox_detector = GradientBoostingClassifier(random_state=seed)
elif self.blackbox in ['SGD']:
blackbox_detector = SGDClassifier(random_state=seed)
elif self.blackbox in ['DT']:
blackbox_detector = DecisionTreeClassifier(random_state=seed)
elif self.blackbox in ['Ensem']:
blackbox_detector = Ensemble_Classifier()
return blackbox_detector
def build_generator(self):
example = Input(shape=(self.apifeature_dims,))
noise = Input(shape=(self.z_dims,))
x = Concatenate(axis=1)([example, noise])
for dim in self.generator_layers[1:]:
x = Dense(dim)(x)
x = Activation(activation='tanh')(x)
x = Maximum()([example, x])
generator = Model([example, noise], x, name='generator')
generator.summary()
return generator
def build_substitute_detector(self):
input = Input(shape=(self.substitute_detector_layers[0],))
x = input
for dim in self.substitute_detector_layers[1:]:
x = Dense(dim)(x)
x = Activation(activation='sigmoid')(x)
substitute_detector = Model(input, x, name='substitute_detector')
substitute_detector.summary()
return substitute_detector
def load_data(self):
x_ben, x_ran,y_ben, y_ran = self.X[:self.threshold], self.X[self.threshold:], self.Y[:self.threshold], self.Y[self.threshold:]
return (x_ran, y_ran), (x_ben, y_ben)
def train(self, epochs, batch_size=32):
# Load and Split the dataset
(xmal, ymal), (xben, yben) = self.load_data()
xtrain_mal, xtest_mal, ytrain_mal, ytest_mal = train_test_split(xmal, ymal, test_size=0.50)
xtrain_ben, xtest_ben, ytrain_ben, ytest_ben = train_test_split(xben, yben, test_size=0.50)
bl_xtrain_mal, bl_ytrain_mal, bl_xtrain_ben, bl_ytrain_ben = xtrain_mal, ytrain_mal, xtrain_ben, ytrain_ben
self.blackbox_detector.fit(np.concatenate([xmal, xben]), np.concatenate([ymal, yben]))
ytrain_ben_blackbox = self.blackbox_detector.predict(bl_xtrain_ben)
Original_Train_TPR = self.blackbox_detector.score(bl_xtrain_mal, bl_ytrain_mal)
Original_Test_TPR = self.blackbox_detector.score(xtest_mal, ytest_mal)
Train_TPR, Test_TPR = [Original_Train_TPR], [Original_Test_TPR]
for epoch in range(epochs):
for step in range(xtrain_mal.shape[0] // batch_size):
# ---------------------
# Train substitute_detector
# ---------------------
# Select a random batch of malware examples
idx_mal = np.random.randint(0, xtrain_mal.shape[0], batch_size)
xmal_batch = xtrain_mal[idx_mal]
noise = np.random.normal(0, 1, (batch_size, self.z_dims))
idx_ben = np.random.randint(0, xmal_batch.shape[0], batch_size)
xben_batch = xtrain_ben[idx_ben]
yben_batch = ytrain_ben_blackbox[idx_ben]
# Generate a batch of new malware examples
gen_examples = self.generator.predict([xmal_batch, noise])
ymal_batch = self.blackbox_detector.predict(np.ones(gen_examples.shape)*(gen_examples > 0.5))
# Train the substitute_detector
d_loss_real = self.substitute_detector.train_on_batch(gen_examples, ymal_batch)
d_loss_fake = self.substitute_detector.train_on_batch(xben_batch, yben_batch)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# ---------------------
# Train Generator
# ---------------------
idx = np.random.randint(0, xtrain_mal.shape[0], batch_size)
xmal_batch = xtrain_mal[idx]
noise = np.random.uniform(0, 1, (batch_size, self.z_dims))
# Train the generator
g_loss = self.combined.train_on_batch([xmal_batch, noise], np.zeros((batch_size, 1)))
# Compute Train TPR
noise = np.random.uniform(0, 1, (xtrain_mal.shape[0], self.z_dims))
gen_examples = self.generator.predict([xtrain_mal, noise])
TPR = self.blackbox_detector.score(np.ones(gen_examples.shape) * (gen_examples > 0.5), ytrain_mal)
Train_TPR.append(TPR)
# Compute Test TPR
noise = np.random.uniform(0, 1, (xtest_mal.shape[0], self.z_dims))
gen_examples = self.generator.predict([xtest_mal, noise])
TPR = self.blackbox_detector.score(np.ones(gen_examples.shape) * (gen_examples > 0.5), ytest_mal)
Test_TPR.append(TPR)
print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
if int(epoch) == int(epochs-1):
return d_loss[0], 100*d_loss[1], g_loss
# create the dict to save the D loss, acc and G loss for different classifiers
D_loss_dict, Acc_dict, G_loss_dict = {}, {}, {}
# get the data from Feature-Selector
import pandas as pd
df= pd.read_csv('../dataset/matrix/CLaMP.csv')
df.dtypes.value_counts()
df.columns
# encode categorical column
from sklearn.preprocessing import LabelEncoder
df['packer_type'] = LabelEncoder().fit_transform(df['packer_type'])
df['packer_type'].value_counts()
Y = df['class'].values
X = df.drop('class', axis=1).values
X.shape
from sklearn.preprocessing import MinMaxScaler
X = MinMaxScaler().fit_transform(X)
X
from collections import Counter
Counter(Y)
# load the classifier
for classifier in [ 'SVM', 'SGD', 'DT', 'GB', 'Ensem']:
print('[+] \nTraining the model with {} classifier\n'.format(classifier))
malgan = MalGAN(blackbox=classifier, X=X, Y=Y, threshold = 2488)
d_loss, acc, g_loss = malgan.train(epochs=50, batch_size=32)
D_loss_dict[classifier] = d_loss
Acc_dict[classifier] = acc
G_loss_dict[classifier] = g_loss
print('=====================')
print(D_loss_dict)
print('=====================')
print(Acc_dict)
print('=====================')
print(G_loss_dict)
matrix_dict = {}
for key, value in D_loss_dict.items():
matrix_dict[key] = []
for key, value in D_loss_dict.items():
matrix_dict[key].append(D_loss_dict[key])
matrix_dict[key].append(Acc_dict[key])
matrix_dict[key].append(G_loss_dict[key])
import pandas as pd
df = pd.DataFrame.from_dict(matrix_dict, orient='columns')
df.index= list([ 'D_Loss', 'Acc', 'G_Loss'])
df
import dataframe_image as dfi
dfi.export(df, '64_mal_matrix.png')
```
```
import requests
from IPython.display import Markdown
from tqdm import tqdm, tqdm_notebook
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
import altair as alt
from requests.utils import quote
import os
from datetime import timedelta
from mod import alt_theme
fmt = "{:%Y-%m-%d}"
# Can optionally use number of days to choose dates
n_days = 60
end_date = fmt.format(pd.Timestamp.today())
start_date = fmt.format(pd.Timestamp.today() - timedelta(days=n_days))
renderer = "kaggle"
github_orgs = ["jupyterhub", "jupyter", "jupyterlab", "jupyter-widgets", "ipython", "binder-examples", "nteract"]
bot_names = ["stale", "codecov", "jupyterlab-dev-mode", "henchbot"]
alt.renderers.enable(renderer);
alt.themes.register('my_theme', alt_theme)
alt.themes.enable("my_theme")
# Discourse API key
api = {'Api-Key': os.environ['DISCOURSE_API_KEY'],
'Api-Username': os.environ['DISCOURSE_API_USERNAME']}
# Discourse
def topics_to_markdown(topics, n_list=10):
body = []
for _, topic in topics.iterrows():
title = topic['fancy_title']
slug = topic['slug']
posts_count = topic['posts_count']
url = f'https://discourse.jupyter.org/t/{slug}'
body.append(f'* [{title}]({url}) ({posts_count} posts)')
body = body[:n_list]
return '\n'.join(body)
def counts_from_activity(activity):
counts = activity.groupby('category_id').count()['bookmarked'].reset_index()
counts['parent_category'] = None
for ii, irow in counts.iterrows():
if parent_categories[irow['category_id']] is not None:
counts.loc[ii, 'parent_category'] = parent_categories[irow['category_id']]
counts['category_id'] = counts['category_id'].map(lambda a: category_mapping[a])
counts['parent_category'] = counts['parent_category'].map(lambda a: category_mapping[a] if a is not None else 'parent')
is_parent = counts['parent_category'] == 'parent'
counts.loc[is_parent, 'parent_category'] = counts.loc[is_parent, 'category_id']
counts['parent/category'] = counts.apply(lambda a: a['parent_category']+'/'+a['category_id'], axis=1)
counts = counts.sort_values(['parent_category', 'bookmarked'], ascending=False)
return counts
```
# Community forum activity
The [Jupyter Community Forum](https://discourse.jupyter.org) is a place for Jovyans across the
community to talk about Jupyter tools in interactive computing and how they fit into their
workflows. It's also a place for developers to share ideas, tools, tips, and help one another.
Below are a few updates from activity in the Discourse. For more detailed information about
the activity on the Community Forum, check out these links:
* [The users page](https://discourse.jupyter.org/u) has information about user activity
* [The top posts page](https://discourse.jupyter.org/top) contains a list of top posts, sorted
by various metrics.
```
# Get categories for IDs
url = "https://discourse.jupyter.org/site.json"
resp = requests.get(url, headers=api)
category_mapping = {cat['id']: cat['name'] for cat in resp.json()['categories']}
parent_categories = {cat['id']: cat.get("parent_category_id", None) for cat in resp.json()['categories']}
# Base URL to use
url = "https://discourse.jupyter.org/latest.json"
```
## Topics with lots of likes
"Likes" are a way for community members to say thanks for a helpful post, show their
support for an idea, or generally to share a little positivity with somebody else.
These are topics that have generated lots of likes in recent history.
```
params = {"order": "likes", "ascending": "False"}
resp = requests.get(url, headers=api, params=params)
# Topics with the most likes in recent history
liked = pd.DataFrame(resp.json()['topic_list']['topics'])
Markdown(topics_to_markdown(liked))
```
## Active topics on the Community Forum
These are topics with lots of activity in recent history.
```
params = {"order": "posts", "ascending": "False"}
resp = requests.get(url, headers=api, params=params)
# Topics with the most posts in recent history
posts = pd.DataFrame(resp.json()['topic_list']['topics'])
Markdown(topics_to_markdown(posts))
counts = counts_from_activity(posts)
alt.Chart(data=counts, width=700, height=300, title="Activity by category").mark_bar().encode(
x=alt.X("parent/category", sort=alt.Sort(counts['category_id'].values.tolist())),
y="bookmarked",
color="parent_category"
)
```
## Recently-created topics
These are topics that were recently created, sorted by the amount of activity
in each one.
```
params = {"order": "created", "ascending": "False"}
resp = requests.get(url, headers=api, params=params)
# Sort created by the most posted for recently-created posts
created = pd.DataFrame(resp.json()['topic_list']['topics'])
created = created.sort_values('posts_count', ascending=False)
Markdown(topics_to_markdown(created))
counts = counts_from_activity(created)
alt.Chart(data=counts, width=700, height=300, title="Activity by category").mark_bar().encode(
x=alt.X("parent/category", sort=alt.Sort(counts['category_id'].values.tolist())),
y="bookmarked",
color="parent_category"
)
```
## User activity in the Community Forum
**Top posters**
These people have posted lots of comments, replies, answers, etc in the community forum.
```
def plot_user_data(users, column, sort=False):
plt_data = users.sort_values(column, ascending=False).head(50)
x = alt.X("username", sort=plt_data['username'].tolist()) if sort is True else 'username'
ch = alt.Chart(data=plt_data).mark_bar().encode(
x=x,
y=column
)
return ch
url = "https://discourse.jupyter.org/directory_items.json"
params = {"period": "quarterly", "order": "post_count"}
resp = requests.get(url, headers=api, params=params)
# Topics with the most likes in recent history
users = pd.DataFrame(resp.json()['directory_items'])
users['username'] = users['user'].map(lambda a: a['username'])
plot_user_data(users.head(50), 'post_count')
```
**Forum users, sorted by likes given**
These are Community Forum members that "liked" other people's posts. We appreciate
anybody taking the time to tell someone else they like what they've shared!
```
plot_user_data(users.head(50), 'likes_given')
```
**Forum users, sorted by likes received**
These are folks that posted things other people in the Community Forum liked.
```
plot_user_data(users.head(50), 'likes_received')
%%html
<script src="https://cdn.rawgit.com/parente/4c3e6936d0d7a46fd071/raw/65b816fb9bdd3c28b4ddf3af602bfd6015486383/code_toggle.js"></script>
```
# StyleGAN2
*Please note that this is an optional notebook that is meant to introduce more advanced concepts, if you're up for a challenge. So, don't worry if you don't completely follow every step! We provide external resources for extra base knowledge required to grasp some components of the advanced material.*
In this notebook, you're going to learn about StyleGAN2, from the paper [Analyzing and Improving the Image Quality of StyleGAN](https://arxiv.org/abs/1912.04958) (Karras et al., 2019), and how it builds on StyleGAN. This is the V2 of StyleGAN, so be prepared for even more extraordinary outputs. Here's the quick version:
1. **Demodulation.** The instance normalization of AdaIN in the original StyleGAN actually was producing “droplet artifacts” that made the output images clearly fake. AdaIN is modified a bit in StyleGAN2 to make this not happen. Below, *Figure 1* from the StyleGAN2 paper is reproduced, showing the droplet artifacts in StyleGAN.

2. **Path length regularization.** “Perceptual path length” (or PPL, which you can explore in [another optional notebook](https://www.coursera.org/learn/build-better-generative-adversarial-networks-gans/ungradedLab/BQjUq/optional-ppl)) was introduced in the original StyleGAN paper, as a metric for measuring the disentanglement of the intermediate noise space W. PPL measures the change in the output image, when interpolating between intermediate noise vectors $w$. You'd expect a good model to have a smooth transition during interpolation, where the same step size in $w$ maps onto the same amount of perceived change in the resulting image.
Using this intuition, you can make the mapping from $W$ space to images smoother, by encouraging a given change in $w$ to correspond to a constant amount of change in the image. This is known as path length regularization, and as you might expect, included as a term in the loss function. This smoothness also made the generator model "significantly easier to invert"! Recall that inversion means going from a real or fake image to finding its $w$, so you can easily adapt the image's styles by controlling $w$.
3. **No progressive growing.** While progressive growing was seemingly helpful for training the network more efficiently and with greater stability at lower resolutions before progressing to higher resolutions, there's actually a better way. Instead, you can replace it with 1) a better neural network architecture with skip and residual connections (which you also see in Course 3 models, Pix2Pix and CycleGAN), and 2) training with all of the resolutions at once, but gradually moving the generator's _attention_ from lower-resolution to higher-resolution dimensions. So, in a way, it is still very careful about how the different resolutions are handled, from lower to higher scales, to make training easier.
There are also a number of performance optimizations, like calculating the regularization less frequently. We won't focus on those in this notebook, but they are meaningful technical contributions.
But first, some useful imports:
```
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.utils import make_grid
import matplotlib.pyplot as plt
def show_tensor_images(image_tensor, num_images=16, size=(3, 64, 64), nrow=3):
'''
Function for visualizing images: Given a tensor of images, number of images,
size per image, and images per row, plots and prints the images in an uniform grid.
'''
image_tensor = (image_tensor + 1) / 2
image_unflat = image_tensor.detach().cpu().clamp_(0, 1)
image_grid = make_grid(image_unflat[:num_images], nrow=nrow, padding=2)
plt.imshow(image_grid.permute(1, 2, 0).squeeze())
plt.axis('off')
plt.show()
```
## Fixing Instance Norm
One issue with instance normalization is that it can lose important information that is typically communicated by relative magnitudes. In StyleGAN2, it was proposed that the droplet artifacts are a way for the network to "sneak" this magnitude information past the normalization with a single large spike. This issue was also highlighted in the paper which introduced GauGAN, [Semantic Image Synthesis with Spatially-Adaptive Normalization](https://arxiv.org/abs/1903.07291) (Park et al.), earlier in 2019. In that more extreme case, instance normalization could sometimes eliminate all semantic information, as shown in their paper's *Figure 3*:

While removing normalization is technically possible, it reduces the controllability of the model, a major feature of StyleGAN. Here's one solution from the paper:
### Output Demodulation
The first solution notes that scaling the output of a convolutional layer by a style has a consistent and numerically reproducible impact on the standard deviation of its output. By scaling down the standard deviation of the output to 1, the droplet effect can be reduced.
More specifically, applying the style $s$ as a multiplier to the convolutional weights $w$, giving weights $w'_{ijk}=s_i \cdot w_{ijk}$, means the output will have standard deviation $\sigma_j = \sqrt{\sum_{i,k} w'^2_{ijk}}$. One can simply divide the output of the convolution by this factor.
However, the authors note that dividing by this factor can also be incorporated directly into the convolutional weights (with an added $\epsilon$ for numerical stability):
$$w''_{ijk}=\frac{w'_{ijk}}{\sqrt{\sum_{i,k} w'^2_{ijk} + \epsilon}}$$
This makes it so that this entire operation can be baked into a single convolutional layer, making it easier to work with, implement, and integrate into the existing architecture of the model.
```
class ModulatedConv2d(nn.Module):
'''
ModulatedConv2d Class, extends/subclass of nn.Module
Values:
channels: the number of channels the image has, a scalar
w_dim: the dimension of the intermediate tensor, w, a scalar
'''
def __init__(self, w_dim, in_channels, out_channels, kernel_size, padding=1):
super().__init__()
self.conv_weight = nn.Parameter(
torch.randn(out_channels, in_channels, kernel_size, kernel_size)
)
self.style_scale_transform = nn.Linear(w_dim, in_channels)
self.eps = 1e-6
self.padding = padding
def forward(self, image, w):
# There is a more efficient (vectorized) way to do this using the group parameter of F.conv2d,
# but for simplicity and readability you will go through one image at a time.
images = []
for i, w_cur in enumerate(w):
# Calculate the style scale factor
style_scale = self.style_scale_transform(w_cur)
# Multiply it by the corresponding weight to get the new weights
w_prime = self.conv_weight * style_scale[None, :, None, None]
# Demodulate the new weights based on the above formula
w_prime_prime = w_prime / torch.sqrt(
(w_prime ** 2).sum([1, 2, 3])[:, None, None, None] + self.eps
)
images.append(F.conv2d(image[i][None], w_prime_prime, padding=self.padding))
return torch.cat(images)
def forward_efficient(self, image, w):
# Here's the more efficient approach. It starts off mostly the same
style_scale = self.style_scale_transform(w)
w_prime = self.conv_weight[None] * style_scale[:, None, :, None, None]
w_prime_prime = w_prime / torch.sqrt(
(w_prime ** 2).sum([2, 3, 4])[:, :, None, None, None] + self.eps
)
# Now, the trick is that we'll make the images into one image, and
# all of the conv filters into one filter, and then use the "groups"
# parameter of F.conv2d to apply them all at once
batchsize, in_channels, height, width = image.shape
out_channels = w_prime_prime.shape[2]
# Create an "image" where all the channels of the images are in one sequence
efficient_image = image.view(1, batchsize * in_channels, height, width)
efficient_filter = w_prime_prime.view(batchsize * out_channels, in_channels, *w_prime_prime.shape[3:])
efficient_out = F.conv2d(efficient_image, efficient_filter, padding=self.padding, groups=batchsize)
return efficient_out.view(batchsize, out_channels, *image.shape[2:])
example_modulated_conv = ModulatedConv2d(w_dim=128, in_channels=3, out_channels=3, kernel_size=3)
num_ex = 2
image_size = 64
rand_image = torch.randn(num_ex, 3, image_size, image_size) # A 64x64 image with 3 channels
rand_w = torch.randn(num_ex, 128)
new_image = example_modulated_conv(rand_image, rand_w)
second_modulated_conv = ModulatedConv2d(w_dim=128, in_channels=3, out_channels=3, kernel_size=3)
second_image = second_modulated_conv(new_image, rand_w)
print("Original noise (left), noise after modulated convolution (middle), noise after two modulated convolutions (right)")
plt.rcParams['figure.figsize'] = [8, 8]
show_tensor_images(torch.stack([rand_image, new_image, second_image], 1).view(-1, 3, image_size, image_size))
```
## Path Length Regularization
Path length regularization was introduced based on the usefulness of PPL, or perceptual path length, a metric for evaluating disentanglement proposed in the original StyleGAN paper -- feel free to check out the [optional notebook](https://www.coursera.org/learn/build-better-generative-adversarial-networks-gans/ungradedLab/BQjUq/optional-ppl) for a detailed overview! In essence, for a fixed-size step in any direction in $W$ space, the metric attempts to make the change in image space have a constant magnitude $a$. This is accomplished (in theory) by first taking the Jacobian of the generator with respect to $w$, which is $\mathop{\mathrm{J}_{\mathrm{w}}}={\partial g(\mathrm{w})} / {\partial \mathrm{w}}$.
Then, you multiply the transposed Jacobian by random images $\mathrm{y}$ (sampled from a normal distribution, as you often do) and take the L2 norm:
$\Vert \mathrm{J}_{\mathrm{w}}^T \mathrm{y} \Vert_2$. This captures the expected magnitude of the change in pixel space. From this, you get a loss term, which penalizes the distance between this magnitude and $a$. The paper notes that this has similarities to spectral normalization (discussed in [another optional notebook](https://www.coursera.org/learn/build-basic-generative-adversarial-networks-gans/ungradedLab/c2FPs/optional-sn-gan) in Course 1), because it constrains multiple norms.
An additional optimization is also possible and ultimately used in the StyleGAN2 model: instead of directly computing $\mathrm{J}_{\mathrm{w}}^T \mathrm{y}$, you can more efficiently calculate the gradient
$\nabla_{\mathrm{w}} (g(\mathrm{w}) \cdot \mathrm{y})$.
Finally, a bit of talk on $a$: $a$ is not a fixed constant, but an exponentially decaying average of the magnitudes over various runs -- as with most times you see (decaying) averages being used, this is to smooth out the value of $a$ across multiple iterations, not just dependent on one. Notationally, with decay rate $\gamma$, $a$ at the next iteration $a_{t+1} = {a_t} * (1 - \gamma) + \Vert \mathrm{J}_{\mathrm{w}}^T \mathrm{y} \Vert_2 * \gamma$.
However, for your one example iteration you can treat $a$ as a constant for simplicity. There is also an example of an update of $a$ after the calculation of the loss, so you can see what $a_{t+1}$ looks like with exponential decay.
```
# For convenience, we'll define a very simple generator here:
class SimpleGenerator(nn.Module):
'''
SimpleGenerator Class, for path length regularization demonstration purposes
Values:
channels: the number of channels the image has, a scalar
w_dim: the dimension of the intermediate tensor, w, a scalar
'''
def __init__(self, w_dim, in_channels, hid_channels, out_channels, kernel_size, padding=1, init_size=64):
super().__init__()
self.w_dim = w_dim
self.init_size = init_size
self.in_channels = in_channels
self.c1 = ModulatedConv2d(w_dim, in_channels, hid_channels, kernel_size)
self.activation = nn.ReLU()
self.c2 = ModulatedConv2d(w_dim, hid_channels, out_channels, kernel_size)
def forward(self, w):
image = torch.randn(len(w), self.in_channels, self.init_size, self.init_size).to(w.device)
y = self.c1(image, w)
y = self.activation(y)
y = self.c2(y, w)
return y
from torch.autograd import grad
def path_length_regularization_loss(generator, w, a):
# Generate the images from w
fake_images = generator(w)
# Get the corresponding random images
random_images = torch.randn_like(fake_images)
# Output variation that we'd like to regularize
output_var = (fake_images * random_images).sum()
# Calculate the gradient with respect to the inputs
cur_grad = grad(outputs=output_var, inputs=w)[0]
# Calculate the distance from a
penalty = (((cur_grad - a) ** 2).sum()).sqrt()
return penalty, output_var
simple_gen = SimpleGenerator(w_dim=128, in_channels=3, hid_channels=64, out_channels=3, kernel_size=3)
samples = 10
test_w = torch.randn(samples, 128).requires_grad_()
a = 10
penalty, variation = path_length_regularization_loss(simple_gen, test_w, a=a)
decay = 0.001 # How quickly a should decay
new_a = a * (1 - decay) + variation * decay
print(f"Old a: {a}; new a: {new_a.item()}")
```
## No More Progressive Growing
While the concepts behind progressive growing remain, you get to see how that is revamped and beefed up in StyleGAN2. This starts with generating all resolutions of images from the very start of training. You might be wondering why they didn't just do this in the first place: in the past, this has generally been unstable to do. However, by using residual or skip connections (there are two variants that both do better than without them), StyleGAN2 manages to replicate many of the dynamics of progressive growing in a less explicit way. Three architectures were considered for StyleGAN2 to replace the progressive growing.
Note that in the following figure, *tRGB* and *fRGB* refer to the $1 \times 1$ convolutions which transform the noise with some number channels at a given layer into a three-channel image for the generator, and vice versa for the discriminator.

*The set of architectures considered for StyleGAN2 (from the paper). Ultimately, the skip generator and residual discriminator (highlighted in green) were chosen*.
### Option a: MSG-GAN
[MSG-GAN](https://arxiv.org/abs/1903.06048) (from Karnewar and Wang 2019), proposed a somewhat natural approach: generate all resolutions of images, but also directly pass each corresponding resolution to a block of the discriminator responsible for dealing with that resolution.
### Option b: Skip Connections
In the skip-connection approach, each block takes the previous noise as input and generates the next resolution of noise. For the generator, each noise is converted to an image, upscaled to the maximum size, and then summed together. For the discriminator, the images are downsampled to each block's size and converted to noises.
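As a rough sketch of the skip-generator idea, here is a toy module in which each block's features are mapped to an image by a 1x1 "tRGB" convolution, upsampled to the final resolution, and summed. The block structure, channel counts, and bilinear upsampling are simplifying assumptions, not the paper's exact architecture.
```
import torch
import torch.nn as nn
import torch.nn.functional as F

class SkipGenerator(nn.Module):
    '''
    Illustrative sketch only: each block's features are converted to an image
    by a 1x1 "tRGB" convolution, upsampled to the final resolution, and summed
    with the images produced by the other blocks.
    '''
    def __init__(self, channels=64, num_blocks=3, base_size=8):
        super().__init__()
        self.base_size = base_size
        self.blocks = nn.ModuleList(
            [nn.Conv2d(channels, channels, kernel_size=3, padding=1) for _ in range(num_blocks)])
        self.to_rgbs = nn.ModuleList(
            [nn.Conv2d(channels, 3, kernel_size=1) for _ in range(num_blocks)])

    def forward(self, x):
        # x: feature map at the lowest resolution, shape (batch, channels, base_size, base_size)
        final_size = self.base_size * 2 ** (len(self.blocks) - 1)
        rgb_sum = 0
        for i, (block, to_rgb) in enumerate(zip(self.blocks, self.to_rgbs)):
            if i > 0:  # double the resolution before every block except the first
                x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
            x = torch.relu(block(x))
            # Convert this resolution's features to an image and accumulate it at full size
            rgb_sum = rgb_sum + F.interpolate(to_rgb(x), size=final_size,
                                              mode='bilinear', align_corners=False)
        return rgb_sum

skip_gen = SkipGenerator()
fake = skip_gen(torch.randn(2, 64, 8, 8))  # -> shape (2, 3, 32, 32)
```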
### Option c: Residual Nets
In the residual network approach, each block adds residual detail to the noise, and the image conversion happens at the end for the generator and at the start for the discriminator.
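And here is a correspondingly rough sketch of a residual discriminator block, again with made-up channel sizes and a simplified layer layout rather than the exact StyleGAN2 block.
```
import torch
import torch.nn as nn

class ResidualDiscriminatorBlock(nn.Module):
    '''
    Illustrative sketch only: a convolutional path and a 1x1 skip path, both
    downsampled by 2, are added together (scaled by 1/sqrt(2), as in the
    paper's residual blocks).
    '''
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv_path = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.LeakyReLU(0.2),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.LeakyReLU(0.2))
        self.skip_path = nn.Conv2d(in_channels, out_channels, kernel_size=1)
        self.downsample = nn.AvgPool2d(2)

    def forward(self, x):
        residual = self.downsample(self.conv_path(x))
        skip = self.downsample(self.skip_path(x))
        return (residual + skip) / (2 ** 0.5)

d_block = ResidualDiscriminatorBlock(3, 64)
features = d_block(torch.randn(2, 3, 64, 64))  # -> shape (2, 64, 32, 32)
```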
### StyleGAN2: Skip Generator, Residual Discriminator
By experiment, the skip generator and residual discriminator were chosen. One interesting effect is that, as the images for the skip generator are additive, you can explicitly see the contribution from each of them, and measure the magnitude of each block's contribution. If you're not 100% sure how to implement skip and residual models yet, don't worry - you'll get a lot of practice with that in Course 3!

*Figure 8 from StyleGAN2 paper, showing generator contributions by different resolution blocks of the generator over time. The y-axis is the standard deviation of the contributions, and the x-axis is the number of millions of images that the model has been trained on (training progress).*
Now you've seen the primary changes and you understand StyleGAN2, the current state of the art in image generation. Congratulations!
If you're the type of person who reads through the optional notebooks for fun, maybe you'll make the next state-of-the-art! Can't wait to cover your GAN in a new notebook :)
```
from azure.common import AzureMissingResourceHttpError
from azure.storage.blob import BlockBlobService, PublicAccess
from azure.storage.file import FileService
from azure.storage.table import TableService, Entity
#Blob Service...
def get_block_blob_service(account_name, storage_key):
return BlockBlobService(account_name=account_name, account_key=storage_key)
def blob_service_create_container(account_name, storage_key, container_name):
containers = blob_service_list_containers(account_name, storage_key)
if container_name not in containers:
block_blob_service = get_block_blob_service(account_name, storage_key)
block_blob_service.create_container(container_name)
block_blob_service.set_container_acl(container_name, public_access=PublicAccess.Container)
def blob_service_create_blob_from_bytes(account_name, storage_key, container_name, blob_name, blob):
block_blob_service = get_block_blob_service(account_name, storage_key)
block_blob_service.create_blob_from_bytes(container_name, blob_name, blob)
def blob_service_get_blob_to_path(account_name, storage_key, container_name, blob_name, file_path):
block_blob_service = get_block_blob_service(account_name, storage_key)
block_blob_service.get_blob_to_path(container_name, blob_name, file_path)
def blob_service_insert(account_name, storage_key, container_name, blob_name, text):
block_blob_service = get_block_blob_service(account_name, storage_key)
block_blob_service.create_blob_from_text(container_name, blob_name, text)
def blob_service_list_blobs(account_name, storage_key, container_name):
blobs = []
block_blob_service = get_block_blob_service(account_name, storage_key)
generator = block_blob_service.list_blobs(container_name)
for blob in generator:
blobs.append(blob.name)
return blobs
def blob_service_list_containers(account_name, storage_key):
containers = []
block_blob_service = get_block_blob_service(account_name, storage_key)
generator = block_blob_service.list_containers()
for container in generator:
containers.append(container.name)
return containers
# File Service...
def get_file_service(account_name, storage_key):
return FileService(account_name=account_name, account_key=storage_key)
def file_service_list_directories_and_files(account_name, storage_key, share_name, directory_name):
file_or_dirs = []
file_service = get_file_service(account_name, storage_key)
generator = file_service.list_directories_and_files(share_name, directory_name)
for file_or_dir in generator:
file_or_dirs.append(file_or_dir.name)
return file_or_dirs
# Table Service...
def get_table_service(account_name, storage_key):
return TableService(account_name=account_name, account_key=storage_key)
def table_service_get_entity(account_name, storage_key, table, partition_key, row_key):
table_service = get_table_service(account_name, storage_key)
return table_service.get_entity(table, partition_key, row_key)
def table_service_insert(account_name, storage_key, table, entity):
table_service = get_table_service(account_name, storage_key)
table_service.insert_entity(table, entity)
def table_service_query_entities(account_name, storage_key, table, filter):
table_service = get_table_service(account_name, storage_key)
return table_service.query_entities(table, filter)
```
# Project 1: Navigation
### Test 3 - DDQN model with Prioritized Experience Replay
<sub>Uirá Caiado. August 23, 2018</sub>
#### Abstract
_In this notebook, I will use the Unity ML-Agents environment to train a DDQN model with PER for the first project of the [Deep Reinforcement Learning Nanodegree](https://www.udacity.com/course/deep-reinforcement-learning-nanodegree--nd893)._
## 1. What we are going to test
Quoting the seminal [Prioritized Experience Replay](https://arxiv.org/abs/1511.05952) paper from the DeepMind team, experience replay lets online reinforcement learning agents remember and reuse experiences from the past. Below, I am going to test my implementation of the PER buffer in conjunction with Double DQN. Thus, let's begin by checking the environment where I am going to run these tests.
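Before that, here is a minimal sketch of the proportional prioritization idea from the paper. This is an illustrative, list-based buffer for reference only, not the `drlnd.dqn_agent` implementation used further below (which would normally use something more efficient, such as a sum-tree).
```
import numpy as np
from collections import namedtuple

Experience = namedtuple('Experience', ['state', 'action', 'reward', 'next_state', 'done'])

class ProportionalReplayBuffer:
    '''Illustrative list-based sketch of proportional prioritized replay (O(n) sampling).'''
    def __init__(self, capacity=100000, alpha=0.6, eps=1e-5):
        self.capacity = capacity
        self.alpha = alpha    # how much prioritization is used (0 = uniform replay)
        self.eps = eps        # keeps every priority strictly positive
        self.memory, self.priorities = [], []

    def add(self, state, action, reward, next_state, done):
        # New experiences get the current maximum priority so they are replayed at least once
        max_p = max(self.priorities, default=1.0)
        if len(self.memory) >= self.capacity:
            self.memory.pop(0)
            self.priorities.pop(0)
        self.memory.append(Experience(state, action, reward, next_state, done))
        self.priorities.append(max_p)

    def sample(self, batch_size, beta=0.4):
        probs = np.array(self.priorities) ** self.alpha
        probs /= probs.sum()
        idx = np.random.choice(len(self.memory), batch_size, p=probs)
        # Importance-sampling weights correct the bias introduced by non-uniform sampling
        weights = (len(self.memory) * probs[idx]) ** (-beta)
        weights /= weights.max()
        return [self.memory[i] for i in idx], idx, weights

    def update_priorities(self, idx, td_errors):
        # Priorities are proportional to the magnitude of the TD error
        for i, err in zip(idx, td_errors):
            self.priorities[i] = abs(err) + self.eps
```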
```
%load_ext version_information
%version_information numpy, unityagents, torch, matplotlib, pandas, gym
```
Now, let's define some meta variables to use in this notebook
```
import os
fig_prefix = 'figures/2018-08-23-'
data_prefix = '../data/2018-08-23-'
s_currentpath = os.getcwd()
```
Also, let's import some of the necessary packages for this experiment.
```
from unityagents import UnityEnvironment
import sys
import os
sys.path.append("../") # include the root directory as the main
import eda
import pandas as pd
import numpy as np
```
## 2. Training the agent
The environment used for this project is the Udacity version of the Banana Collector environment, from [Unity](https://youtu.be/heVMs3t9qSk). The goal of the agent is to collect as many yellow bananas as possible while avoiding blue bananas. Below, we are going to start this environment.
```
env = UnityEnvironment(file_name="../Banana_Linux_NoVis/Banana.x86_64")
```
Unity Environments contain brains which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.
```
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
```
Now, we are going to collect some basic information about the environment.
```
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of actions
action_size = brain.vector_action_space_size
# examine the state space
state = env_info.vector_observations[0]
state_size = len(state)
```
And finally, we are going to train the model. We will consider that this environment is solved if the agent is able to receive an average reward (over 100 episodes) of at least +13.
```
%%time
import gym
import pickle
import random
import torch
import numpy as np
from collections import deque
from drlnd.dqn_agent import DQNAgent, DDQNAgent, DDQNPREAgent
n_episodes = 2000
eps_start = 1.
eps_end=0.01
eps_decay=0.995
max_t = 1000
s_model = 'ddqnpre'
agent = DDQNPREAgent(state_size=state_size, action_size=action_size, seed=0)
scores = [] # list containing scores from each episode
scores_std = [] # List containing the std dev of the last 100 episodes
scores_avg = [] # List containing the mean of the last 100 episodes
scores_window = deque(maxlen=100) # last 100 scores
eps = eps_start # initialize epsilon
for i_episode in range(1, n_episodes+1):
env_info = env.reset(train_mode=True)[brain_name] # reset the environment
state = env_info.vector_observations[0] # get the current state
score = 0 # initialize the score
for t in range(max_t):
# action = np.random.randint(action_size) # select an action
action = agent.act(state, eps)
env_info = env.step(action)[brain_name] # send the action to the environment
next_state = env_info.vector_observations[0] # get the next state
reward = env_info.rewards[0] # get the reward
done = env_info.local_done[0] # see if episode has finished
agent.step(state, action, reward, next_state, done)
score += reward # update the score
state = next_state # roll over the state to next time step
if done: # exit loop if episode finished
break
scores_window.append(score) # save most recent score
scores.append(score) # save most recent score
scores_std.append(np.std(scores_window)) # save most recent std dev
scores_avg.append(np.mean(scores_window)) # save most recent mean
eps = max(eps_end, eps_decay*eps) # decrease epsilon
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="")
if i_episode % 100 == 0:
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
if np.mean(scores_window)>=13.0:
s_msg = '\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'
print(s_msg.format(i_episode, np.mean(scores_window)))
torch.save(agent.qnet.state_dict(), '%scheckpoint_%s.pth' % (data_prefix, s_model))
break
# save data to use latter
d_data = {'episodes': i_episode,
'scores': scores,
'scores_std': scores_std,
'scores_avg': scores_avg,
'scores_window': scores_window}
pickle.dump(d_data, open('%ssim-data-%s.data' % (data_prefix, s_model), 'wb'))
```
## 3. Results
The agent using Double DQN with Prioritized Experience Replay was able to solve the Banana Collector environment in 562 episodes of 1,000 steps each.
```
import pickle
d_data = pickle.load(open('../data/2018-08-23-sim-data-ddqnpre.data', 'rb'))
s_msg = 'Environment solved in {:d} episodes!\tAverage Score: {:.2f} +- {:.2f}'
print(s_msg.format(d_data['episodes'], np.mean(d_data['scores_window']), np.std(d_data['scores_window'])))
```
Now, let's plot the rewards per episode. In the right panel, we will plot the rolling average score over 100 episodes $\pm$ its standard deviation, as well as the goal of this project (13+ on average over the last 100 episodes).
```
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
%matplotlib inline
#recover data
na_raw = np.array(d_data['scores'])
na_mu = np.array(d_data['scores_avg'])
na_sigma = np.array(d_data['scores_std'])
# plot the scores
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5), sharex=True, sharey=True)
# plot the sores by episode
ax1.plot(np.arange(len(na_raw)), na_raw)
ax1.set_xlim(0, len(na_raw)+1)
ax1.set_ylabel('Score')
ax1.set_xlabel('Episode #')
ax1.set_title('raw scores')
# plot the average of these scores
ax2.axhline(y=13., xmin=0.0, xmax=1.0, color='r', linestyle='--', linewidth=0.7, alpha=0.9)
ax2.plot(np.arange(len(na_mu)), na_mu)
ax2.fill_between(np.arange(len(na_mu)), na_mu+na_sigma, na_mu-na_sigma, facecolor='gray', alpha=0.1)
ax2.set_ylabel('Average Score')
ax2.set_xlabel('Episode #')
ax2.set_title('average scores')
f.tight_layout()
# f.savefig(fig_prefix + 'ddqnpre-learning-curve.eps', format='eps', dpi=1200)
f.savefig(fig_prefix + 'ddqnpre-learning-curve.jpg', format='jpg')
env.close()
```
## 4. Conclusion
The Double Deep Q-learning agent using Prioritized Experience Replay was able to solve the environment in 562 episodes, which was the worst performance among all the implementations tested. However, it is worth noting that this implementation appears to produce the smoothest learning curve.
```
import pickle
d_ddqnper = pickle.load(open('../data/2018-08-23-sim-data-ddqnpre.data', 'rb'))
d_ddqn = pickle.load(open('../data/2018-08-24-sim-data-ddqn.data', 'rb'))
d_dqn = pickle.load(open('../data/2018-08-24-sim-data-dqn.data', 'rb'))
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
%matplotlib inline
def recover_data(d_data):
#recover data
na_raw = np.array(d_data['scores'])
na_mu = np.array(d_data['scores_avg'])
na_sigma = np.array(d_data['scores_std'])
return na_raw, na_mu, na_sigma
# plot the scores
f, ax2 = plt.subplots(1, 1, figsize=(8, 4), sharex=True, sharey=True)
for s_model, d_data in zip(['DDQN with PER', 'DDQN', 'DQN'], [d_ddqnper, d_ddqn, d_dqn]):
na_raw, na_mu, na_sigma = recover_data(d_data)
if s_model == 'DDQN with PER':
ax2.set_xlim(0, 572)
# plot the average of these scores
ax2.axhline(y=13., xmin=0.0, xmax=1.0, color='r', linestyle='--', linewidth=0.7, alpha=0.9)
ax2.plot(np.arange(len(na_mu)), na_mu, label=s_model)
# ax2.fill_between(np.arange(len(na_mu)), na_mu+na_sigma, na_mu-na_sigma, alpha=0.15)
# format axis
ax2.legend()
ax2.set_title('Learning Curves')
ax2.set_ylabel('Average Score in 100 episodes')
ax2.set_xlabel('Episode #')
# Shrink current axis's height by 10% on the bottom
box = ax2.get_position()
ax2.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height * 0.9])
# Put a legend below current axis
lgd = ax2.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10),
fancybox=False, shadow=False, ncol=3)
f.tight_layout()
f.savefig(fig_prefix + 'final-comparition-2.eps', format='eps',
bbox_extra_artists=(lgd,), bbox_inches='tight', dpi=1200)
```
Finally, let's compare the score distributions generated by the agents. I am going to perform a one-sided Welch's unequal-variances t-test of the hypothesis that the DDQN model has a higher expected score than the other agents over the final 100 episodes of each experiment. As the implementation of the t-test in [Scipy](https://goo.gl/gs222c) is two-sided, to perform the one-sided test we divide the p-value by 2 before comparing it to the critical value of 0.05, and we also require the t-value to be greater than zero.
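As a sketch of that one-sided conversion (the scores below are synthetic and only illustrate the mechanics):
```
import numpy as np
import scipy.stats

# Synthetic example scores, generated only to illustrate the one-sided conversion
rng = np.random.RandomState(0)
scores_a = rng.normal(14.0, 2.0, 100)   # e.g. final-100-episode scores of one agent
scores_b = rng.normal(13.5, 2.0, 100)   # and of another agent

tval, p_two_sided = scipy.stats.ttest_ind(scores_a, scores_b, equal_var=False)
# One-sided test of "mean(a) > mean(b)": halve the two-sided p-value and require t > 0
p_one_sided = p_two_sided / 2
reject_null = (tval > 0) and (p_one_sided < 0.05)
print(tval, p_one_sided, reject_null)
```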
```
import pandas as pd
def extract_info(s, d_data):
return {'model': s,
'episodes': d_data['episodes'],
'mean_score': np.mean(d_data['scores_window']),
'std_score': np.std(d_data['scores_window'])}
l_data = [extract_info(s, d) for s, d in zip(['DDQN with PER', 'DDQN', 'DQN'],
[d_ddqnper, d_ddqn, d_dqn])]
df = pd.DataFrame(l_data)
df.index = df.model
df.drop('model', axis=1, inplace=True)
print(df.sort_values(by='episodes'))
import scipy
# perform Welch's t-tests on the final scores of the different agents
tval, p_value = scipy.stats.ttest_ind(d_ddqn['scores'], d_dqn['scores'], equal_var=False)
print("DDQN vs. DQN: t-value = {:0.6f}, p-value = {:0.8f}".format(tval, p_value))
tval, p_value = scipy.stats.ttest_ind(d_ddqn['scores'], d_ddqnper['scores'], equal_var=False)
print("DDQN vs. DDQNPRE: t-value = {:0.6f}, p-value = {:0.8f}".format(tval, p_value))
```
There was no significant difference between the performances of the agents.
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Training and Evaluation with TensorFlow Keras
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/alpha/guide/keras/training_and_evaluation"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/guide/keras/training_and_evaluation.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/guide/keras/training_and_evaluation.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
This guide covers training, evaluation, and prediction (inference) models in TensorFlow 2.0 in two broad situations:
- When using built-in APIs for training & validation (such as `model.fit()`, `model.evaluate()`, `model.predict()`). This is covered in the section **"Using built-in training & evaluation loops"**.
- When writing custom loops from scratch using eager execution and the `GradientTape` object. This is covered in the section **"Writing your own training & evaluation loops from scratch"**.
In general, whether you are using built-in loops or writing your own, model training & evaluation works strictly in the same way across every kind of Keras model -- Sequential models, models built with the Functional API, and models written from scratch via model subclassing.
This guide doesn't cover distributed training.
## Setup
```
!pip install pydot
!apt-get install graphviz
from __future__ import absolute_import, division, print_function
!pip install tensorflow-gpu==2.0.0-alpha0
import tensorflow as tf
tf.keras.backend.clear_session() # For easy reset of notebook state.
```
## Part I: Using built-in training & evaluation loops
When passing data to the built-in training loops of a model, you should either use **Numpy arrays** (if your data is small and fits in memory) or **tf.data Dataset** objects. In the next few paragraphs, we'll use the MNIST dataset as Numpy arrays, in order to demonstrate how to use optimizers, losses, and metrics.
### API overview: a first end-to-end example
Let's consider the following model (here, we build it with the Functional API, but it could be a Sequential model or a subclassed model as well):
```
from tensorflow import keras
from tensorflow.keras import layers
inputs = keras.Input(shape=(784,), name='digits')
x = layers.Dense(64, activation='relu', name='dense_1')(inputs)
x = layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = layers.Dense(10, activation='softmax', name='predictions')(x)
model = keras.Model(inputs=inputs, outputs=outputs)
```
Here's what the typical end-to-end workflow looks like, consisting of training, validation on a holdout set generated from the original training data, and finally evaluation on the test data:
```
# Load a toy dataset for the sake of this example
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Preprocess the data (these are Numpy arrays)
x_train = x_train.reshape(60000, 784).astype('float32') / 255
x_test = x_test.reshape(10000, 784).astype('float32') / 255
# Reserve 10,000 samples for validation
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
# Specify the training configuration (optimizer, loss, metrics)
model.compile(optimizer=keras.optimizers.RMSprop(), # Optimizer
# Loss function to minimize
loss=keras.losses.SparseCategoricalCrossentropy(),
# List of metrics to monitor
metrics=[keras.metrics.SparseCategoricalAccuracy()])
# Train the model by slicing the data into "batches"
# of size "batch_size", and repeatedly iterating over
# the entire dataset for a given number of "epochs"
print('# Fit model on training data')
history = model.fit(x_train, y_train,
batch_size=64,
epochs=3,
# We pass some validation for
# monitoring validation loss and metrics
# at the end of each epoch
validation_data=(x_val, y_val))
# The returned "history" object holds a record
# of the loss values and metric values during training
print('\nhistory dict:', history.history)
# Evaluate the model on the test data using `evaluate`
print('\n# Evaluate on test data')
results = model.evaluate(x_test, y_test, batch_size=128)
print('test loss, test acc:', results)
# Generate predictions (probabilities -- the output of the last layer)
# on new data using `predict`
print('\n# Generate predictions for 3 samples')
predictions = model.predict(x_test[:3])
print('predictions shape:', predictions.shape)
```
### Specifying a loss, metrics, and an optimizer
To train a model with `fit`, you need to specify a loss function, an optimizer, and optionally, some metrics to monitor.
You pass these to the model as arguments to the `compile()` method:
```
model.compile(optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()])
```
The `metrics` argument should be a list -- your model can have any number of metrics.
If your model has multiple outputs, you can specify different losses and metrics for each output,
and you can modulate the contribution of each output to the total loss of the model. You will find more details about this in the section "**Passing data to multi-input, multi-output models**".
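For instance, here is a minimal sketch of compiling a two-output model with a different loss and weight per output; the output names and weights below are made up for illustration, and the section mentioned above covers this in detail.
```
from tensorflow import keras
from tensorflow.keras import layers

inputs = keras.Input(shape=(784,), name='digits')
x = layers.Dense(64, activation='relu')(inputs)
class_output = layers.Dense(10, activation='softmax', name='class_output')(x)
score_output = layers.Dense(1, name='score_output')(x)
multi_model = keras.Model(inputs=inputs, outputs=[class_output, score_output])

multi_model.compile(
    optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
    # One loss per output, keyed by output layer name
    loss={'class_output': 'sparse_categorical_crossentropy',
          'score_output': 'mse'},
    # Modulate each output's contribution to the total loss
    loss_weights={'class_output': 1.0, 'score_output': 0.2})
```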
Note that in many cases, the loss and metrics are specified via string identifiers, as a shortcut:
```
model.compile(optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'])
```
For later reuse, let's put our model definition and compile step in functions; we will call them several times across different examples in this guide.
```
def get_uncompiled_model():
inputs = keras.Input(shape=(784,), name='digits')
x = layers.Dense(64, activation='relu', name='dense_1')(inputs)
x = layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = layers.Dense(10, activation='softmax', name='predictions')(x)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
def get_compiled_model():
model = get_uncompiled_model()
model.compile(optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'])
return model
```
#### Many built-in optimizers, losses, and metrics are available
In general, you won't have to create your own losses, metrics, or optimizers from scratch, because what you need is likely already part of the Keras API:
Optimizers:
- `SGD()` (with or without momentum)
- `RMSprop()`
- `Adam()`
- etc.
Losses:
- `MeanSquaredError()`
- `KLDivergence()`
- `CosineSimilarity()`
- etc.
Metrics:
- `AUC()`
- `Precision()`
- `Recall()`
- etc.
#### Writing custom losses and metrics
If you need a metric that isn't part of the API, you can easily create custom metrics by subclassing the `Metric` class. You will need to implement 4 methods:
- `__init__(self)`, in which you will create state variables for your metric.
- `update_state(self, y_true, y_pred, sample_weight=None)`, which uses the targets `y_true` and the model predictions `y_pred` to update the state variables.
- `result(self)`, which uses the state variables to compute the final results.
- `reset_states(self)`, which reinitializes the state of the metric.
State update and results computation are kept separate (in `update_state()` and `result()`, respectively) because in some cases, results computation might be very expensive, and would only be done periodically.
Here's a simple example showing how to implement a `CatgoricalTruePositives` metric that counts how many samples were correctly classified as belonging to a given class:
```
class CatgoricalTruePositives(keras.metrics.Metric):
def __init__(self, name='binary_true_positives', **kwargs):
super(CatgoricalTruePositives, self).__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name='tp', initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
y_pred = tf.argmax(y_pred, axis=-1)
values = tf.equal(tf.cast(y_true, 'int32'), tf.cast(y_pred, 'int32'))
values = tf.cast(values, 'float32')
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, 'float32')
values = tf.multiply(values, sample_weight)
self.true_positives.assign_add(tf.reduce_sum(values))
def result(self):
return self.true_positives
def reset_states(self):
# The state of the metric will be reset at the start of each epoch.
self.true_positives.assign(0.)
model.compile(optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[CatgoricalTruePositives()])
model.fit(x_train, y_train,
batch_size=64,
epochs=3)
```
#### Handling losses and metrics that don't fit the standard signature
The overwhelming majority of losses and metrics can be computed from `y_true` and `y_pred`, where `y_pred` is an output of your model. But not all of them. For instance, a regularization loss may only require the activation of a layer (there are no targets in this case), and this activation may not be a model output.
In such cases, you can call `self.add_loss(loss_value)` from inside the `call` method of a custom layer. Here's a simple example that adds activity regularization (note that activity regularization is built into all Keras layers -- this layer is just for the sake of providing a concrete example):
```
class ActivityRegularizationLayer(layers.Layer):
def call(self, inputs):
self.add_loss(tf.reduce_sum(inputs) * 0.1)
return inputs # Pass-through layer.
inputs = keras.Input(shape=(784,), name='digits')
x = layers.Dense(64, activation='relu', name='dense_1')(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = layers.Dense(10, activation='softmax', name='predictions')(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss='sparse_categorical_crossentropy')
# The displayed loss will be much higher than before
# due to the regularization component.
model.fit(x_train, y_train,
batch_size=64,
epochs=1)
```
You can do the same for logging metric values:
```
class MetricLoggingLayer(layers.Layer):
def call(self, inputs):
# The `aggregation` argument defines
# how to aggregate the per-batch values
# over each epoch:
# in this case we simply average them.
self.add_metric(keras.backend.std(inputs),
name='std_of_activation',
aggregation='mean')
return inputs # Pass-through layer.
inputs = keras.Input(shape=(784,), name='digits')
x = layers.Dense(64, activation='relu', name='dense_1')(inputs)
# Insert std logging as a layer.
x = MetricLoggingLayer()(x)
x = layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = layers.Dense(10, activation='softmax', name='predictions')(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss='sparse_categorical_crossentropy')
model.fit(x_train, y_train,
batch_size=64,
epochs=1)
```
In the [Functional API](functional.ipynb), you can also call `model.add_loss(loss_tensor)`, or `model.add_metric(metric_tensor, name, aggregation)`.
Here's a simple example:
```
inputs = keras.Input(shape=(784,), name='digits')
x1 = layers.Dense(64, activation='relu', name='dense_1')(inputs)
x2 = layers.Dense(64, activation='relu', name='dense_2')(x1)
outputs = layers.Dense(10, activation='softmax', name='predictions')(x2)
model = keras.Model(inputs=inputs, outputs=outputs)
model.add_loss(tf.reduce_sum(x1) * 0.1)
model.add_metric(keras.backend.std(x1),
name='std_of_activation',
aggregation='mean')
model.compile(optimizer=keras.optimizers.RMSprop(1e-3),
loss='sparse_categorical_crossentropy')
model.fit(x_train, y_train,
batch_size=64,
epochs=1)
```
#### Automatically setting apart a validation holdout set
In the first end-to-end example you saw, we used the `validation_data` argument to pass a tuple
of Numpy arrays `(x_val, y_val)` to the model for evaluating a validation loss and validation metrics at the end of each epoch.
Here's another option: the argument `validation_split` allows you to automatically reserve part of your training data for validation. The argument value represents the fraction of the data to be reserved for validation, so it should be set to a number higher than 0 and lower than 1. For instance, `validation_split=0.2` means "use 20% of the data for validation", and `validation_split=0.6` means "use 60% of the data for validation".
The way the validation is computed is by *taking the last x% samples of the arrays received by the `fit` call, before any shuffling*.
You can only use `validation_split` when training with Numpy data.
```
model = get_compiled_model()
model.fit(x_train, y_train, batch_size=64, validation_split=0.2, epochs=3)
```
### Training & evaluation from tf.data Datasets
In the past few paragraphs, you've seen how to handle losses, metrics, and optimizers, and you've seen how to use the `validation_data` and `validation_split` arguments in `fit`, when your data is passed as Numpy arrays.
Let's now take a look at the case where your data comes in the form of a tf.data Dataset.
The tf.data API is a set of utilities in TensorFlow 2.0 for loading and preprocessing data in a way that's fast and scalable.
For a complete guide about creating Datasets, see [the tf.data documentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf).
You can pass a Dataset instance directly to the methods `fit()`, `evaluate()`, and `predict()`:
```
model = get_compiled_model()
# First, let's create a training Dataset instance.
# For the sake of our example, we'll use the same MNIST data as before.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
# Shuffle and slice the dataset.
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Now we get a test dataset.
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_dataset = test_dataset.batch(64)
# Since the dataset already takes care of batching,
# we don't pass a `batch_size` argument.
model.fit(train_dataset, epochs=3)
# You can also evaluate or predict on a dataset.
print('\n# Evaluate')
model.evaluate(test_dataset)
```
Note that the Dataset is reset at the end of each epoch, so it can be reused for the next epoch.
If you want to run training only on a specific number of batches from this Dataset, you can pass the `steps_per_epoch` argument, which specifies how many training steps the model should run using this Dataset before moving on to the next epoch.
If you do this, the dataset is not reset at the end of each epoch, instead we just keep drawing the next batches. The dataset will eventually run out of data (unless it is an infinitely-looping dataset).
```
model = get_compiled_model()
# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Only use 100 batches per epoch (that's 64 * 100 samples)
model.fit(train_dataset, epochs=3, steps_per_epoch=100)
```
#### Using a validation dataset
You can pass a Dataset instance as the `validation_data` argument in `fit`:
```
model = get_compiled_model()
# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Prepare the validation dataset
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)
model.fit(train_dataset, epochs=3, validation_data=val_dataset)
```
At the end of each epoch, the model will iterate over the validation Dataset and compute the validation loss and validation metrics.
If you want to run validation only on a specific number of batches from this Dataset, you can pass the `validation_steps` argument, which specifies how many validation steps the model should run with the validation Dataset before interrupting validation and moving on to the next epoch:
```
model = get_compiled_model()
# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Prepare the validation dataset
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)
model.fit(train_dataset, epochs=3,
# Only run validation using the first 10 batches of the dataset
# using the `validation_steps` argument
validation_data=val_dataset, validation_steps=10)
```
Note that the validation Dataset will be reset after each use (so that you will always be evaluating on the same samples from epoch to epoch).
The argument `validation_split` (generating a holdout set from the training data) is not supported when training from Dataset objects, since this feature requires the ability to index the samples of the dataset, which is not possible in general with the Dataset API.
### Other input formats supported
Besides Numpy arrays and TensorFlow Datasets, it's possible to train a Keras model using Pandas dataframes, or from Python generators that yield batches.
In general, we recommend that you use Numpy input data if your data is small and fits in memory, and Datasets otherwise.
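For illustration, here is a minimal sketch of training from a Python generator (assuming the `get_compiled_model` helper and the MNIST arrays `x_train`, `y_train` from the earlier cells; in TF 2.x, `fit` accepts generators directly). Since a generator has no length, `steps_per_epoch` tells `fit` how many batches make up one epoch:
```python
def batch_generator(x, y, batch_size=64):
    # Yield (inputs, targets) batches over and over.
    while True:
        for i in range(0, len(x), batch_size):
            yield x[i:i + batch_size], y[i:i + batch_size]

model = get_compiled_model()
model.fit(batch_generator(x_train, y_train),
          steps_per_epoch=len(x_train) // 64,
          epochs=1)
```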
### Using sample weighting and class weighting
Besides input data and target data, it is possible to pass sample weights or class weights to a model when using `fit`:
- When training from Numpy data: via the `sample_weight` and `class_weight` arguments.
- When training from Datasets: by having the Dataset return a tuple `(input_batch, target_batch, sample_weight_batch)` .
A "sample weights" array is an array of numbers that specify how much weight each sample in a batch should have in computing the total loss. It is commonly used in imbalanced classification problems (the idea being to give more weight to rarely-seen classes). When the weights used are ones and zeros, the array can be used as a *mask* for the loss function (entirely discarding the contribution of certain samples to the total loss).
A "class weights" dict is a more specific instance of the same concept: it maps class indices to the sample weight that should be used for samples belonging to this class. For instance, if class "0" is twice less represented than class "1" in your data, you could use `class_weight={0: 1., 1: 0.5}`.
Here's a Numpy example where we use class weights or sample weights to give more importance to the correct classification of class #5 (which is the digit "5" in the MNIST dataset).
```
import numpy as np
class_weight = {0: 1., 1: 1., 2: 1., 3: 1., 4: 1.,
# Set weight "2" for class "5",
# making this class 2x more important
5: 2.,
6: 1., 7: 1., 8: 1., 9: 1.}
model.fit(x_train, y_train,
class_weight=class_weight,
batch_size=64,
epochs=4)
# Here's the same example using `sample_weight` instead:
sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.
model = get_compiled_model()
model.fit(x_train, y_train,
sample_weight=sample_weight,
batch_size=64,
epochs=4)
```
Here's a matching Dataset example:
```
sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.
# Create a Dataset that includes sample weights
# (3rd element in the return tuple).
train_dataset = tf.data.Dataset.from_tensor_slices(
(x_train, y_train, sample_weight))
# Shuffle and slice the dataset.
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
model = get_compiled_model()
model.fit(train_dataset, epochs=3)
```
### Passing data to multi-input, multi-output models
In the previous examples, we were considering a model with a single input (a tensor of shape `(784,)`) and a single output (a prediction tensor of shape `(10,)`). But what about models that have multiple inputs or outputs?
Consider the following model, which has an image input of shape `(32, 32, 3)` (that's `(height, width, channels)`) and a timeseries input of shape `(None, 10)` (that's `(timesteps, features)`). Our model will have two outputs computed from the combination of these inputs: a "score" (of shape `(1,)`) and a probability distribution over 5 classes (of shape `(5,)`).
```
from tensorflow import keras
from tensorflow.keras import layers
image_input = keras.Input(shape=(32, 32, 3), name='img_input')
timeseries_input = keras.Input(shape=(None, 10), name='ts_input')
x1 = layers.Conv2D(3, 3)(image_input)
x1 = layers.GlobalMaxPooling2D()(x1)
x2 = layers.Conv1D(3, 3)(timeseries_input)
x2 = layers.GlobalMaxPooling1D()(x2)
x = layers.concatenate([x1, x2])
score_output = layers.Dense(1, name='score_output')(x)
class_output = layers.Dense(5, activation='softmax', name='class_output')(x)
model = keras.Model(inputs=[image_input, timeseries_input],
outputs=[score_output, class_output])
```
Let's plot this model, so you can clearly see what we're doing here (note that the shapes shown in the plot are batch shapes, rather than per-sample shapes).
```
keras.utils.plot_model(model, 'multi_input_and_output_model.png', show_shapes=True)
```
At compilation time, we can specify different losses for different outputs, by passing the loss functions as a list:
```
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[keras.losses.MeanSquaredError(),
keras.losses.CategoricalCrossentropy()])
```
If we only passed a single loss function to the model, the same loss function would be applied to every output, which is not appropriate here.
Likewise for metrics:
```
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[keras.losses.MeanSquaredError(),
keras.losses.CategoricalCrossentropy()],
metrics=[[keras.metrics.MeanAbsolutePercentageError(),
keras.metrics.MeanAbsoluteError()],
[keras.metrics.CategoricalAccuracy()]])
```
Since we gave names to our output layers, we could also specify per-output losses and metrics via a dict:
```
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={'score_output': keras.losses.MeanSquaredError(),
'class_output': keras.losses.CategoricalCrossentropy()},
metrics={'score_output': [keras.metrics.MeanAbsolutePercentageError(),
keras.metrics.MeanAbsoluteError()],
'class_output': [keras.metrics.CategoricalAccuracy()]})
```
We recommend the use of explicit names and dicts if you have more than 2 outputs.
It's possible to give different weights to different output-specific losses (for instance, one might wish to privilege the "score" loss in our example, by giving it 2x the importance of the class loss), using the `loss_weights` argument:
```
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={'score_output': keras.losses.MeanSquaredError(),
'class_output': keras.losses.CategoricalCrossentropy()},
metrics={'score_output': [keras.metrics.MeanAbsolutePercentageError(),
keras.metrics.MeanAbsoluteError()],
'class_output': [keras.metrics.CategoricalAccuracy()]},
loss_weights={'score_output': 2., 'class_output': 1.})
```
You could also choose not to compute a loss for certain outputs, if these outputs are meant for prediction but not for training:
```
# List loss version
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[None, keras.losses.CategoricalCrossentropy()])
# Or dict loss version
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={'class_output': keras.losses.CategoricalCrossentropy()})
```
Passing data to a multi-input or multi-output model in `fit` works in a similar way as specifying a loss function in `compile`:
you can pass *lists of Numpy arrays (with 1:1 mapping to the outputs that received a loss function)* or *dicts mapping output names to Numpy arrays of training data*.
```
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[keras.losses.MeanSquaredError(),
keras.losses.CategoricalCrossentropy()])
# Generate dummy Numpy data
img_data = np.random.random_sample(size=(100, 32, 32, 3))
ts_data = np.random.random_sample(size=(100, 20, 10))
score_targets = np.random.random_sample(size=(100, 1))
class_targets = np.random.random_sample(size=(100, 5))
# Fit on lists
model.fit([img_data, ts_data], [score_targets, class_targets],
batch_size=32,
epochs=3)
# Alternatively, fit on dicts
model.fit({'img_input': img_data, 'ts_input': ts_data},
{'score_output': score_targets, 'class_output': class_targets},
batch_size=32,
epochs=3)
```
Here's the Dataset use case: similarly to what we did for Numpy arrays, the Dataset should return
a tuple of dicts.
```
train_dataset = tf.data.Dataset.from_tensor_slices(
({'img_input': img_data, 'ts_input': ts_data},
{'score_output': score_targets, 'class_output': class_targets}))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
model.fit(train_dataset, epochs=3)
```
### Using callbacks
Callbacks in Keras are objects that are called at different points during training (at the start of an epoch, at the end of a batch, at the end of an epoch, etc.) and which can be used to implement behaviors such as:
- Doing validation at different points during training (beyond the built-in per-epoch validation)
- Checkpointing the model at regular intervals or when it exceeds a certain accuracy threshold
- Changing the learning rate of the model when training seems to be plateauing
- Doing fine-tuning of the top layers when training seems to be plateauing
- Sending email or instant message notifications when training ends or when a certain performance threshold is exceeded
- Etc.
Callbacks can be passed as a list to your call to `fit`:
```
model = get_compiled_model()
callbacks = [
keras.callbacks.EarlyStopping(
# Stop training when `val_loss` is no longer improving
monitor='val_loss',
# "no longer improving" being defined as "no better than 1e-2 less"
min_delta=1e-2,
# "no longer improving" being further defined as "for at least 2 epochs"
patience=2,
verbose=1)
]
model.fit(x_train, y_train,
epochs=20,
batch_size=64,
callbacks=callbacks,
validation_split=0.2)
```
#### Many built-in callbacks are available
- `ModelCheckpoint`: Periodically save the model.
- `EarlyStopping`: Stop training when the validation metrics are no longer improving.
- `TensorBoard`: Periodically write model logs that can be visualized in TensorBoard (more details in the section "Visualization").
- `CSVLogger`: Stream loss and metrics data to a CSV file.
- etc.
#### Writing your own callback
You can create a custom callback by extending the base class `keras.callbacks.Callback`. A callback has access to its associated model through the class property `self.model`.
Here's a simple example saving a list of per-batch loss values during training:
```python
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs):
self.losses = []
def on_batch_end(self, batch, logs):
self.losses.append(logs.get('loss'))
```
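A short usage sketch for the callback above (reusing the compiled model and MNIST arrays from the earlier cells):
```python
loss_history = LossHistory()
model.fit(x_train, y_train,
          batch_size=64,
          epochs=1,
          callbacks=[loss_history])
# The per-batch loss values collected by the callback:
print(loss_history.losses[:5])
```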
### Checkpointing models
When you're training a model on relatively large datasets, it's crucial to save checkpoints of your model at frequent intervals.
The easiest way to achieve this is with the `ModelCheckpoint` callback:
```
model = get_compiled_model()
callbacks = [
keras.callbacks.ModelCheckpoint(
filepath='mymodel_{epoch}.h5',
# Path where to save the model
# The two parameters below mean that we will overwrite
# the current checkpoint if and only if
# the `val_loss` score has improved.
save_best_only=True,
monitor='val_loss',
verbose=1)
]
model.fit(x_train, y_train,
epochs=3,
batch_size=64,
callbacks=callbacks,
validation_split=0.2)
```
You can also write your own callback for saving and restoring models.
For a complete guide on serialization and saving, see [Guide to Saving and Serializing Models](./saving_and_serializing.ipynb).
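As a hedged illustration of writing your own saving callback (this is just a sketch, not the pattern from the saving guide; the class name and file pattern are made up for the example), here is a callback that saves the model whenever `val_loss` improves:
```python
import numpy as np
from tensorflow import keras


class SimpleCheckpoint(keras.callbacks.Callback):
    """Save the model whenever `val_loss` improves (illustrative only)."""

    def __init__(self, filepath):
        super(SimpleCheckpoint, self).__init__()
        self.filepath = filepath
        self.best = np.inf

    def on_epoch_end(self, epoch, logs=None):
        # `val_loss` is only present if validation data was provided to `fit`.
        current = (logs or {}).get('val_loss')
        if current is not None and current < self.best:
            self.best = current
            # `self.model` is set by Keras when the callback is attached.
            self.model.save(self.filepath.format(epoch=epoch))


# Example usage: model.fit(..., validation_split=0.2,
#                          callbacks=[SimpleCheckpoint('mymodel_{epoch}.h5')])
```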
### Using learning rate schedules
A common pattern when training deep learning models is to gradually reduce the learning rate as training progresses. This is generally known as "learning rate decay".
The learning rate decay schedule could be static (fixed in advance, as a function of the current epoch or the current batch index), or dynamic (responding to the current behavior of the model, in particular the validation loss).
#### Passing a schedule to an optimizer
You can easily use a static learning rate decay schedule by passing a schedule object as the `learning_rate` argument in your optimizer:
```
initial_learning_rate = 0.1
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=100000,
decay_rate=0.96,
staircase=True)
optimizer = keras.optimizers.RMSprop(learning_rate=lr_schedule)
```
Several built-in schedules are available: `ExponentialDecay`, `PiecewiseConstantDecay`, `PolynomialDecay`, and `InverseTimeDecay`.
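As another sketch, `PiecewiseConstantDecay` keeps the learning rate constant between a list of step boundaries (the boundaries and values below are arbitrary):
```python
lr_schedule = keras.optimizers.schedules.PiecewiseConstantDecay(
    boundaries=[100000, 110000],  # switch points, counted in optimizer steps
    values=[1.0, 0.5, 0.1])       # one more value than boundaries
optimizer = keras.optimizers.RMSprop(learning_rate=lr_schedule)
```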
#### Using callbacks to implement a dynamic learning rate schedule
A dynamic learning rate schedule (for instance, decreasing the learning rate when the validation loss is no longer improving) cannot be achieved with these schedule objects since the optimizer does not have access to validation metrics.
However, callbacks do have access to all metrics, including validation metrics! You can thus achieve this pattern by using a callback that modifies the current learning rate on the optimizer. In fact, this is even built-in as the `ReduceLROnPlateau` callback.
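Here is a hedged sketch of that pattern with `ReduceLROnPlateau` (the parameter values are illustrative, and it reuses the compiled model and Numpy data from the earlier cells):
```python
reduce_lr = keras.callbacks.ReduceLROnPlateau(
    monitor='val_loss',  # quantity to watch
    factor=0.5,          # multiply the learning rate by this factor
    patience=3,          # after this many epochs without improvement
    min_lr=1e-6)         # never go below this learning rate
model.fit(x_train, y_train,
          batch_size=64,
          epochs=20,
          validation_split=0.2,
          callbacks=[reduce_lr])
```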
### Visualizing loss and metrics during training
The best way to keep an eye on your model during training is to use [TensorBoard](https://www.tensorflow.org/tensorboard), a browser-based application that you can run locally that provides you with:
- Live plots of the loss and metrics for training and evaluation
- (optionally) Visualizations of the histograms of your layer activations
- (optionally) 3D visualizations of the embedding spaces learned by your `Embedding` layers
If you have installed TensorFlow with pip, you should be able to launch TensorBoard from the command line:
```
tensorboard --logdir=/full_path_to_your_logs
```
#### Using the TensorBoard callback
The easiest way to use TensorBoard with a Keras model and the `fit` method is the `TensorBoard` callback.
In the simplest case, just specify where you want the callback to write logs, and you're good to go:
```python
tensorboard_cbk = keras.callbacks.TensorBoard(log_dir='/full_path_to_your_logs')
model.fit(dataset, epochs=10, callbacks=[tensorboard_cbk])
```
The `TensorBoard` callback has many useful options, including whether to log embeddings, histograms, and how often to write logs:
```python
keras.callbacks.TensorBoard(
log_dir='/full_path_to_your_logs',
histogram_freq=0, # How often to log histogram visualizations
embeddings_freq=0, # How often to log embedding visualizations
update_freq='epoch') # How often to write logs (default: once per epoch)
```
## Part II: Writing your own training & evaluation loops from scratch
If you want lower-level control over your training & evaluation loops than what `fit()` and `evaluate()` provide, you should write your own. It's actually pretty simple! But you should be ready to do a lot more debugging on your own.
### Using the GradientTape: a first end-to-end example
Calling a model inside a `GradientTape` scope enables you to retrieve the gradients of the trainable weights of the layer with respect to a loss value. Using an optimizer instance, you can use these gradients to update these variables (which you can retrieve using `model.trainable_variables`).
Let's reuse our initial MNIST model from Part I, and let's train it using mini-batch gradient descent with a custom training loop.
```
# Get the model.
inputs = keras.Input(shape=(784,), name='digits')
x = layers.Dense(64, activation='relu', name='dense_1')(inputs)
x = layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = layers.Dense(10, activation='softmax', name='predictions')(x)
model = keras.Model(inputs=inputs, outputs=outputs)
# Instantiate an optimizer.
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy()
# Prepare the training dataset.
batch_size = 64
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)
# Iterate over epochs.
for epoch in range(3):
print('Start of epoch %d' % (epoch,))
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
# Open a GradientTape to record the operations run
# during the forward pass, which enables autodifferentiation.
with tf.GradientTape() as tape:
# Run the forward pass of the layer.
# The operations that the layer applies
# to its inputs are going to be recorded
# on the GradientTape.
logits = model(x_batch_train) # Logits for this minibatch
# Compute the loss value for this minibatch.
loss_value = loss_fn(y_batch_train, logits)
# Use the gradient tape to automatically retrieve
# the gradients of the trainable variables with respect to the loss.
grads = tape.gradient(loss_value, model.trainable_variables)
# Run one step of gradient descent by updating
# the value of the variables to minimize the loss.
optimizer.apply_gradients(zip(grads, model.trainable_variables))
# Log every 200 batches.
if step % 200 == 0:
print('Training loss (for one batch) at step %s: %s' % (step, float(loss_value)))
print('Seen so far: %s samples' % ((step + 1) * 64))
```
### Low-level handling of metrics
Let's add metrics to the mix. You can readily reuse the built-in metrics (or custom ones you wrote) in such training loops written from scratch. Here's the flow:
- Instantiate the metric at the start of the loop
- Call `metric.update_state()` after each batch
- Call `metric.result()` when you need to display the current value of the metric
- Call `metric.reset_states()` when you need to clear the state of the metric (typically at the end of an epoch)
Let's use this knowledge to compute `SparseCategoricalAccuracy` on validation data at the end of each epoch:
```
# Get model
inputs = keras.Input(shape=(784,), name='digits')
x = layers.Dense(64, activation='relu', name='dense_1')(inputs)
x = layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = layers.Dense(10, activation='softmax', name='predictions')(x)
model = keras.Model(inputs=inputs, outputs=outputs)
# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy()
# Prepare the metrics.
train_acc_metric = keras.metrics.SparseCategoricalAccuracy()
val_acc_metric = keras.metrics.SparseCategoricalAccuracy()
# Prepare the training dataset.
batch_size = 64
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)
# Prepare the validation dataset.
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)
# Iterate over epochs.
for epoch in range(3):
print('Start of epoch %d' % (epoch,))
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
with tf.GradientTape() as tape:
logits = model(x_batch_train)
loss_value = loss_fn(y_batch_train, logits)
grads = tape.gradient(loss_value, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
# Update training metric.
train_acc_metric(y_batch_train, logits)
# Log every 200 batches.
if step % 200 == 0:
print('Training loss (for one batch) at step %s: %s' % (step, float(loss_value)))
print('Seen so far: %s samples' % ((step + 1) * 64))
# Display metrics at the end of each epoch.
train_acc = train_acc_metric.result()
print('Training acc over epoch: %s' % (float(train_acc),))
# Reset training metrics at the end of each epoch
train_acc_metric.reset_states()
# Run a validation loop at the end of each epoch.
for x_batch_val, y_batch_val in val_dataset:
val_logits = model(x_batch_val)
# Update val metrics
val_acc_metric(y_batch_val, val_logits)
val_acc = val_acc_metric.result()
val_acc_metric.reset_states()
print('Validation acc: %s' % (float(val_acc),))
```
### Low-level handling of extra losses
You saw in the previous section that it is possible for regularization losses to be added by a layer by calling `self.add_loss(value)` in the `call` method.
In the general case, you will want to take these losses into account in your custom training loops (unless you've written the model yourself and you already know that it creates no such losses).
Recall this example from the previous section, featuring a layer that creates a regularization loss:
```
class ActivityRegularizationLayer(layers.Layer):
def call(self, inputs):
self.add_loss(1e-2 * tf.reduce_sum(inputs))
return inputs
inputs = keras.Input(shape=(784,), name='digits')
x = layers.Dense(64, activation='relu', name='dense_1')(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = layers.Dense(10, activation='softmax', name='predictions')(x)
model = keras.Model(inputs=inputs, outputs=outputs)
```
When you call a model, like this:
```python
logits = model(x_train)
```
the losses it creates during the forward pass are added to the `model.losses` attribute:
```
logits = model(x_train[:64])
print(model.losses)
```
The tracked losses are first cleared at the start of the model `__call__`, so you will only see the losses created during this one forward pass. For instance, calling the model repeatedly and then querying `losses` only displays the latest losses, created during the last call:
```
logits = model(x_train[:64])
logits = model(x_train[64: 128])
logits = model(x_train[128: 192])
print(model.losses)
```
To take these losses into account during training, all you have to do is to modify your training loop to add `sum(model.losses)` to your total loss:
```
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
for epoch in range(3):
print('Start of epoch %d' % (epoch,))
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
with tf.GradientTape() as tape:
logits = model(x_batch_train)
loss_value = loss_fn(y_batch_train, logits)
# Add extra losses created during this forward pass:
loss_value += sum(model.losses)
grads = tape.gradient(loss_value, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
# Log every 200 batches.
if step % 200 == 0:
print('Training loss (for one batch) at step %s: %s' % (step, float(loss_value)))
print('Seen so far: %s samples' % ((step + 1) * 64))
```
That was the last piece of the puzzle! You've reached the end of this guide.
Now you know everything there is to know about using built-in training loops and writing your own from scratch.
|
github_jupyter
|
# Inverted Pendulum: Reinforcement learning
Meichen Lu ([email protected]) 26th April 2018
Source: CS229: PS4Q6
Starting code: http://cs229.stanford.edu/ps/ps4/q6/
Reference: https://github.com/zyxue/stanford-cs229/blob/master/Problem-set-4/6-reinforcement-learning-the-inverted-pendulum/control.py
```
from cart_pole import CartPole, Physics
import numpy as np
from scipy.signal import lfilter
import matplotlib.pyplot as plt
%matplotlib inline
# Simulation parameters
pause_time = 0.0001
min_trial_length_to_start_display = 100
display_started = min_trial_length_to_start_display == 0
NUM_STATES = 163
NUM_ACTIONS = 2
GAMMA = 0.995
TOLERANCE = 0.01
NO_LEARNING_THRESHOLD = 20
# Time cycle of the simulation
time = 0
# These variables perform bookkeeping (how many cycles was the pole
# balanced for before it fell). Useful for plotting learning curves.
time_steps_to_failure = []
num_failures = 0
time_at_start_of_current_trial = 0
# You should reach convergence well before this
max_failures = 500
# Initialize a cart pole
cart_pole = CartPole(Physics())
# Starting `state_tuple` is (0, 0, 0, 0)
# x, x_dot, theta, theta_dot represents the actual continuous state vector
x, x_dot, theta, theta_dot = 0.0, 0.0, 0.0, 0.0
state_tuple = (x, x_dot, theta, theta_dot)
# `state` is the number given to this state, you only need to consider
# this representation of the state
state = cart_pole.get_state(state_tuple)
# if min_trial_length_to_start_display == 0 or display_started == 1:
# cart_pole.show_cart(state_tuple, pause_time)
# Perform all your initializations here:
# Assume no transitions or rewards have been observed.
# Initialize the value function array to small random values (0 to 0.10,
# say).
# Initialize the transition probabilities uniformly (ie, probability of
# transitioning for state x to state y using action a is exactly
# 1/NUM_STATES).
# Initialize all state rewards to zero.
###### BEGIN YOUR CODE ######
V_s = np.random.rand(NUM_STATES)
P_sa = np.ones((NUM_STATES,NUM_ACTIONS, NUM_STATES))/NUM_STATES
R_s = np.zeros((NUM_STATES))
# Initialise intermediate variables
state_transition_count = np.zeros((NUM_STATES,NUM_ACTIONS, NUM_STATES))
new_state_count = np.zeros(NUM_STATES)
R_new_state = np.zeros(NUM_STATES)
###### END YOUR CODE ######
# This is the criterion to end the simulation.
# You should change it to terminate when the previous
# 'NO_LEARNING_THRESHOLD' consecutive value function computations all
# converged within one value function iteration. Intuitively, it seems
# like there will be little learning after this, so end the simulation
# here, and say the overall algorithm has converged.
consecutive_no_learning_trials = 0
while consecutive_no_learning_trials < NO_LEARNING_THRESHOLD:
# Write code to choose action (0 or 1).
# This action choice algorithm is just for illustration. It may
# convince you that reinforcement learning is nice for control
# problems! Replace it with your code to choose an action that is
# optimal according to the current value function, and the current MDP
# model.
###### BEGIN YOUR CODE ######
# TODO:
action = np.argmax(np.sum(P_sa[state]*V_s, axis = 1))
###### END YOUR CODE ######
# Get the next state by simulating the dynamics
state_tuple = cart_pole.simulate(action, state_tuple)
# Increment simulation time
time = time + 1
# Get the state number corresponding to new state vector
new_state = cart_pole.get_state(state_tuple)
# if display_started == 1:
# cart_pole.show_cart(state_tuple, pause_time)
# reward function to use - do not change this!
if new_state == NUM_STATES - 1:
R = -1
else:
R = 0
# Perform model updates here.
# A transition from `state` to `new_state` has just been made using
# `action`. The reward observed in `new_state` (note) is `R`.
# Write code to update your statistics about the MDP i.e. the
# information you are storing on the transitions and on the rewards
# observed. Do not change the actual MDP parameters, except when the
# pole falls (the next if block)!
###### BEGIN YOUR CODE ######
# record the number of times `state, action, new_state` occurs
state_transition_count[state, action, new_state] += 1
# record the rewards for every `new_state`
R_new_state[new_state] += R
# record the number of times `new_state` was reached
new_state_count[new_state] += 1
###### END YOUR CODE ######
# Recompute MDP model whenever pole falls
# Compute the value function V for the new model
if new_state == NUM_STATES - 1:
# Update MDP model using the current accumulated statistics about the
# MDP - transitions and rewards.
# Make sure you account for the case when a state-action pair has never
# been tried before, or the state has never been visited before. In that
# case, you must not change that component (and thus keep it at the
# initialized uniform distribution).
###### BEGIN YOUR CODE ######
# TODO:
sum_state = np.sum(state_transition_count, axis = 2)
mask = sum_state > 0
P_sa[mask] = state_transition_count[mask]/sum_state[mask].reshape(-1, 1)
# Update reward function
mask = new_state_count>0
R_s[mask] = R_new_state[mask]/new_state_count[mask]
###### END YOUR CODE ######
# Perform value iteration using the new estimated model for the MDP.
# The convergence criterion should be based on `TOLERANCE` as described
# at the top of the file.
# If it converges within one iteration, you may want to update your
# variable that checks when the whole simulation must end.
###### BEGIN YOUR CODE ######
iter = 0
tol = 1
while tol > TOLERANCE:
V_old = V_s
V_s = R_s + GAMMA * np.max(np.sum(P_sa*V_s, axis = 2), axis = 1)
tol = np.max(np.abs(V_s - V_old))
iter = iter + 1
if iter == 1:
consecutive_no_learning_trials += 1
else:
# Reset
consecutive_no_learning_trials = 0
###### END YOUR CODE ######
# Do NOT change this code: Controls the simulation, and handles the case
# when the pole fell and the state must be reinitialized.
if new_state == NUM_STATES - 1:
num_failures += 1
if num_failures >= max_failures:
break
print('[INFO] Failure number {}'.format(num_failures))
time_steps_to_failure.append(time - time_at_start_of_current_trial)
# time_steps_to_failure[num_failures] = time - time_at_start_of_current_trial
time_at_start_of_current_trial = time
if time_steps_to_failure[num_failures - 1] > min_trial_length_to_start_display:
display_started = 1
# Reinitialize state
# x = 0.0
x = -1.1 + np.random.uniform() * 2.2
x_dot, theta, theta_dot = 0.0, 0.0, 0.0
state_tuple = (x, x_dot, theta, theta_dot)
state = cart_pole.get_state(state_tuple)
else:
state = new_state
# plot the learning curve (time balanced vs. trial)
log_tstf = np.log(np.array(time_steps_to_failure))
plt.plot(np.arange(len(time_steps_to_failure)), log_tstf, 'k')
window = 30
w = np.array([1/window for _ in range(window)])
weights = lfilter(w, 1, log_tstf)
x = np.arange(window//2, len(log_tstf) - window//2)
plt.plot(x, weights[window:len(log_tstf)], 'r--')
plt.xlabel('Num failures')
plt.ylabel('Num steps to failure')
plt.show()
```
|
github_jupyter
|
# Variable Relationship Tests (correlation)
- Pearson’s Correlation Coefficient
- Spearman’s Rank Correlation
- Kendall’s Rank Correlation
- Chi-Squared Test
## Correlation Test
Correlation measures whether greater values of one variable correspond to greater values of the other. It is scaled to always lie between +1 and −1; a small numeric example follows the list below.
- Correlation is Positive when the values increase together.
- Correlation is Negative when one value decreases as the other increases.
- A correlation is assumed to be linear.
- 1 is a perfect positive correlation
- 0 is no correlation (the values don’t seem linked at all)
- -1 is a perfect negative correlation
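A toy illustration of these three cases on synthetic data (this is only a sketch; it does not use the dataset loaded later in this notebook):
```
import numpy as np

x = np.arange(10)
print(np.corrcoef(x, 2 * x + 1)[0, 1])   # +1.0 : perfect positive correlation
print(np.corrcoef(x, -3 * x + 5)[0, 1])  # -1.0 : perfect negative correlation
rng = np.random.default_rng(0)
print(np.corrcoef(x, rng.normal(size=10))[0, 1])  # near 0 : no linear relationship
```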
## Correlation Methods
- **Pearson's Correlation Test:** assumes the data is normally distributed and measures linear correlation.
- **Spearman's Correlation Test:** does not assume normality and measures non-linear correlation.
- **Kendall's Correlation Test:** similarly does not assume normality and measures non-linear correlation, but it is less commonly used.
## Difference Between Pearson's and Spearman's
Pearson's Test | Spearman's Test
---------------|----------------
Parametric correlation | Non-parametric correlation
Linear relationship | Non-linear relationship
Continuous variables | Continuous or ordinal variables
Proportional change | Change not at constant rate
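A small sketch of that difference on synthetic data: for a monotonic but non-linear relationship, Spearman's coefficient is a perfect 1.0 while Pearson's is below 1:
```
import numpy as np
from scipy import stats

x = np.arange(1, 21)
y = x ** 3  # monotonic, but not linear
print(stats.pearsonr(x, y))   # r < 1: the relationship is not linear
print(stats.spearmanr(x, y))  # rho = 1.0: the ranks agree perfectly
```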
```
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
sns.set(font_scale=2, palette= "viridis")
from sklearn.preprocessing import scale
import researchpy as rp
from scipy import stats
data = pd.read_csv('../data/pulse_data.csv')
data.head()
```
## Pearson’s Correlation Coefficient
Tests whether two samples have a linear relationship.
### Assumptions
- Observations in each sample are independent and identically distributed (iid).
- Observations in each sample are normally distributed.
- Observations in each sample have the same variance.
### Interpretation
- H0: There is no relationship between the two variables (the correlation is zero)
- Ha: There is a relationship between the two variables
__Question: Is there any relationship between height and weight?__
```
data.Height.corr(data.Weight)
data.Height.corr(data.Weight, method="pearson")
data.Height.corr(data.Weight, method="spearman")
plt.figure(figsize=(10,8))
sns.scatterplot(data=data, x='Height', y="Weight")
plt.show()
plt.figure(figsize=(10,8))
sns.regplot(data=data, x='Height', y="Weight")
plt.show()
stat, p_value = stats.shapiro(data['Height'])
print(f'statistic = {stat}, p-value = {p_value}')
alpha = 0.05
if p_value > alpha:
print("The sample has normal distribution(Fail to reject the null hypothesis, the result is not significant)")
else:
print("The sample does not have a normal distribution(Reject the null hypothesis, the result is significant)")
stat, p_value = stats.shapiro(data['Weight'])
print(f'statistic = {stat}, p-value = {p_value}')
alpha = 0.05
if p_value > alpha:
print("The sample has normal distribution(Fail to reject the null hypothesis, the result is not significant)")
else:
print("The sample does not have a normal distribution(Reject the null hypothesis, the result is significant)")
# Checking for normality by Q-Q plot graph
plt.figure(figsize=(12, 8))
stats.probplot(data['Height'], plot=plt, dist='norm')
plt.show()
# Checking for normality by Q-Q plot graph
plt.figure(figsize=(12, 8))
stats.probplot(data['Weight'], plot=plt, dist='norm')
plt.show()
stats.levene(data['Height'], data['Weight'])
stat, p, = stats.levene(data['Height'], data['Weight'])
print(f'stat={stat}, p-value={p}')
alpha = 0.05
if p > alpha:
print('The variances are equal between the two variables (fail to reject H0, not significant)')
else:
print('The variances are not equal between the two variables (reject H0, significant)')
stats.pearsonr(data['Height'], data['Weight'])
stat, p, = stats.pearsonr(data['Height'], data['Weight'])
print(f'stat={stat}, p-value={p}')
alpha = 0.05
if p > alpha:
print('There is no relationship between the two variables (fail to reject H0, not significant)')
else:
print('There is a relationship between the two variables (reject H0, significant)')
```
## Spearman’s Rank Correlation Test
Tests whether two samples have a monotonic relationship.
### Assumptions
- Observations in each sample are independent and identically distributed (iid).
- Observations in each sample can be ranked.
### Interpretation
- **H0 hypothesis:** There is no relationship between variable 1 and variable 2
- **H1 hypothesis:** There is a relationship between variable 1 and variable 2
```
stats.spearmanr(data['Height'], data['Weight'])
stat, p = stats.spearmanr(data['Height'], data['Weight'])
print(f'stat={stat}, p-value={p}')
alpha = 0.05
if p > alpha:
print('There is no relationship between the two variables (fail to reject H0, not significant)')
else:
print('There is a relationship between the two variables (reject H0, significant)')
```
## Kendall’s Rank Correlation Test
### Assumptions
- Observations in each sample are independent and identically distributed (iid).
- Observations in each sample can be ranked.
### Interpretation
- **H0 hypothesis:** There is no relationship between variable 1 and variable 2
- **H1 hypothesis:** There is a relationship between variable 1 and variable 2
```
stats.kendalltau(data['Height'], data['Weight'])
stat, p, = stats.kendalltau(data['Height'], data['Weight'])
print(f'stat={stat}, p-value={p}')
alpha = 0.05
if p > alpha:
print('Fail to reject the null hypothesis; there is no relationship between Height and Weight (not significant)')
else:
print('Reject the null hypothesis; there is a relationship between Height and Weight (significant)')
```
## Chi-Squared Test
- The Chi-square test of independence tests if there is a significant relationship between two categorical variables
- The test is comparing the observed observations to the expected observations.
- The data is usually displayed in a cross-tabulation format with each row representing a category for one variable and each column representing a category for another variable.
- Chi-square test of independence is an omnibus test, meaning it tests the data as a whole: if the table is larger than 2×2, you will not be able to tell which levels (categories) of the variables are responsible for the relationship.
- If the table is larger than 2×2, it requires post hoc testing. If this doesn't make much sense right now, don't worry. Further explanation will be provided when we start working with the data.
### Assumptions
- There should be two categorical variables (e.g., Gender)
- Each variable should have at least two groups (e.g., Gender = Female or Male)
- There should be independence of observations (between and within subjects)
- Large sample size
- The expected frequencies should be at least 1 for each cell.
- The expected frequencies for the majority (80%) of the cells should be at least 5.
If the sample size is small, we have to use **Fisher's Exact Test**
**Fisher's Exact Test** is similar to Chi-squared test, but it is used for small-sized samples.
## Interpretation
- The H0 (Null Hypothesis): There is no relationship between variable 1 and variable 2 (they are independent).
- The Ha (Alternative Hypothesis): There is a relationship between variable 1 and variable 2.
### Contingency Table
A contingency table is a table with at least two rows and two columns (2x2) used to present categorical data in terms of frequency counts.
```
data = pd.read_csv('../data/KosteckiDillon.csv', usecols=['id', 'time', 'dos', 'hatype', 'age', 'airq',
'medication', 'headache', 'sex'])
data.head()
table = pd.crosstab(data['sex'], data['headache'])
table
stats.chi2_contingency(table)
stat, p, dof, expected = stats.chi2_contingency(table)
print(f'stat={stat}, p-value={p}')
alpha = 0.05
if p > alpha:
print('There is no relationship between sex and headache (fail to reject H0, not significant)')
else:
print('There is a relationship between sex and headache (reject H0, significant)')
```
## Fisher’s Test
```
stat, p, = stats.fisher_exact(table)
print(f'stat={stat}, p-value={p}')
if p > 0.05:
print('Probably independent')
else:
print('Probably dependent')
```
|
github_jupyter
|
# Views
- Views are themselves widgets, but with the capability to hold other widgets.
```
from webdriver_kaifuku import BrowserManager
from widgetastic.widget import Browser
command_executor = "http://localhost:4444/wd/hub"
config = {
"webdriver": "Remote",
"webdriver_options":
{"desired_capabilities": {"browserName": "firefox"},
"command_executor": command_executor,
}
}
mgr = BrowserManager.from_conf(config)
sel = mgr.ensure_open()
class MyBrowser(Browser):
pass
browser = MyBrowser(selenium=sel)
browser.url = "http://0.0.0.0:8000/test_page.html"
from widgetastic.widget import View, Text, TextInput, Checkbox, ColourInput, Select
# Example-1
class BasicWidgetView(View):
text_input = TextInput(id="text_input")
checkbox = Checkbox(id="checkbox_input")
button = Text(locator=".//button[@id='a_button']")
color_input = ColourInput(id="color_input")
view = BasicWidgetView(browser)
```
### Nested Views
```
# Example-2
class MyNestedView(View):
@View.nested
class basic(View): #noqa
text_input = TextInput(id="text_input")
checkbox = Checkbox(id="checkbox_input")
@View.nested
class conditional(View):
select_input = Select(id="select_lang")
view = MyNestedView(browser)
view.fill({'basic': {'text_input': 'hi', 'checkbox': True},
'conditional': {'select_input': 'Go'}})
# Example-3
class Basic(View):
text_input = TextInput(id="text_input")
checkbox = Checkbox(id="checkbox_input")
class Conditional(View):
select_input = Select(id="select_lang")
class MyNestedView(View):
basic = View.nested(Basic)
conditional = View.nested(Conditional)
view = MyNestedView(browser)
view.read()
```
### Switchable Conditional Views
```
from widgetastic.widget import ConditionalSwitchableView
# Example-4: Switchable widgets
class MyConditionalWidgetView(View):
select_input = Select(id="select_lang")
lang_label = ConditionalSwitchableView(reference="select_input")
lang_label.register("Python", default=True, widget=Text(locator=".//h3[@id='lang-1']"))
lang_label.register("Go", widget=Text(locator=".//h3[@id='lang-2']"))
view = MyConditionalWidgetView(browser)
# Example-5: Switchable Views
class MyConditionalView(View):
select_input = Select(id="select_lang")
lang = ConditionalSwitchableView(reference="select_input")
@lang.register("Python", default=True)
class PythonView(View):
# some more widgets
lang_label = Text(locator=".//h3[@id='lang-1']")
@lang.register("Go")
class GoView(View):
lang_label = Text(locator=".//h3[@id='lang-2']")
view = MyConditionalView(browser)
```
### Parametrized Views
```
from widgetastic.widget import ParametrizedView
from widgetastic.utils import ParametrizedLocator
# Example-6
class MyParametrizedView(ParametrizedView):
PARAMETERS = ('name',)
ROOT = ParametrizedLocator(".//div[contains(label, {name|quote})]")
widget = Checkbox(locator=".//input")
view = MyParametrizedView(browser, additional_context={'name': 'widget 1'})
# Example-7: Nested Parametrized View
class MyNestedParametrizedView(View):
@View.nested
class widget_selector(ParametrizedView):
PARAMETERS = ('name',)
ROOT = ParametrizedLocator(".//div[contains(label, {name|quote})]")
widget = Checkbox(locator=".//input")
view = MyNestedParametrizedView(browser)
```
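A hedged usage note (assuming the classes above and that the test page contains a block whose label includes "widget 1"): a nested `ParametrizedView` is typically invoked by calling it with values for its `PARAMETERS`, for example:
```
view = MyNestedParametrizedView(browser)
view.widget_selector("widget 1").widget.fill(True)   # fill the checkbox in that block
print(view.widget_selector("widget 1").widget.read())
```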
|
github_jupyter
|
```
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import CountVectorizer
from nltk.stem import PorterStemmer
from autocorrect import spell
import os
from six.moves import cPickle
import re
MAX_LEN = 25
BATCH_SIZE = 64
stemmer = PorterStemmer()
def process_str(string, bot_input=False, bot_output=False):
string = string.strip().lower()
string = re.sub(r"[^A-Za-z0-9(),!?\'\`:]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\s{2,}", " ", string)
string = string.split(" ")
string = [re.sub(r"[0-9]+", "NUM", token) for token in string]
string = [stemmer.stem(re.sub(r'(.)\1+', r'\1\1', token)) for token in string]
string = [spell(token).lower() for token in string]
# Truncate string
while True:
try:
string.remove("")
except:
break
if(not bot_input and not bot_output):
string = string[0:MAX_LEN]
elif(bot_input):
string = string[0:MAX_LEN-1]
string.insert(0, "</start>")
else:
string = string[0:MAX_LEN-1]
string.insert(len(string), "</end>")
old_len = len(string)
for i in range((MAX_LEN) - len(string)):
string.append(" </pad> ")
string = re.sub("\s+", " ", " ".join(string)).strip()
return string, old_len
imported_graph = tf.train.import_meta_graph('checkpoints/best_validation.meta')
sess = tf.InteractiveSession()
imported_graph.restore(sess, "checkpoints/best_validation")
sess.run(tf.tables_initializer())
graph = tf.get_default_graph()
def test(text):
text, text_len = process_str(text)
text = [text] + ["hi"] * (BATCH_SIZE-1)
text_len = [text_len] + [1] * (BATCH_SIZE-1)
return text, text_len
test_init_op = graph.get_operation_by_name('data/dataset_init')
user_ph = graph.get_tensor_by_name("user_placeholder:0")
bot_inp_ph = graph.get_tensor_by_name("bot_inp_placeholder:0")
bot_out_ph = graph.get_tensor_by_name("bot_out_placeholder:0")
user_lens_ph = graph.get_tensor_by_name("user_len_placeholder:0")
bot_inp_lens_ph = graph.get_tensor_by_name("bot_inp_lens_placeholder:0")
bot_out_lens_ph = graph.get_tensor_by_name("bot_out_lens_placeholder:0")
words = graph.get_tensor_by_name("inference/words:0")
def chat(text):
user, user_lens = test(text)
sess.run(test_init_op, feed_dict={
user_ph: user,
bot_inp_ph: ["hi"] * BATCH_SIZE,
bot_out_ph: ["hi"] * BATCH_SIZE,
user_lens_ph: user_lens,
bot_inp_lens_ph: [1] * BATCH_SIZE,
bot_out_lens_ph: [1] * BATCH_SIZE
})
translations_text = sess.run(words)
output = [item.decode() for item in translations_text[0]]
if("</end>" in output):
end_idx = output.index("</end>")
output = output[0:end_idx]
output = " ".join(output)
print("BOT: " + output)
while True:
chat(input())
```
|
github_jupyter
|
# Test submission of a non-personal tax return with business specification (næringsspesifikasjon)
This demo is meant to show how the flow in an end-user system can fetch a draft, make changes, validate/check it against Skatteetaten's APIs, and then submit it via Altinn3.
```
try:
from altinn3 import *
from skatteetaten_api import main_relay, base64_decode_response, decode_dokument
import requests
import base64
import xmltodict
import xml.dom.minidom
from pathlib import Path
except ImportError as e:
print("Mangler en avhengighet, installer dem via pip")
!pip install python-jose
!pip install xmltodict
!pip install pathlib
import xmltodict
from skatteetaten_api import main_relay, base64_decode_response, decode_dokument
# helper method if you want to see a request printed as curl
def print_request_as_curl(r):
command = "curl -X {method} -H {headers} -d '{data}' '{uri}'"
method = r.request.method
uri = r.request.url
data = r.request.body
headers = ['"{0}: {1}"'.format(k, v) for k, v in r.request.headers.items()]
headers = " -H ".join(headers)
print(command.format(method=method, headers=headers, data=data, uri=uri))
idporten_header = main_relay()
```
# Fetch the draft and the current return
Here we enter the national identity number we logged in with. If you choose a different national identity number, the one you logged in with must have access to the tax return you want to fetch.
#### The party below is used for demonstration; make sure to use your own test parties when you test
01014701377 is the general manager of 811422762
```
s = requests.Session()
s.headers = dict(idporten_header)
fnr="01014701377" #oppdater med test fødselsnummerene du har fått tildelt
orgnr_as = "811423262"
```
### Draft (utkast)
```
url_utkast = f'https://mp-test.sits.no/api/skattemelding/v2/utkast/2021/{orgnr_as}'
r = s.get(url_utkast)
r
```
### Current (gjeldende)
```
url_gjeldende = f'https://mp-test.sits.no/api/skattemelding/v2/2021/{orgnr_as}'
r_gjeldende = s.get(url_gjeldende)
r_gjeldende
```
## Assessed (fastsatt)
Here you get an _http 404_ if the party has no assessment. Re-run this after you have submitted and received feedback in Altinn that the submission has been processed; you should then have an assessed tax return if it was submitted as komplett (complete).
```
url_fastsatt = f'https://mp-test.sits.no/api/skattemelding/v2/fastsatt/2021/{orgnr_as}'
r_fastsatt = s.get(url_fastsatt)
r_fastsatt
r_fastsatt.headers
```
## Response from fetching the current return
### Current document reference:
The response from every API call, whether draft/assessed or current, includes a document reference.
To call the validation service, you need to use the correct reference to the current tax return.
The cell below extracts the current document reference and prints the response from the fetch-current call.
```
sjekk_svar = r_gjeldende
sme_og_naering_respons = xmltodict.parse(sjekk_svar.text)
skattemelding_base64 = sme_og_naering_respons["skattemeldingOgNaeringsspesifikasjonforespoerselResponse"]["dokumenter"]["skattemeldingdokument"]
sme_base64 = skattemelding_base64["content"]
dokref = sme_og_naering_respons["skattemeldingOgNaeringsspesifikasjonforespoerselResponse"]["dokumenter"]['skattemeldingdokument']['id']
decoded_sme_xml = decode_dokument(skattemelding_base64)
sme_utkast = xml.dom.minidom.parseString(decoded_sme_xml["content"]).toprettyxml()
print(f"Responsen fra hent gjeldende ser slik ut, gjeldende dokumentrerefanse er {dokref}")
print(sjekk_svar.request.method ,sjekk_svar.request.url)
print(xml.dom.minidom.parseString(sjekk_svar.text).toprettyxml())
# Note that dokumenter.dokument.type = skattemeldingUpersonlig
with open("../../../src/resources/eksempler/v2/Naeringspesifikasjon-enk-v2.xml", 'r') as f:
naering_as_xml = f.read()
innsendingstype = "komplett"
naeringsspesifikasjoner_as_b64 = base64.b64encode(naering_as_xml.encode("utf-8"))
naeringsspesifikasjoner_as_b64 = str(naeringsspesifikasjoner_as_b64.decode("utf-8"))
naeringsspesifikasjoner_base64=naeringsspesifikasjoner_as_b64
dok_ref=dokref
valider_konvlutt_v2 = """
<?xml version="1.0" encoding="utf-8" ?>
<skattemeldingOgNaeringsspesifikasjonRequest xmlns="no:skatteetaten:fastsetting:formueinntekt:skattemeldingognaeringsspesifikasjon:request:v2">
<dokumenter>
<dokument>
<type>skattemeldingUpersonlig</type>
<encoding>utf-8</encoding>
<content>{sme_base64}</content>
</dokument>
<dokument>
<type>naeringsspesifikasjon</type>
<encoding>utf-8</encoding>
<content>{naeringsspeifikasjon_base64}</content>
</dokument>
</dokumenter>
<dokumentreferanseTilGjeldendeDokument>
<dokumenttype>skattemeldingPersonlig</dokumenttype>
<dokumentidentifikator>{dok_ref}</dokumentidentifikator>
</dokumentreferanseTilGjeldendeDokument>
<inntektsaar>2021</inntektsaar>
<innsendingsinformasjon>
<innsendingstype>{innsendingstype}</innsendingstype>
<opprettetAv>TurboSkatt</opprettetAv>
</innsendingsinformasjon>
</skattemeldingOgNaeringsspesifikasjonRequest>
""".replace("\n","")
naering_enk = valider_konvlutt_v2.format(sme_base64=sme_base64,
naeringsspeifikasjon_base64=naeringsspesifikasjoner_base64,
dok_ref=dok_ref,
innsendingstype=innsendingstype)
```
# Validate the draft tax return with business information
```
def valider_sme(payload):
url_valider = f'https://mp-test.sits.no/api/skattemelding/v2/valider/2021/{orgnr_as}'
header = dict(idporten_header)
header["Content-Type"] = "application/xml"
return s.post(url_valider, headers=header, data=payload)
valider_respons = valider_sme(naering_enk)
resultatAvValidering = xmltodict.parse(valider_respons.text)["skattemeldingerOgNaeringsspesifikasjonResponse"]["resultatAvValidering"]
if valider_respons:
print(resultatAvValidering)
print()
print(xml.dom.minidom.parseString(valider_respons.text).toprettyxml())
else:
print(valider_respons.status_code, valider_respons.headers, valider_respons.text)
```
# Altinn 3
1. Fetch an Altinn token
2. Create a new instance of the form
3. Upload metadata to the form
4. Upload attachments to the tax return
5. Update the tax return XML with references to the vedlegg_id (attachment id) from Altinn3.
6. Upload the tax return and the business information as an attachment
```
#1
altinn3_applikasjon = "skd/formueinntekt-skattemelding-v2"
altinn_header = hent_altinn_token(idporten_header)
#2
instans_data = opprett_ny_instans(altinn_header, fnr, appnavn=altinn3_applikasjon)
```
### 3. Upload metadata (skattemelding_V1)
```
print(f"innsendingstypen er satt til: {innsendingstype}")
req_metadata = last_opp_metadata_json(instans_data, altinn_header, inntektsaar=2021, appnavn=altinn3_applikasjon)
req_metadata
```
## Upload the tax return
```
# Upload the tax return
req_send_inn = last_opp_skattedata(instans_data, altinn_header,
xml=naering_enk,
data_type="skattemeldingOgNaeringspesifikasjon",
appnavn=altinn3_applikasjon)
req_send_inn
```
### Set the status to ready for retrieval by Skatteetaten.
```
req_bekreftelse = endre_prosess_status(instans_data, altinn_header, "next", appnavn=altinn3_applikasjon)
req_bekreftelse = endre_prosess_status(instans_data, altinn_header, "next", appnavn=altinn3_applikasjon)
req_bekreftelse
```
### Future: Check the status of the Altinn3 instance to see whether Skatteetaten has fetched it.
### View the submission in Altinn
Take a sip of coffee and pat yourself on the back; you have now submitted. Let the bureaucracy do its thing... it takes a little time. At the moment Skatteetaten checks Altinn3 every 5 minutes for new submissions.
|
github_jupyter
|
# Keras tutorial - the Happy House
Welcome to the first assignment of week 2. In this assignment, you will:
1. Learn to use Keras, a high-level neural networks API (programming framework), written in Python and capable of running on top of several lower-level frameworks including TensorFlow and CNTK.
2. See how you can in a couple of hours build a deep learning algorithm.
Why are we using Keras? Keras was developed to enable deep learning engineers to build and experiment with different models very quickly. Just as TensorFlow is a higher-level framework than Python, Keras is an even higher-level framework and provides additional abstractions. Being able to go from idea to result with the least possible delay is key to finding good models. However, Keras is more restrictive than the lower-level frameworks, so there are some very complex models that you can implement in TensorFlow but not (without more difficulty) in Keras. That being said, Keras will work fine for many common models.
In this exercise, you'll work on the "Happy House" problem, which we'll explain below. Let's load the required packages and solve the problem of the Happy House!
```
import numpy as np
from keras import layers
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from kt_utils import *
import keras.backend as K
K.set_image_data_format('channels_last')
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
%matplotlib inline
```
**Note**: As you can see, we've imported a lot of functions from Keras. You can use them easily just by calling them directly in the notebook. Ex: `X = Input(...)` or `X = ZeroPadding2D(...)`.
## 1 - The Happy House
For your next vacation, you decided to spend a week with five of your friends from school. It is a very convenient house with many things to do nearby. But the most important benefit is that everybody has committed to being happy when they are in the house. So anyone wanting to enter the house must prove their current state of happiness.
<img src="images/happy-house.jpg" style="width:350px;height:270px;">
<caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **the Happy House**</center></caption>
As a deep learning expert, to make sure the "Happy" rule is strictly applied, you are going to build an algorithm that uses pictures from the front door camera to check if the person is happy or not. The door should open only if the person is happy.
You have gathered pictures of your friends and yourself, taken by the front-door camera. The dataset is labeled.
<img src="images/house-members.png" style="width:550px;height:250px;">
Run the following code to normalize the dataset and learn about its shapes.
```
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Normalize image vectors
X_train = X_train_orig/255.
X_test = X_test_orig/255.
# Reshape
Y_train = Y_train_orig.T
Y_test = Y_test_orig.T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
```
**Details of the "Happy" dataset**:
- Images are of shape (64,64,3)
- Training: 600 pictures
- Test: 150 pictures
It is now time to solve the "Happy" Challenge.
## 2 - Building a model in Keras
Keras is very good for rapid prototyping. In just a short time you will be able to build a model that achieves outstanding results.
Here is an example of a model in Keras:
```python
def model(input_shape):
# Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!
X_input = Input(input_shape)
# Zero-Padding: pads the border of X_input with zeroes
X = ZeroPadding2D((3, 3))(X_input)
# CONV -> BN -> RELU Block applied to X
X = Conv2D(32, (7, 7), strides = (1, 1), name = 'conv0')(X)
X = BatchNormalization(axis = 3, name = 'bn0')(X)
X = Activation('relu')(X)
# MAXPOOL
X = MaxPooling2D((2, 2), name='max_pool')(X)
# FLATTEN X (means convert it to a vector) + FULLYCONNECTED
X = Flatten()(X)
X = Dense(1, activation='sigmoid', name='fc')(X)
# Create model. This creates your Keras model instance, you'll use this instance to train/test the model.
model = Model(inputs = X_input, outputs = X, name='HappyModel')
return model
```
Note that Keras uses a different convention with variable names than we've previously used with numpy and TensorFlow. In particular, rather than creating and assigning a new variable on each step of forward propagation such as `X`, `Z1`, `A1`, `Z2`, `A2`, etc. for the computations for the different layers, in Keras code each line above just reassigns `X` to a new value using `X = ...`. In other words, during each step of forward propagation, we are just writing the latest value in the computation into the same variable `X`. The only exception was `X_input`, which we kept separate and did not overwrite, since we needed it at the end to create the Keras model instance (`model = Model(inputs = X_input, ...)` above).
**Exercise**: Implement a `HappyModel()`. This assignment is more open-ended than most. We suggest that you start by implementing a model using the architecture we suggest, and run through the rest of this assignment using that as your initial model. But after that, come back and take initiative to try out other model architectures. For example, you might take inspiration from the model above, but then vary the network architecture and hyperparameters however you wish. You can also use other functions such as `AveragePooling2D()`, `GlobalMaxPooling2D()`, `Dropout()`.
**Note**: You have to be careful with your data's shapes. Use what you've learned in the videos to make sure your convolutional, pooling and fully-connected layers are adapted to the volumes you're applying them to.
```
# GRADED FUNCTION: HappyModel
def HappyModel(input_shape):
"""
Implementation of the HappyModel.
Arguments:
input_shape -- shape of the images of the dataset
Returns:
model -- a Model() instance in Keras
"""
### START CODE HERE ###
# Feel free to use the suggested outline in the text above to get started, and run through the whole
    # exercise (including the later portions of this notebook) once. Then come back and try out other
# network architectures as well.
X_input = Input(input_shape)
X = ZeroPadding2D((3, 3))(X_input)
X = Conv2D(32, (7, 7), strides = (1, 1), name = 'conv0')(X)
X = BatchNormalization(axis = 3, name = 'bn0')(X)
X = Activation('relu')(X)
X = MaxPooling2D((2, 2), name='max_pool')(X)
X = Flatten()(X)
X = Dense(1, activation='sigmoid', name='fc')(X)
model = Model(inputs = X_input, outputs = X, name='HappyModel')
### END CODE HERE ###
return model
```
You have now built a function to describe your model. To train and test this model, there are four steps in Keras:
1. Create the model by calling the function above
2. Compile the model by calling `model.compile(optimizer = "...", loss = "...", metrics = ["accuracy"])`
3. Train the model on train data by calling `model.fit(x = ..., y = ..., epochs = ..., batch_size = ...)`
4. Test the model on test data by calling `model.evaluate(x = ..., y = ...)`
If you want to know more about `model.compile()`, `model.fit()`, `model.evaluate()` and their arguments, refer to the official [Keras documentation](https://keras.io/models/model/).
**Exercise**: Implement step 1, i.e. create the model.
```
### START CODE HERE ### (1 line)
happyModel = HappyModel((64, 64, 3))
### END CODE HERE ###
```
**Exercise**: Implement step 2, i.e. compile the model to configure the learning process. Choose the 3 arguments of `compile()` wisely. Hint: the Happy Challenge is a binary classification problem.
```
### START CODE HERE ### (1 line)
happyModel.compile(optimizer='adam', loss='binary_crossentropy', metrics = ["accuracy"])
### END CODE HERE ###
```
**Exercise**: Implement step 3, i.e. train the model. Choose the number of epochs and the batch size.
```
### START CODE HERE ### (1 line)
happyModel.fit(X_train, Y_train, epochs=45, batch_size=16)
### END CODE HERE ###
```
Note that if you run `fit()` again, the `model` will continue to train with the parameters it has already learnt instead of reinitializing them.
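If you do want to restart training from scratch, one simple approach (a sketch mirroring the cells above, not a graded step) is to rebuild and recompile the model before calling `fit()` again:
```python
# Re-creating the model re-initializes its weights; recompile before fitting again
happyModel = HappyModel((64, 64, 3))
happyModel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
```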
**Exercise**: Implement step 4, i.e. test/evaluate the model.
```
### START CODE HERE ### (1 line)
preds = happyModel.evaluate(X_test, Y_test)
### END CODE HERE ###
print()
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
```
If your `happyModel()` function worked, you should have observed much better than random-guessing (50%) accuracy on the train and test sets.
To give you a point of comparison, our model gets around **95% test accuracy in 40 epochs** (and 99% train accuracy) with a mini batch size of 16 and "adam" optimizer. But our model gets decent accuracy after just 2-5 epochs, so if you're comparing different models you can also train a variety of models on just a few epochs and see how they compare.
If you have not yet achieved a very good accuracy (let's say more than 80%), here are some things you can play around with to try to achieve it (a short code sketch combining the first two suggestions follows this list):
- Try using blocks of CONV->BATCHNORM->RELU such as:
```python
X = Conv2D(32, (3, 3), strides = (1, 1), name = 'conv0')(X)
X = BatchNormalization(axis = 3, name = 'bn0')(X)
X = Activation('relu')(X)
```
until your height and width dimensions are quite low and your number of channels quite large (≈32 for example). You are encoding useful information in a volume with a lot of channels. You can then flatten the volume and use a fully-connected layer.
- You can use MAXPOOL after such blocks. It will help you lower the dimension in height and width.
- Change your optimizer. We find Adam works well.
- If the model is struggling to run and you get memory issues, lower your batch_size (12 is usually a good compromise)
- Run on more epochs, until you see the train accuracy plateauing.
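Here is a minimal sketch of that idea (illustrative layer sizes, not the graded solution): three CONV->BATCHNORM->RELU blocks, each followed by MAXPOOL, shrinking a 64x64x3 input to an 8x8x32 volume before the fully-connected layer.
```python
def DeeperHappyModel(input_shape=(64, 64, 3)):
    X_input = Input(input_shape)
    X = X_input
    for i, filters in enumerate([8, 16, 32]):
        X = Conv2D(filters, (3, 3), strides=(1, 1), padding='same', name='conv' + str(i))(X)
        X = BatchNormalization(axis=3, name='bn' + str(i))(X)
        X = Activation('relu')(X)
        X = MaxPooling2D((2, 2), name='max_pool' + str(i))(X)  # halves height and width each time
    X = Flatten()(X)
    X = Dense(1, activation='sigmoid', name='fc')(X)
    return Model(inputs=X_input, outputs=X, name='DeeperHappyModel')
```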
Even if you have achieved a good accuracy, please feel free to keep playing with your model to try to get even better results.
**Note**: If you perform hyperparameter tuning on your model, the test set actually becomes a dev set, and your model might end up overfitting to the test (dev) set. But just for the purpose of this assignment, we won't worry about that here.
## 3 - Conclusion
Congratulations, you have solved the Happy House challenge!
Now, you just need to link this model to the front-door camera of your house. We unfortunately won't go into the details of how to do that here.
<font color='blue'>
**What we would like you to remember from this assignment:**
- Keras is a tool we recommend for rapid prototyping. It allows you to quickly try out different model architectures. Are there any applications of deep learning to your daily life that you'd like to implement using Keras?
- Remember how to code a model in Keras and the four steps leading to the evaluation of your model on the test set. Create->Compile->Fit/Train->Evaluate/Test.
## 4 - Test with your own image (Optional)
Congratulations on finishing this assignment. You can now take a picture of your face and see if you could enter the Happy House. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go to your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Write your image's name in the following code
4. Run the code and check if the algorithm is right (0 is unhappy, 1 is happy)!
The training/test sets were quite similar; for example, all the pictures were taken against the same background (since a front door camera is always mounted in the same position). This makes the problem easier, but a model trained on this data may or may not work on your own data. But feel free to give it a try!
```
### START CODE HERE ###
img_path = 'images/my_image.jpg'
### END CODE HERE ###
img = image.load_img(img_path, target_size=(64, 64))
imshow(img)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
print(happyModel.predict(x))
```
## 5 - Other useful functions in Keras (Optional)
Two other basic features of Keras that you'll find useful are:
- `model.summary()`: prints the details of your layers in a table with the sizes of their inputs/outputs
- `plot_model()`: plots your graph in a nice layout. You can even save it as ".png" using SVG() if you'd like to share it on social media ;). It is saved in "File" then "Open..." in the upper bar of the notebook.
Run the following code.
```
happyModel.summary()
plot_model(happyModel, to_file='HappyModel.png')
SVG(model_to_dot(happyModel).create(prog='dot', format='svg'))
```
|
github_jupyter
|
# How do ratings behave after users have seen many captions?
This notebook looks at the "vote decay" of users. The New Yorker caption contest organizer, Bob Mankoff, has received many emails like the one below (name/personal details left out for anonymity)
> Here's my issue.
>
> First time I encounter something, I might say it's funny.
>
> Then it comes back in many forms over and over and it's no longer funny and I wish I could go back to the first one and say it's not funny.
>
> But it's funny, and then I can't decide whether to credit everyone with funny or keep hitting unfunny. What I really like to find out is who submitted it first, but often it's slightly different and there may be a best version. Auggh!
>
> How should we do this???
We can investigate this: we have all the data at hand. We record the timestamp, participant ID and their rating for a given caption. So let's see how votes go after a user has seen $n$ captions!
```
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn')
import caption_contest_data as ccd
```
## Reading in data
Let's read in the data. As the last column can contain a non-escaped comma, we have to fix that before doing any analysis.
Note that two versions of this notebook exist (the previous notebook can be found in [43bc5d]). This highlights some of the differences required to read in the earlier datasets.
[43bc5d]:https://github.com/nextml/caption-contest-data/commit/43bc5d23ee287b8b34cc4eb0181484bd21bbd341
```
contest = 540
responses = ccd.responses(contest)
print(len(responses))
responses.head()
```
## Seeing how many captions a user has seen
This is the workhorse of the notebook: it counts how many captions each participant has seen. I sorted by timestamp (an actual timestamp, not a str) so the ratings are collected in the order each user saw the captions. I do not assume that only one user answers at a time.
```
responses = responses.sort_values(by='timestamp_query_generated')
# responses = responses[0:1000]  # debug
captions_seen_by = {}  # participant_uid -> number of captions rated so far
captions_seen = []     # for each response (in time order), how many captions its participant had seen
for _, response in responses.iterrows():
    id_ = response['participant_uid']
    if id_ not in captions_seen_by:
        captions_seen_by[id_] = 0
    captions_seen_by[id_] += 1
    captions_seen += [captions_seen_by[id_]]
responses['number of captions seen'] = captions_seen
responses.head()
```
## Viewing the data
Now let's format the data to view it. We can view the data in two ways: as we only have three rating values, we can view the probability of a person rating 1, 2 or 3, and can also view the mean.
In this, we rely on `pd.pivot_table`. This can take a DataFrame that looks like a list of dictionaries and compute `aggfunc` (by default `np.mean`) for all items that share common keys (indicated by `index` and `columns`). It's similar to Excel's pivot table functionality.
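As a quick illustration of that call (a toy DataFrame made up here, not part of the contest data):
```python
toy = pd.DataFrame([
    {'alg_label': 'RandomSampling', 'number of captions seen': 1, 'target_reward': 3},
    {'alg_label': 'RandomSampling', 'number of captions seen': 1, 'target_reward': 1},
    {'alg_label': 'RandomSampling', 'number of captions seen': 2, 'target_reward': 2},
])
# One row per "number of captions seen", one column per alg_label,
# each cell holding np.mean of target_reward over that group.
toy.pivot_table(index='number of captions seen', columns='alg_label', values='target_reward')
```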
### Probability of rating {1, 2, 3}
```
def prob(x):
n = len(x)
ret = {'n': n}
ret.update({name: np.sum(x == i) for name, i in [('unfunny', 1),
('somewhat funny', 2),
('funny', 3)]})
return ret
probs = responses.pivot_table(index='number of captions seen',
columns='alg_label', values='target_reward',
aggfunc=prob)
probs.head()
d = {label: dict(probs[label]) for label in ['RandomSampling']}
for label in d.keys():
for n in d[label].keys():
if d[label][n] is None:
continue
for rating in ['unfunny', 'somewhat funny', 'funny']:
d[label][n][rating] = d[label][n][rating] / d[label][n]['n']
df = pd.DataFrame(d['RandomSampling']).T
df = pd.concat({'RandomSampling': df}, axis=1)
df.head()
plt.style.use("default")
fig, axs = plt.subplots(figsize=(8, 4), ncols=2)
alg = "RandomSampling"
show = df[alg].copy()
show["captions seen"] = show.index
for y in ["funny", "somewhat funny", "unfunny"]:
show.plot(x="captions seen", y=y, ax=axs[0])
show.plot(x="captions seen", y="n", ax=axs[1])
for ax in axs:
ax.set_xlim(0, 100)
ax.grid(linestyle='--', alpha=0.5)
plt.style.use("default")
def plot(alg):
fig = plt.figure(figsize=(10, 5))
ax = plt.subplot(1, 2, 1)
df[alg][['unfunny', 'somewhat funny', 'funny']].plot(ax=ax)
plt.xlim(0, 100)
plt.title('{} ratings\nfor contest {}'.format(alg, contest))
plt.ylabel('Probability of rating')
plt.xlabel('Number of captions seen')
plt.grid(linestyle="--", alpha=0.6)
ax = plt.subplot(1, 2, 2)
df[alg]['n'].plot(ax=ax, logy=False)
plt.ylabel('Number of users')
plt.xlabel('Number of captions seen, $n$')
plt.title('Number of users that have\nseen $n$ captions')
plt.xlim(0, 100)
plt.grid(linestyle="--", alpha=0.6)
for alg in ['RandomSampling']:
fig = plot(alg)
plt.show()
```
|
github_jupyter
|
# Text Classification with Pretrained Word Vectors
**Author**: [fiyen](https://github.com/fiyen)<br>
**Date**: 2021.10<br>
**Abstract**: This tutorial demonstrates how to use the Imdb dataset built into PaddlePaddle and perform text classification with pretrained word vectors.
## 1. Environment Setup
This tutorial is written for Paddle 2.2.0-rc0. If your environment uses a different version, please first follow the official [installation guide](https://www.paddlepaddle.org.cn/install/quick) to install Paddle 2.2.0-rc0.
```
import paddle
from paddle.io import Dataset
import numpy as np
import paddle.text as text
import random
print(paddle.__version__)
```
## 2. Data Loading
In this example, Paddle 2.2.0-rc0 is used to train and test a classifier on the Imdb dataset (a binary sentiment classification dataset of movie reviews). Imdb is loaded directly from Paddle 2.2.0-rc0, and the task is completed with the help of pretrained word vectors ([GloVe embeddings](http://nlp.stanford.edu/projects/glove/)).
```
print('自然语言相关数据集:', paddle.text.__all__)
```
Because Paddle 2.2.0-rc0 provides a preprocessed Imdb dataset, the required data instances can be loaded conveniently, which saves the trouble of preprocessing. The high-quality datasets currently built into Paddle 2.2.0-rc0 include Conll05st, Imdb, Imikolov, Movielens, UCIHousing, WMT14 and WMT16, and interfaces for more commonly used datasets will be provided in the future.
The following defines how to load the imdb training and test sets. Here, cutoff sets the cut-off size for building the vocabulary: words whose frequency in the dataset falls below cutoff are ignored; mode specifies what the returned data is used for (test: test set, train: training set).
### 2.1 Defining the dataset
```
imdb_train = text.Imdb(mode='train', cutoff=150)
imdb_test = text.Imdb(mode='test', cutoff=150)
```
Calling Imdb returns an encoded dataset in which each term corresponds to a unique id; the mapping can be inspected via imdb_train.word_idx. Each sample, i.e. one movie review, is represented as a sequence of ids. Let's check the generated data:
```
print("训练集样本数量: %d; 测试集样本数量: %d" % (len(imdb_train), len(imdb_test)))
print(f"样本标签: {set(imdb_train.labels)}")
print(f"样本字典: {list(imdb_train.word_idx.items())[:10]}")
print(f"单个样本: {imdb_train.docs[0]}")
print(f"最小样本长度: {min([len(x) for x in imdb_train.docs])};最大样本长度: {max([len(x) for x in imdb_train.docs])}")
```
For the training set, shuffle the order of the data to improve the training of the classification model that follows.
```
shuffle_index = list(range(len(imdb_train)))
random.shuffle(shuffle_index)
train_x = [imdb_train.docs[i] for i in shuffle_index]
train_y = [imdb_train.labels[i] for i in shuffle_index]
test_x = imdb_test.docs
test_y = imdb_test.labels
```
From the sample lengths you can see that the samples differ in length. During model training, however, every sample must have the same length so that batches can be assembled into matrices for batched computation.
Therefore, all samples are first padded or truncated to a common length.
```
def vectorizer(input, label=None, length=2000):
if label is not None:
for x, y in zip(input, label):
yield np.array((x + [0]*length)[:length]).astype('int64'), np.array([y]).astype('int64')
else:
for x in input:
yield np.array((x + [0]*length)[:length]).astype('int64')
```
### 2.2 Loading the pretrained vectors
The file used below is small and can be loaded into memory in one go. Large pretrained vector files that cannot fit in memory at once can instead be loaded in batches and matched in parallel.
In addition, AI Studio provides the glove.6B dataset as a mount, so users can load and unzip the dataset directly in AI Studio.
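For the large-file case, one simple variant (a sketch, not used below) streams the file line by line and keeps only the vectors for words that actually occur in the dataset; `vocab_words` here is a hypothetical set of those words decoded to `str`:
```python
def load_needed_embeddings(path, vocab_words):
    # Stream the GloVe file and keep only the vectors we need, so memory stays bounded
    needed = {}
    with open(path, encoding='utf-8') as f:
        for line in f:
            word, vec = line.split(maxsplit=1)
            if word in vocab_words:
                needed[word] = [float(s) for s in vec.split()]
    return needed
```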
```
# Download and unzip the pretrained vectors
!wget http://nlp.stanford.edu/data/glove.6B.zip
!unzip -q glove.6B.zip
glove_path = "./glove.6B.100d.txt"
embeddings = {}
```
Let's look at one line of the GloVe pretrained vector file:
```
# Decode using utf-8 encoding
with open(glove_path, encoding='utf-8') as gf:
line = gf.readline()
print("GloVe单行数据:'%s'" % line)
```
As you can see, each line starts with a word, followed by that word's vector values separated by spaces. Based on this, a dictionary of all word vectors can be built as follows.
```
with open(glove_path, encoding='utf-8') as gf:
for glove in gf:
word, embedding = glove.split(maxsplit=1)
embedding = [float(s) for s in embedding.split(' ')]
embeddings[word] = embedding
print("预训练词向量总数:%d" % len(embeddings))
print(f"单词'the'的向量是:{embeddings['the']}")
```
## 3. Matching word vectors to the dataset vocabulary
Next, extract the dataset's vocabulary. Note that the ids in the vocabulary are assigned in order of word frequency: the more frequent a word, the smaller its id.
```
word_idx = imdb_train.word_idx
vocab = [w for w in word_idx.keys()]
print(f"词表的前5个单词:{vocab[:5]}")
print(f"词表的后5个单词:{vocab[-5:]}")
```
Looking at the last 5 words of the vocabulary, the final entry is "\<unk\>", a symbol that represents all out-of-vocabulary words. Also, an entry of the form b'the' is the byte-encoded form of the string 'the'; remember to convert it with b'the'.decode() when using it ('\<unk\>' is not byte-encoded, so mind the difference).
Next, match each word in the vocabulary with its word vector. The pretrained vectors may not cover every word in the dataset's vocabulary; for words that are missing, the word vector is set to the zero vector.
```
# Define the word-vector dimension; it must match the pretrained vectors
dim = 100
vocab_embeddings = np.zeros((len(vocab), dim))
for ind, word in enumerate(vocab):
if word != '<unk>':
word = word.decode()
embedding = embeddings.get(word, np.zeros((dim,)))
vocab_embeddings[ind, :] = embedding
```
## 4. Building the Network
### 4.1 Building the Embedding from pretrained vectors
For an Embedding based on pretrained vectors, you usually want its parameters to stay fixed, so set trainable=False. If you want to continue training the parameters on top of the pretrained values, set trainable=True.
```
pretrained_attr = paddle.ParamAttr(name='embedding',
initializer=paddle.nn.initializer.Assign(vocab_embeddings),
trainable=False)
embedding_layer = paddle.nn.Embedding(num_embeddings=len(vocab),
embedding_dim=dim,
padding_idx=word_idx['<unk>'],
weight_attr=pretrained_attr)
```
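If you instead want to fine-tune the pretrained vectors, the only change is the `trainable` flag (a hypothetical variant, not used in the rest of this tutorial):
```python
# Keep the pretrained values as initialization but allow them to be updated during training
finetune_attr = paddle.ParamAttr(name='embedding_finetune',
                                 initializer=paddle.nn.initializer.Assign(vocab_embeddings),
                                 trainable=True)
```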
### 4.2 Building the classifier
Here a simple classification model based on 1-D convolution is built, with the structure Embedding->Conv1D->Pool1D->Linear. When defining the Linear layer, the input dimension must be known; it can be computed with the formula from the [official documentation](https://www.paddlepaddle.org.cn/documentation/docs/zh/2.0-beta/api/paddle/nn/layer/conv/Conv2d_cn.html). A helper function for this calculation is given below.
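For reference, the standard output-length formula that `cal_output_shape` implements for a 1-D convolution (and, with the appropriate kernel size and stride, for max pooling) is

$$L_{out} = \left\lfloor \frac{L_{in} + 2\,\text{padding} - \text{dilation}\times(\text{kernel\_size}-1) - 1}{\text{stride}} \right\rfloor + 1$$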
```
def cal_output_shape(input_shape, out_channels, kernel_size, stride, padding=0, dilation=1):
return out_channels, int((input_shape + 2*padding - (dilation*(kernel_size - 1) + 1)) / stride) + 1
# Define the length of each sample
length = 2000
# Define the convolution layer parameters
kernel_size = 5
out_channels = 10
stride = 2
padding = 0
output_shape = cal_output_shape(length, out_channels, kernel_size, stride, padding)
output_shape = cal_output_shape(output_shape[1], output_shape[0], 2, 2, 0)
sim_model = paddle.nn.Sequential(embedding_layer,
paddle.nn.Conv1D(in_channels=dim, out_channels=out_channels, kernel_size=kernel_size,
stride=stride, padding=padding, data_format='NLC', bias_attr=True),
paddle.nn.ReLU(),
paddle.nn.MaxPool1D(kernel_size=2, stride=2),
paddle.nn.Flatten(),
paddle.nn.Linear(in_features=np.prod(output_shape), out_features=2, bias_attr=True),
paddle.nn.Softmax())
paddle.summary(sim_model, input_size=(-1, length), dtypes='int64')
```
### 4.3 Reading the data and training
Paddle's io.Dataset module can be used to build a data reader, which makes it easy to feed the data to training in batches.
```
class DataReader(Dataset):
def __init__(self, input, label, length):
self.data = list(vectorizer(input, label, length=length))
def __getitem__(self, idx):
return self.data[idx]
def __len__(self):
return len(self.data)
# Define the input format
input_form = paddle.static.InputSpec(shape=[None, length], dtype='int64', name='input')
label_form = paddle.static.InputSpec(shape=[None, 1], dtype='int64', name='label')
model = paddle.Model(sim_model, input_form, label_form)
model.prepare(optimizer=paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters()),
loss=paddle.nn.loss.CrossEntropyLoss(),
metrics=paddle.metric.Accuracy())
# Split into training and validation sets
eval_length = int(len(train_x) * 1/4)
model.fit(train_data=DataReader(train_x[:-eval_length], train_y[:-eval_length], length),
eval_data=DataReader(train_x[-eval_length:], train_y[-eval_length:], length),
batch_size=32, epochs=10, verbose=1)
```
## 5. Evaluating the Model and Making Predictions
```
# Evaluate
model.evaluate(eval_data=DataReader(test_x, test_y, length), batch_size=32, verbose=1)
# Predict
true_y = test_y[100:105] + test_y[-110:-105]
pred_y = model.predict(DataReader(test_x[100:105] + test_x[-110:-105], None, length), batch_size=1)
test_x_doc = test_x[100:105] + test_x[-110:-105]
# Convert label ids to text
label_id2text = {0: 'positive', 1: 'negative'}
for index, y in enumerate(pred_y[0]):
print("原文本:%s" % ' '.join([vocab[i].decode() for i in test_x_doc[index] if i < len(vocab) - 1]))
print("预测的标签是:%s, 实际标签是:%s" % (label_id2text[np.argmax(y)], label_id2text[true_y[index]]))
```
|
github_jupyter
|
```
# Modules required for this chapter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pylab import *
import matplotlib.cm as cm
import warnings
warnings.filterwarnings(action = 'ignore')
%matplotlib inline
plt.rcParams['font.sans-serif']=['SimHei'] # use SimHei so Chinese characters in labels display correctly
plt.rcParams['axes.unicode_minus']=False
from sklearn import svm
import sklearn.linear_model as LM
import scipy.stats as st
from scipy.optimize import root,fsolve
from sklearn.feature_selection import VarianceThreshold,SelectKBest,f_classif,chi2
from sklearn.feature_selection import RFE,RFECV,SelectFromModel
from sklearn.linear_model import Lasso,LassoCV,lasso_path,Ridge,RidgeCV
from sklearn.linear_model import enet_path,ElasticNetCV,ElasticNet
data=pd.read_table('邮政编码数据.txt',sep=' ',header=None)
tmp=data.loc[(data[0]==1) | (data[0]==3)]
X=tmp.iloc[:,1:-1]
Y=tmp.iloc[:,0]
fig,axes=plt.subplots(nrows=1,ncols=2,figsize=(12,5))
alphas=list(np.linspace(0,1,20))
alphas.extend([2,3])
coef=np.zeros((len(alphas),X.shape[1]))
err=[]
for i,alpha in enumerate(alphas):
modelLasso = Lasso(alpha=alpha)
modelLasso.fit(X,Y)
if i==0:
coef[i]=modelLasso.coef_
else:
coef[i]=(modelLasso.coef_/coef[0])
err.append(1-modelLasso.score(X,Y))
print('前5个变量的回归系数(alpha=0):%s'%coef[0,][0:5])
for i in np.arange(0,X.shape[1]):
axes[0].plot(coef[1:-1,i])
axes[0].set_title("Lasso回归中的收缩参数alpha和回归系数")
axes[0].set_xlabel("收缩参数alpha变化")
axes[0].set_xticks(np.arange(len(alphas)))
axes[0].set_ylabel("Beta(alpha)/Beta(alpha=0)")
axes[1].plot(err)
axes[1].set_title("Lasso回归中的收缩参数alpha和训练误差")
axes[1].set_xlabel("收缩参数alpha变化")
axes[1].set_xticks(np.arange(len(alphas)))
axes[1].set_ylabel("错判率")
alphas_lasso, coefs_lasso, _ = lasso_path(X, Y)
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
plt.xlabel('-Log(alpha)')
plt.ylabel('回归系数')
plt.title('Lasso回归中的收缩参数alpha和回归系数')
plt.show()
model = LassoCV() # by default, alpha is chosen via 3-fold cross-validation
model.fit(X,Y)
print('Lasso剔除的变量:%d'%sum(model.coef_==0))
print('Lasso的最佳的alpha:',model.alpha_) # only available when using LassoCV
lassoAlpha=model.alpha_
estimator = Lasso(alpha=lassoAlpha)
selector=SelectFromModel(estimator=estimator)
selector.fit(X,Y)
print("阈值:%s"%selector.threshold_)
print("保留的特征个数:%d"%len(selector.get_support(indices=True)))
Xtmp=selector.inverse_transform(selector.transform(X))
plt.figure(figsize=(8,8))
np.random.seed(1)
ids=np.random.choice(len(Y),25)
for i,item in enumerate(ids):
img=np.array(Xtmp[item,]).reshape((16,16))
plt.subplot(5,5,i+1)
plt.imshow(img,cmap=cm.gray)
plt.show()
modelLasso = Lasso(alpha=lassoAlpha)
modelLasso.fit(X,Y)
print("lasso训练误差:%.2f"%(1-modelLasso.score(X,Y)))
modelRidge = RidgeCV() # RidgeCV tunes alpha automatically to select the best value
modelRidge.fit(X,Y)
print('岭回归剔除的变量:%d'%sum(modelRidge.coef_==0))
print('岭回归最优alpha:',modelRidge.alpha_)
print("岭回归训练误差:%.2f"%(1-modelRidge.score(X,Y)))
```
|
github_jupyter
|
################################################################################
#Licensed Materials - Property of IBM
#(C) Copyright IBM Corp. 2019
#US Government Users Restricted Rights - Use, duplication disclosure restricted
#by GSA ADP Schedule Contract with IBM Corp.
################################################################################
The auto-generated notebooks are subject to the International License Agreement for Non-Warranted Programs (or equivalent) and License Information document for Watson Studio Auto-generated Notebook ("License Terms"), such agreements located in the link below.
Specifically, the Source Components and Sample Materials clause included in the License Information document for
Watson Studio Auto-generated Notebook applies to the auto-generated notebooks.
By downloading, copying, accessing, or otherwise using the materials, you agree to the License Terms.
http://www14.software.ibm.com/cgi-bin/weblap/lap.pl?li_formnum=L-AMCU-BHU2B7&title=IBM%20Watson%20Studio%20Auto-generated%20Notebook%20V2.1
## IBM AutoAI Auto-Generated Notebook v1.11.7
### Representing Pipeline: P8 from run 25043980-e0c9-476a-8385-755ecd49aa48
**Note**: Notebook code generated using AutoAI will execute successfully.
If code is modified or reordered, there is no guarantee it will successfully execute.
This pipeline is optimized for the original dataset. The pipeline may fail or produce sub-optimal results if used with different data.
For different data, please consider returning to AutoAI Experiments to generate a new pipeline.
Please read our documentation for more information:
(IBM Cloud Platform) https://dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/autoai-notebook.html .
(IBM Cloud Pak For Data) https://www.ibm.com/support/knowledgecenter/SSQNUZ_3.0.0/wsj/analyze-data/autoai-notebook.html .
Before modifying the pipeline or trying to re-fit the pipeline, consider:
The notebook converts dataframes to numpy arrays before fitting the pipeline (a current restriction of the preprocessor pipeline).
The known_values_list is passed by reference and populated with categorical values during fit of the preprocessing pipeline. Delete its members before re-fitting.
### 1. Set Up
```
#attempt import of autoai_libs and install if missing
try:
import autoai_libs
except Exception as e:
print('attempting to install missing autoai_libs from pypi, this may take tens of seconds to complete.')
import subprocess
try:
# attempt to install missing autoai-libs from pypi
out = subprocess.check_output('pip install autoai-libs', shell=True)
for line in out.splitlines():
print(line)
except Exception as e:
print(str(e))
try:
import autoai_libs
except Exception as e:
print('attempting to install missing autoai_libs from local filesystem, this may take tens of seconds to complete.')
import subprocess
# attempt to install missing autoai-libs from local filesystem
try:
out = subprocess.check_output('pip install .', shell=True, cwd='software/autoai_libs')
for line in out.splitlines():
print(line)
import autoai_libs
except Exception as e:
print(str(e))
import sklearn
try:
import xgboost
except:
print('xgboost, if needed, will be installed and imported later')
try:
import lightgbm
except:
print('lightgbm, if needed, will be installed and imported later')
from sklearn.cluster import FeatureAgglomeration
import numpy
from numpy import inf, nan, dtype, mean
from autoai_libs.sklearn.custom_scorers import CustomScorers
from autoai_libs.cognito.transforms.transform_utils import TExtras, FC
from autoai_libs.transformers.exportable import *
from autoai_libs.utils.exportable_utils import *
from sklearn.pipeline import Pipeline
known_values_list=[]
# compose a decorator to assist pipeline instantiation via import of modules and installation of packages
def decorator_retries(func):
def install_import_retry(*args, **kwargs):
retries = 0
successful = False
failed_retries = 0
while retries < 100 and failed_retries < 10 and not successful:
retries += 1
failed_retries += 1
try:
result = func(*args, **kwargs)
successful = True
except Exception as e:
estr = str(e)
if estr.startswith('name ') and estr.endswith(' is not defined'):
try:
import importlib
module_name = estr.split("'")[1]
module = importlib.import_module(module_name)
globals().update({module_name: module})
print('import successful for ' + module_name)
failed_retries -= 1
except Exception as import_failure:
print('import of ' + module_name + ' failed with: ' + str(import_failure))
import subprocess
print('attempting pip install of ' + module_name)
process = subprocess.Popen('pip install ' + module_name, shell=True)
process.wait()
try:
print('re-attempting import of ' + module_name)
module = importlib.import_module(module_name)
globals().update({module_name: module})
print('import successful for ' + module_name)
failed_retries -= 1
except Exception as import_or_installation_failure:
print('failure installing and/or importing ' + module_name + ' error was: ' + str(
import_or_installation_failure))
raise (ModuleNotFoundError('Missing package in environment for ' + module_name +
'? Try import and/or pip install manually?'))
elif type(e) is AttributeError:
if 'module ' in estr and ' has no attribute ' in estr:
pieces = estr.split("'")
if len(pieces) == 5:
try:
import importlib
print('re-attempting import of ' + pieces[3] + ' from ' + pieces[1])
module = importlib.import_module('.' + pieces[3], pieces[1])
failed_retries -= 1
except:
print('failed attempt to import ' + pieces[3])
raise (e)
else:
raise (e)
else:
raise (e)
if successful:
print('Pipeline successfully instantiated')
else:
raise (ModuleNotFoundError(
'Remaining missing imports/packages in environment? Retry cell and/or try pip install manually?'))
return result
return install_import_retry
```
### 2. Compose Pipeline
```
# metadata necessary to replicate AutoAI scores with the pipeline
_input_metadata = {'run_uid': '25043980-e0c9-476a-8385-755ecd49aa48', 'pn': 'P8', 'data_source': '', 'target_label_name': 'charges', 'learning_type': 'regression', 'optimization_metric': 'neg_root_mean_squared_error', 'random_state': 33, 'cv_num_folds': 3, 'holdout_fraction': 0.1, 'pos_label': None}
# define a function to compose the pipeline, and invoke it
@decorator_retries
def compose_pipeline():
import numpy
from numpy import nan, dtype, mean
#
# composing steps for toplevel Pipeline
#
_input_metadata = {'run_uid': '25043980-e0c9-476a-8385-755ecd49aa48', 'pn': 'P8', 'data_source': '', 'target_label_name': 'charges', 'learning_type': 'regression', 'optimization_metric': 'neg_root_mean_squared_error', 'random_state': 33, 'cv_num_folds': 3, 'holdout_fraction': 0.1, 'pos_label': None}
steps = []
#
# composing steps for preprocessor Pipeline
#
preprocessor__input_metadata = None
preprocessor_steps = []
#
# composing steps for preprocessor_features FeatureUnion
#
preprocessor_features_transformer_list = []
#
# composing steps for preprocessor_features_categorical Pipeline
#
preprocessor_features_categorical__input_metadata = None
preprocessor_features_categorical_steps = []
preprocessor_features_categorical_steps.append(('cat_column_selector', autoai_libs.transformers.exportable.NumpyColumnSelector(columns=[0, 1, 3, 4, 5])))
preprocessor_features_categorical_steps.append(('cat_compress_strings', autoai_libs.transformers.exportable.CompressStrings(activate_flag=True, compress_type='hash', dtypes_list=['int_num', 'char_str', 'int_num', 'char_str', 'char_str'], missing_values_reference_list=['', '-', '?', nan], misslist_list=[[], [], [], [], []])))
preprocessor_features_categorical_steps.append(('cat_missing_replacer', autoai_libs.transformers.exportable.NumpyReplaceMissingValues(filling_values=nan, missing_values=[])))
preprocessor_features_categorical_steps.append(('cat_unknown_replacer', autoai_libs.transformers.exportable.NumpyReplaceUnknownValues(filling_values=nan, filling_values_list=[nan, nan, nan, nan, nan], known_values_list=known_values_list, missing_values_reference_list=['', '-', '?', nan])))
preprocessor_features_categorical_steps.append(('boolean2float_transformer', autoai_libs.transformers.exportable.boolean2float(activate_flag=True)))
preprocessor_features_categorical_steps.append(('cat_imputer', autoai_libs.transformers.exportable.CatImputer(activate_flag=True, missing_values=nan, sklearn_version_family='20', strategy='most_frequent')))
preprocessor_features_categorical_steps.append(('cat_encoder', autoai_libs.transformers.exportable.CatEncoder(activate_flag=True, categories='auto', dtype=numpy.float64, encoding='ordinal', handle_unknown='error', sklearn_version_family='20')))
preprocessor_features_categorical_steps.append(('float32_transformer', autoai_libs.transformers.exportable.float32_transform(activate_flag=True)))
# assembling preprocessor_features_categorical_ Pipeline
preprocessor_features_categorical_pipeline = sklearn.pipeline.Pipeline(steps=preprocessor_features_categorical_steps)
preprocessor_features_transformer_list.append(('categorical', preprocessor_features_categorical_pipeline))
#
# composing steps for preprocessor_features_numeric Pipeline
#
preprocessor_features_numeric__input_metadata = None
preprocessor_features_numeric_steps = []
preprocessor_features_numeric_steps.append(('num_column_selector', autoai_libs.transformers.exportable.NumpyColumnSelector(columns=[2])))
preprocessor_features_numeric_steps.append(('num_floatstr2float_transformer', autoai_libs.transformers.exportable.FloatStr2Float(activate_flag=True, dtypes_list=['float_num'], missing_values_reference_list=[])))
preprocessor_features_numeric_steps.append(('num_missing_replacer', autoai_libs.transformers.exportable.NumpyReplaceMissingValues(filling_values=nan, missing_values=[])))
preprocessor_features_numeric_steps.append(('num_imputer', autoai_libs.transformers.exportable.NumImputer(activate_flag=True, missing_values=nan, strategy='median')))
preprocessor_features_numeric_steps.append(('num_scaler', autoai_libs.transformers.exportable.OptStandardScaler(num_scaler_copy=None, num_scaler_with_mean=None, num_scaler_with_std=None, use_scaler_flag=False)))
preprocessor_features_numeric_steps.append(('float32_transformer', autoai_libs.transformers.exportable.float32_transform(activate_flag=True)))
# assembling preprocessor_features_numeric_ Pipeline
preprocessor_features_numeric_pipeline = sklearn.pipeline.Pipeline(steps=preprocessor_features_numeric_steps)
preprocessor_features_transformer_list.append(('numeric', preprocessor_features_numeric_pipeline))
# assembling preprocessor_features_ FeatureUnion
preprocessor_features_pipeline = sklearn.pipeline.FeatureUnion(transformer_list=preprocessor_features_transformer_list)
preprocessor_steps.append(('features', preprocessor_features_pipeline))
preprocessor_steps.append(('permuter', autoai_libs.transformers.exportable.NumpyPermuteArray(axis=0, permutation_indices=[0, 1, 3, 4, 5, 2])))
# assembling preprocessor_ Pipeline
preprocessor_pipeline = sklearn.pipeline.Pipeline(steps=preprocessor_steps)
steps.append(('preprocessor', preprocessor_pipeline))
#
# composing steps for cognito Pipeline
#
cognito__input_metadata = None
cognito_steps = []
cognito_steps.append(('0', autoai_libs.cognito.transforms.transform_utils.TA2(fun=numpy.add, name='sum', datatypes1=['intc', 'intp', 'int_', 'uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16', 'int32', 'int64', 'short', 'long', 'longlong', 'float16', 'float32', 'float64'], feat_constraints1=[autoai_libs.utils.fc_methods.is_not_categorical], datatypes2=['intc', 'intp', 'int_', 'uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16', 'int32', 'int64', 'short', 'long', 'longlong', 'float16', 'float32', 'float64'], feat_constraints2=[autoai_libs.utils.fc_methods.is_not_categorical], tgraph=None, apply_all=True, col_names=['age', 'sex', 'bmi', 'children', 'smoker', 'region'], col_dtypes=[dtype('float32'), dtype('float32'), dtype('float32'), dtype('float32'), dtype('float32'), dtype('float32')], col_as_json_objects=None)))
cognito_steps.append(('1', autoai_libs.cognito.transforms.transform_utils.FS1(cols_ids_must_keep=range(0, 6), additional_col_count_to_keep=8, ptype='regression')))
# assembling cognito_ Pipeline
cognito_pipeline = sklearn.pipeline.Pipeline(steps=cognito_steps)
steps.append(('cognito', cognito_pipeline))
steps.append(('estimator', sklearn.ensemble.forest.RandomForestRegressor(bootstrap=True, criterion='friedman_mse', max_depth=4, max_features=0.9832410473940374, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=3, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=29, n_jobs=4, oob_score=False, random_state=33, verbose=0, warm_start=False)))
# assembling Pipeline
pipeline = sklearn.pipeline.Pipeline(steps=steps)
return pipeline
pipeline = compose_pipeline()
```
### 3. Extract needed parameter values from AutoAI run metadata
```
# Metadata used in retrieving data and computing metrics. Customize as necessary for your environment.
#data_source='replace_with_path_and_csv_filename'
target_label_name = _input_metadata['target_label_name']
learning_type = _input_metadata['learning_type']
optimization_metric = _input_metadata['optimization_metric']
random_state = _input_metadata['random_state']
cv_num_folds = _input_metadata['cv_num_folds']
holdout_fraction = _input_metadata['holdout_fraction']
if 'data_provenance' in _input_metadata:
data_provenance = _input_metadata['data_provenance']
else:
data_provenance = None
if 'pos_label' in _input_metadata and learning_type == 'classification':
pos_label = _input_metadata['pos_label']
else:
pos_label = None
```
### 4. Create dataframe from dataset in IBM Cloud Object Storage or IBM Cloud Pak For Data
```
# @hidden_cell
# The following code contains the credentials for a file in your IBM Cloud Object Storage.
# You might want to remove those credentials before you share your notebook.
credentials_0 = {
}
# Read the data as a dataframe
import pandas as pd
csv_encodings=['UTF-8','Latin-1'] # supplement list of encodings as necessary for your data
df = None
readable = None # if automatic detection fails, you can supply a filename here
# First, obtain a readable object
# IBM Cloud Object Storage data access
# Assumes COS credentials are in a dictionary named 'credentials_0'
cos_credentials = df = globals().get('credentials_0')
if readable is None and cos_credentials is not None:
print('accessing data via IBM Cloud Object Storage')
try:
import types
from botocore.client import Config
import ibm_boto3
def __iter__(self): return 0
if 'SERVICE_NAME' not in cos_credentials: # in case of Studio-supplied credentials for a different dataset
cos_credentials['SERVICE_NAME'] = 's3'
client = ibm_boto3.client(service_name=cos_credentials['SERVICE_NAME'],
ibm_api_key_id=cos_credentials['IBM_API_KEY_ID'],
ibm_auth_endpoint=cos_credentials['IBM_AUTH_ENDPOINT'],
config=Config(signature_version='oauth'),
endpoint_url=cos_credentials['ENDPOINT'])
try:
readable = client.get_object(Bucket=cos_credentials['BUCKET'],Key=cos_credentials['FILE'])['Body']
# add missing __iter__ method, so pandas accepts readable as file-like object
if not hasattr(readable, "__iter__"): readable.__iter__ = types.MethodType( __iter__, readable )
except Exception as cos_access_exception:
print('unable to access data object in cloud object storage with credentials supplied')
except Exception as cos_exception:
print('unable to create client for cloud object storage')
# IBM Cloud Pak for Data data access
project_filename = globals().get('project_filename')
if readable is None and 'credentials_0' in globals() and 'ASSET_ID' in credentials_0:
project_filename = credentials_0['ASSET_ID']
if project_filename is not None:
print('attempting project_lib access to ' + str(project_filename))
try:
from project_lib import Project
project = Project.access()
storage_credentials = project.get_storage_metadata()
readable = project.get_file(project_filename)
except Exception as project_exception:
print('unable to access data using the project_lib interface and filename supplied')
# Use data_provenance as filename if other access mechanisms are unsuccessful
if readable is None and type(data_provenance) is str:
print('attempting to access local file using path and name ' + data_provenance)
readable = data_provenance
# Second, use pd.read_csv to read object, iterating over list of csv_encodings until successful
if readable is not None:
for encoding in csv_encodings:
try:
df = pd.read_csv(readable, encoding=encoding)
print('successfully loaded dataframe using encoding = ' + str(encoding))
break
except Exception as exception_csv:
print('unable to read csv using encoding ' + str(encoding))
print('handled error was ' + str(exception_csv))
if df is None:
print('unable to read file/object as a dataframe using supplied csv_encodings ' + str(csv_encodings))
print("Please use 'insert to code' on data panel to load dataframe.")
raise(ValueError('unable to read file/object as a dataframe using supplied csv_encodings ' + str(csv_encodings)))
if df is None:
print('Unable to access bucket/file in IBM Cloud Object Storage or asset in IBM Cloud Pak for Data with the parameters supplied.')
print('This is abnormal, but proceeding assuming the notebook user will supply a dataframe by other means.')
print("Please use 'insert to code' on data panel to load dataframe.")
```
### 5. Preprocess Data
```
# Drop rows whose target is not defined
target = target_label_name # your target name here
if learning_type == 'regression':
df[target] = pd.to_numeric(df[target], errors='coerce')
df.dropna('rows', how='any', subset=[target], inplace=True)
# extract X and y
df_X = df.drop(columns=[target])
df_y = df[target]
# Detach preprocessing pipeline (which needs to see all training data)
preprocessor_index = -1
preprocessing_steps = []
for i, step in enumerate(pipeline.steps):
preprocessing_steps.append(step)
if step[0]=='preprocessor':
preprocessor_index = i
break
if len(pipeline.steps) > preprocessor_index+1 and pipeline.steps[preprocessor_index + 1][0] == 'cognito':
preprocessor_index += 1
preprocessing_steps.append(pipeline.steps[preprocessor_index])
if preprocessor_index >= 0:
preprocessing_pipeline = Pipeline(memory=pipeline.memory, steps=preprocessing_steps)
pipeline = Pipeline(steps=pipeline.steps[preprocessor_index+1:])
# Preprocess X
# preprocessor should see all data for cross_validate on the remaining steps to match autoai scores
known_values_list.clear() # known_values_list is filled in by the preprocessing_pipeline if needed
preprocessing_pipeline.fit(df_X.values, df_y.values)
X_prep = preprocessing_pipeline.transform(df_X.values)
```
### 6. Split data into Training and Holdout sets
```
# determine learning_type and perform holdout split (stratify conditionally)
if learning_type is None:
# When the problem type is not available in the metadata, use the sklearn type_of_target to determine whether to stratify the holdout split
# Caution: This can mis-classify regression targets that can be expressed as integers as multiclass, in which case manually override the learning_type
from sklearn.utils.multiclass import type_of_target
if type_of_target(df_y.values) in ['multiclass', 'binary']:
learning_type = 'classification'
else:
learning_type = 'regression'
print('learning_type determined by type_of_target as:',learning_type)
else:
print('learning_type specified as:',learning_type)
from sklearn.model_selection import train_test_split
if learning_type == 'classification':
X, X_holdout, y, y_holdout = train_test_split(X_prep, df_y.values, test_size=holdout_fraction, random_state=random_state, stratify=df_y.values)
else:
X, X_holdout, y, y_holdout = train_test_split(X_prep, df_y.values, test_size=holdout_fraction, random_state=random_state)
```
### 7. Additional setup: Define a function that returns a scorer for the target's positive label
```
# create a function to produce a scorer for a given positive label
def make_pos_label_scorer(scorer, pos_label):
kwargs = {'pos_label':pos_label}
for prop in ['needs_proba', 'needs_threshold']:
if prop+'=True' in scorer._factory_args():
kwargs[prop] = True
if scorer._sign == -1:
kwargs['greater_is_better'] = False
from sklearn.metrics import make_scorer
scorer=make_scorer(scorer._score_func, **kwargs)
return scorer
```
### 8. Fit pipeline, predict on Holdout set, calculate score, perform cross-validation
```
# fit the remainder of the pipeline on the training data
pipeline.fit(X,y)
# predict on the holdout data
y_pred = pipeline.predict(X_holdout)
# compute score for the optimization metric
# scorer may need pos_label, but not all scorers take pos_label parameter
from sklearn.metrics import get_scorer
scorer = get_scorer(optimization_metric)
score = None
#score = scorer(pipeline, X_holdout, y_holdout) # this would suffice for simple cases
pos_label = None # if you want to supply the pos_label, specify it here
if pos_label is None and 'pos_label' in _input_metadata:
pos_label=_input_metadata['pos_label']
try:
score = scorer(pipeline, X_holdout, y_holdout)
except Exception as e1:
if pos_label is None or str(pos_label)=='':
print('You may have to provide a value for pos_label in order for a score to be calculated.')
raise(e1)
else:
exception_string=str(e1)
if 'pos_label' in exception_string:
try:
scorer = make_pos_label_scorer(scorer, pos_label=pos_label)
score = scorer(pipeline, X_holdout, y_holdout)
print('Retry was successful with pos_label supplied to scorer')
except Exception as e2:
print('Initial attempt to use scorer failed. Exception was:')
print(e1)
print('')
print('Retry with pos_label failed. Exception was:')
print(e2)
else:
raise(e1)
if score is not None:
print(score)
# cross_validate pipeline using training data
from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold, KFold
if learning_type == 'classification':
fold_generator = StratifiedKFold(n_splits=cv_num_folds, random_state=random_state)
else:
fold_generator = KFold(n_splits=cv_num_folds, random_state=random_state)
cv_results = cross_validate(pipeline, X, y, cv=fold_generator, scoring={optimization_metric:scorer}, return_train_score=True)
import numpy as np
np.mean(cv_results['test_' + optimization_metric])
cv_results
```
|
github_jupyter
|
```
# Necessary imports
import re
import emoji
from gtrans import translate_text, translate_html
import random
import pandas as pd
import numpy as np
from multiprocessing import Pool
import time
# Function to remove emojis in text, since these conflict during translation
def remove_emoji(text):
return emoji.get_emoji_regexp().sub(u'', text)
def approximate_emoji_insert(string, index,char):
if(index<(len(string)-1)):
while(string[index]!=' ' ):
if(index+1==len(string)):
break
index=index+1
return string[:index] + ' '+char + ' ' + string[index:]
else:
return string + ' '+char + ' '
def extract_emojis(str1):
try:
return [(c,i) for i,c in enumerate(str1) if c in emoji.UNICODE_EMOJI]
except AttributeError:
return []
# Use multiprocessing framework for speeding up translation process
def parallelize_dataframe(df, func, n_cores=4):
'''parallelize the dataframe'''
df_split = np.array_split(df, n_cores)
pool = Pool(n_cores)
df = pd.concat(pool.map(func, df_split))
pool.close()
pool.join()
return df
# Main function for translation
def translate(x,lang):
'''provide the translation given text and the language'''
#x=preprocess_lib.preprocess_multi(x,lang,multiple_sentences=False,stop_word_remove=False, tokenize_word=False, tokenize_sentence=False)
emoji_list=extract_emojis(x)
try:
translated_text=translate_text(x,lang,'en')
except:
translated_text=x
for ele in emoji_list:
translated_text=approximate_emoji_insert(translated_text, ele[1],ele[0])
return translated_text
def add_features(df):
'''adding new features to the dataframe'''
translated_text=[]
for index,row in df.iterrows():
if(row['lang']in ['en','unk']):
translated_text.append(row['text'])
else:
translated_text.append(translate(row['text'],row['lang']))
df["translated"]=translated_text
return df
import glob
train_files = glob.glob('train/*.csv')
test_files = glob.glob('test/*.csv')
val_files = glob.glob('val/*.csv')
files= train_files+test_files+val_files
from tqdm import tqdm_notebook
size=10
for file in files:
wp_data=pd.read_csv(file)
list_df=[]
for i in tqdm_notebook(range(0,100,size)):
print(i,"_iteration")
df_new=parallelize_dataframe(wp_data[i:i+size],add_features,n_cores=20)
list_df.append(df_new)
df_translated=pd.concat(list_df,ignore_index=True)
file_name='translated'+file
df_translated.to_csv(file_name)
```
|
github_jupyter
|
# REWARD-MODULATED SELF ORGANISING RECURRENT NEURAL NETWORK
https://www.frontiersin.org/articles/10.3389/fncom.2015.00036/full
### IMPORT REQUIRED LIBRARIES
```
from __future__ import division
import numpy as np
from scipy.stats import norm
import random
import tqdm
import pandas as pd
from collections import OrderedDict
import matplotlib.pyplot as plt
import heapq
import pickle
import torch as torch
from sorn.utils import Initializer
torch.manual_seed(1)
random.seed(1)
np.random.seed(1)
```
### UTILS
```
def normalize_weight_matrix(weight_matrix):
    # Applied only while initializing the weights. Later, synaptic scaling is applied to the weight matrices
""" Normalize the weights in the matrix such that incoming connections to a neuron sum up to 1
Args:
weight_matrix(array) -- Incoming Weights from W_ee or W_ei or W_ie
Returns:
weight_matrix(array) -- Normalized weight matrix"""
normalized_weight_matrix = weight_matrix / np.sum(weight_matrix,axis = 0)
return normalized_weight_matrix
```
### Implement lambda incoming connections for Excitatory neurons and outgoing connections per Inhibitory neuron
```
def generate_lambd_connections(synaptic_connection,ne,ni, lambd_w,lambd_std):
"""
Args:
    synaptic_connection - Type of synaptic connection (EE, EI or IE)
ne - Number of excitatory units
ni - Number of inhibitory units
lambd_w - Average number of incoming connections
lambd_std - Standard deviation of average number of connections per neuron
Returns:
connection_weights - Weight matrix
"""
if synaptic_connection == 'EE':
"""Choose random lamda connections per neuron"""
        # Draw normally distributed ne integers with mean lambd_w
lambdas_incoming = norm.ppf(np.random.random(ne), loc=lambd_w, scale=lambd_std).astype(int)
# lambdas_outgoing = norm.ppf(np.random.random(ne), loc=lambd_w, scale=lambd_std).astype(int)
# List of neurons
list_neurons= list(range(ne))
# Connection weights
connection_weights = np.zeros((ne,ne))
# For each lambd value in the above list,
# generate weights for incoming and outgoing connections
#-------------Gaussian Distribution of weights --------------
# weight_matrix = np.random.randn(Sorn.ne, Sorn.ni) + 2 # Small random values from gaussian distribution
# Centered around 2 to make all values positive
# ------------Uniform Distribution --------------------------
global_incoming_weights = np.random.uniform(0.0,0.1,sum(lambdas_incoming))
# Index Counter
global_incoming_weights_idx = 0
# Choose the neurons in order [0 to 199]
for neuron in list_neurons:
            ### Choose random unique (lambdas[neuron]) neurons from list_neurons
possible_connections = list_neurons.copy()
possible_connections.remove(neuron) # Remove the selected neuron from possible connections i!=j
# Choose random presynaptic neurons
possible_incoming_connections = random.sample(possible_connections,lambdas_incoming[neuron])
incoming_weights_neuron = global_incoming_weights[global_incoming_weights_idx:global_incoming_weights_idx+lambdas_incoming[neuron]]
# ---------- Update the connection weight matrix ------------
# Update incoming connection weights for selected 'neuron'
for incoming_idx,incoming_weight in enumerate(incoming_weights_neuron):
connection_weights[possible_incoming_connections[incoming_idx]][neuron] = incoming_weight
global_incoming_weights_idx += lambdas_incoming[neuron]
return connection_weights
if synaptic_connection == 'EI':
"""Choose random lamda connections per neuron"""
        # Draw normally distributed ni integers with mean lambd_w
lambdas = norm.ppf(np.random.random(ni), loc=lambd_w, scale=lambd_std).astype(int)
# List of neurons
list_neurons= list(range(ni)) # Each i can connect with random ne neurons
# Initializing connection weights variable
connection_weights = np.zeros((ni,ne))
# ------------Uniform Distribution -----------------------------
global_outgoing_weights = np.random.uniform(0.0,0.1,sum(lambdas))
# Index Counter
global_outgoing_weights_idx = 0
# Choose the neurons in order [0 to 40]
for neuron in list_neurons:
            ### Choose random unique (lambdas[neuron]) neurons from list_neurons
possible_connections = list(range(ne))
possible_outgoing_connections = random.sample(possible_connections,lambdas[neuron]) # possible_outgoing connections to the neuron
# Update weights
outgoing_weights = global_outgoing_weights[global_outgoing_weights_idx:global_outgoing_weights_idx+lambdas[neuron]]
# ---------- Update the connection weight matrix ------------
# Update outgoing connections for the neuron
for outgoing_idx,outgoing_weight in enumerate(outgoing_weights): # Update the columns in the connection matrix
connection_weights[neuron][possible_outgoing_connections[outgoing_idx]] = outgoing_weight
# Update the global weight values index
global_outgoing_weights_idx += lambdas[neuron]
return connection_weights
```
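As a quick usage check (a sketch; the parameter values are illustrative and not taken from the paper):
```python
# Sparse E-E weights: 30 excitatory units, ~10 incoming connections per neuron on average
W_ee = generate_lambd_connections('EE', ne=30, ni=6, lambd_w=10, lambd_std=1)
print(W_ee.shape)                     # (30, 30)
print((W_ee > 0).sum(axis=0).mean())  # average number of incoming connections per neuron (column)
# Incoming weights then sum to 1 per neuron (assumes every neuron received at least one connection)
W_ee = normalize_weight_matrix(W_ee)
```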
### More Util functions
```
def get_incoming_connection_dict(weights):
    # The non-zero entries in each column are the incoming connections of that neuron
# Indices of nonzero entries in the columns
connection_dict=dict.fromkeys(range(1,len(weights)+1),0)
for i in range(len(weights[0])): # For each neuron
connection_dict[i] = list(np.nonzero(weights[:,i])[0])
return connection_dict
def get_outgoing_connection_dict(weights):
    # The non-zero entries in each row are the outgoing connections of that neuron
# Indices of nonzero entries in the rows
connection_dict=dict.fromkeys(range(1,len(weights)+1),1)
for i in range(len(weights[0])): # For each neuron
connection_dict[i] = list(np.nonzero(weights[i,:])[0])
return connection_dict
def prune_small_weights(weights,cutoff_weight):
""" Prune the connections with negative connection strength"""
weights[weights <= cutoff_weight] = cutoff_weight
return weights
def set_max_cutoff_weight(weights, cutoff_weight):
""" Set cutoff limit for the values in given array"""
weights[weights > cutoff_weight] = cutoff_weight
return weights
def get_unconnected_indexes(wee):
"""
Helper function for Structural plasticity to randomly select the unconnected units
Args:
wee - Weight matrix
Returns:
list (indices) // indices = (row_idx,col_idx)"""
i,j = np.where(wee <= 0.)
indices = list(zip(i,j))
self_conn_removed = []
for i,idxs in enumerate(indices):
if idxs[0] != idxs[1]:
self_conn_removed.append(indices[i])
return self_conn_removed
def white_gaussian_noise(mu, sigma,t):
"""Generates white gaussian noise with mean mu, standard deviation sigma and
the noise length equals t """
noise = np.random.normal(mu, sigma, t)
return np.expand_dims(noise,1)
### SANITY CHECK EACH WEIGHTS
#### Note this function has no influence in weight matrix, will be deprecated in next version
def zero_sum_incoming_check(weights):
zero_sum_incomings = np.where(np.sum(weights,axis = 0) == 0.)
if len(zero_sum_incomings[-1]) == 0:
return weights
else:
for zero_sum_incoming in zero_sum_incomings[-1]:
rand_indices = np.random.randint(40,size = 2) # 5 because each excitatory neuron connects with 5 inhibitory neurons
# given the probability of connections 0.2
rand_values = np.random.uniform(0.0,0.1,2)
for i,idx in enumerate(rand_indices):
weights[:,zero_sum_incoming][idx] = rand_values[i]
return weights
```
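A quick sanity check of a few of these helpers (toy values made up for illustration):
```python
W = np.array([[0.0, 0.0],
              [0.2, 0.05]])
print(get_unconnected_indexes(W))            # [(0, 1)]: zero entries off the diagonal
print(prune_small_weights(W.copy(), 0.01))   # entries <= 0.01 are raised to 0.01
print(set_max_cutoff_weight(W.copy(), 0.1))  # entries > 0.1 are clipped down to 0.1
```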
### SORN
```
class Sorn(object):
"""SORN 1 network model Initialization"""
def __init__(self):
pass
"""Initialize network variables as class variables of SORN"""
nu = 4 # Number of input units
ne = 30 # Number of excitatory units
ni = int(0.2*ne) # Number of inhibitory units in the network
no = 1
eta_stdp = 0.004
eta_inhib = 0.001
eta_ip = 0.01
te_max = 1.0
ti_max = 0.5
ti_min = 0.0
te_min = 0.0
mu_ip = 0.1
sigma_ip = 0.0 # Standard deviation, variance == 0
# Initialize weight matrices
def initialize_weight_matrix(self, network_type,synaptic_connection, self_connection, lambd_w):
"""
Args:
            network_type(str) - Sparse or Dense
            synaptic_connection(str) - EE,EI,IE: Note that Sparse connection is defined only for EE connections
self_connection(str) - True or False: i-->i ; Network is tested only using j-->i
lambd_w(int) - Average number of incoming and outgoing connections per neuron
Returns:
weight_matrix(array) - Array of connection strengths
"""
        if (network_type == "Sparse") and (self_connection == "False"):
            """Generate weight matrix for E-E / E-I connections with mean lambda incoming and outgoing connections per neuron"""
weight_matrix = generate_lambd_connections(synaptic_connection,Sorn.ne,Sorn.ni,lambd_w,lambd_std = 1)
# Dense matrix for W_ie
elif (network_type == 'Dense') and (self_connection == 'False'):
# Gaussian distribution of weights
# weight_matrix = np.random.randn(Sorn.ne, Sorn.ni) + 2 # Small random values from gaussian distribution
# Centered around 1
# weight_matrix.reshape(Sorn.ne, Sorn.ni)
# weight_matrix *= 0.01 # Setting spectral radius
# Uniform distribution of weights
weight_matrix = np.random.uniform(0.0,0.1,(Sorn.ne, Sorn.ni))
weight_matrix.reshape((Sorn.ne,Sorn.ni))
elif (network_type == 'Dense_output') and (self_connection == 'False'):
# Gaussian distribution of weights
# weight_matrix = np.random.randn(Sorn.ne, Sorn.ni) + 2 # Small random values from gaussian distribution
# Centered around 1
# weight_matrix.reshape(Sorn.ne, Sorn.ni)
# weight_matrix *= 0.01 # Setting spectral radius
# Uniform distribution of weights
weight_matrix = np.random.uniform(0.0,0.1,(Sorn.no, Sorn.ne))
weight_matrix.reshape((Sorn.no,Sorn.ne))
return weight_matrix
def initialize_threshold_matrix(self, te_min,te_max, ti_min,ti_max):
# Initialize the threshold for excitatory and inhibitory neurons
"""Args:
te_min(float) -- Min threshold value for excitatory units
ti_min(float) -- Min threshold value for inhibitory units
te_max(float) -- Max threshold value for excitatory units
ti_max(float) -- Max threshold value for inhibitory units
Returns:
te(vector) -- Threshold values for excitatory units
ti(vector) -- Threshold values for inhibitory units"""
te = np.random.uniform(0., te_max, (Sorn.ne, 1))
ti = np.random.uniform(0., ti_max, (Sorn.ni, 1))
        # For the pattern recognition task: Heaviside step function with fixed threshold
to = 0.5
return te, ti,to
def initialize_activity_vector(self,ne, ni, no):
# Initialize the activity vectors X and Y for excitatory and inhibitory neurons
"""Args:
ne(int) -- Number of excitatory neurons
ni(int) -- Number of inhibitory neurons
Returns:
x(array) -- Array of activity vectors of excitatory population
y(array) -- Array of activity vectors of inhibitory population"""
x = np.zeros((ne, 2))
y = np.zeros((ni, 2))
o = np.zeros((no, 2))
return x, y, o
class Plasticity(Sorn):
"""
Instance of class Sorn. Inherits the variables and functions defined in class Sorn
Encapsulates all plasticity mechanisms mentioned in the article """
# Initialize the global variables for the class //Class attributes
def __init__(self):
super().__init__()
self.nu = Sorn.nu # Number of input units
self.ne = Sorn.ne # Number of excitatory units
self.no = Sorn.no
self.eta_stdp = Sorn.eta_stdp # STDP plasticity Learning rate constant; SORN1 and SORN2
self.eta_ip = Sorn.eta_ip # Intrinsic plasticity learning rate constant; SORN1 and SORN2
self.eta_inhib = Sorn.eta_inhib # Intrinsic plasticity learning rate constant; SORN2 only
self.h_ip = 2 * Sorn.nu / Sorn.ne # Target firing rate
self.mu_ip = Sorn.mu_ip # Mean target firing rate
self.ni = Sorn.ni # Number of inhibitory units in the network
self.time_steps = Sorn.time_steps # Total time steps of simulation
self.te_min = Sorn.te_min # Excitatory minimum Threshold
self.te_max = Sorn.te_max # Excitatory maximum Threshold
def stdp(self, wee, x, mr, cutoff_weights):
""" Apply STDP rule : Regulates synaptic strength between the pre(Xj) and post(Xi) synaptic neurons"""
x = np.asarray(x)
xt_1 = x[:,0]
xt = x[:,1]
wee_t = wee.copy()
# STDP applies only on the neurons which are connected.
for i in range(len(wee_t[0])): # Each neuron i, Post-synaptic neuron
for j in range(len(wee_t[0:])): # Incoming connection from jth pre-synaptic neuron to ith neuron
if wee_t[j][i] != 0. : # Check connectivity
# Get the change in weight
delta_wee_t = mr*self.eta_stdp * (xt[i] * xt_1[j] - xt_1[i]*xt[j])
                    # Update the weight from the jth (pre-synaptic) neuron to the ith neuron; notation differs from the article
wee_t[j][i] = wee[j][i] + delta_wee_t
""" Prune the smallest weights induced by plasticity mechanisms; Apply lower cutoff weight"""
wee_t = prune_small_weights(wee_t,cutoff_weights[0])
"""Check and set all weights < upper cutoff weight """
wee_t = set_max_cutoff_weight(wee_t,cutoff_weights[1])
return wee_t
def ostdp(self,woe, x, mo):
""" Apply STDP rule : Regulates synaptic strength between the pre(Xj) and post(Xi) synaptic neurons"""
x = np.asarray(x)
xt_1 = x[:, 0]
xt = x[:, 1]
woe_t = woe.copy()
# STDP applies only on the neurons which are connected.
for i in range(len(woe_t[0])): # Each neuron i, Post-synaptic neuron
for j in range(len(woe_t[0:])): # Incoming connection from jth pre-synaptic neuron to ith neuron
if woe_t[j][i] != 0.: # Check connectivity
# Get the change in weight
delta_woe_t = mo*self.eta_stdp * (xt[i] * xt_1[j] - xt_1[i] * xt[j])
                    # Update the weight from the jth (pre-synaptic) neuron to the ith neuron; notation differs from the article
woe_t[j][i] = woe[j][i] + delta_woe_t
return woe_t
def ip(self, te, x):
# IP rule: Active unit increases its threshold and inactive decreases its threshold.
xt = x[:, 1]
        te_update = te + self.eta_ip * (xt.reshape(self.ne, 1) - self.h_ip)
        """ Check whether all te are in the range [0.0, 1.0] and update accordingly"""
# Update te < 0.0 ---> 0.0
# te_update = prune_small_weights(te_update,self.te_min)
# Set all te > 1.0 --> 1.0
# te_update = set_max_cutoff_weight(te_update,self.te_max)
return te_update
def ss(self, wee_t):
"""Synaptic Scaling or Synaptic Normalization"""
wee_t = wee_t / np.sum(wee_t,axis=0)
return wee_t
@staticmethod
def modulation_factor(reward_history, current_reward ,window_sizes):
""" Grid search for Modulation factor. Returns the maximum moving average over history of rewards with corresponding window
Args:
reward_history (list): List with the history of rewards
window_sizes (list): List of window sizes for gridsearch
Returns:
            (mo, mr, best_reward, best_reward_window): modulation factors, the best rolling-average reward and its window size
"""
def running_mean(x, K):
cumsum = np.cumsum(np.insert(x, 0, 0))
return (cumsum[K:] - cumsum[:-K]) / float(K)
reward_avgs = [] # Holds the mean of all rolling averages for each window
for window_size in window_sizes:
if window_size<=len(reward_history):
reward_avgs.append(np.mean(running_mean(reward_history, window_size)))
best_reward= np.max(reward_avgs)
        best_reward_window = window_sizes[np.argmax(reward_avgs)]
        print("current_reward %s | Best rolling average reward %s | Best rolling average window %s"%(current_reward, best_reward, best_reward_window ))
mo = current_reward - best_reward
mr = mo.copy()
# TODO: What if mo != mr ?
return mo, mr, best_reward, best_reward_window
###########################################################
@staticmethod
def initialize_plasticity():
"""NOTE: DO NOT TRANSPOSE THE WEIGHT MATRIX WEI FOR SORN 2 MODEL"""
# Create and initialize sorn object and variables
sorn_init = Sorn()
WEE_init = sorn_init.initialize_weight_matrix(network_type="Sparse", synaptic_connection='EE',
self_connection='False',
lambd_w=20)
WEI_init = sorn_init.initialize_weight_matrix(network_type="Dense", synaptic_connection='EI',
self_connection='False',
lambd_w=100)
WIE_init = sorn_init.initialize_weight_matrix(network_type="Dense", synaptic_connection='IE',
self_connection='False',
lambd_w=100)
WOE_init = sorn_init.initialize_weight_matrix(network_type="Dense_output", synaptic_connection='OE',
self_connection='False',
lambd_w=100)
Wee_init = Initializer.zero_sum_incoming_check(WEE_init)
Wei_init = Initializer.zero_sum_incoming_check(WEI_init.T) # For SORN 1
# Wei_init = Initializer.zero_sum_incoming_check(WEI_init)
Wie_init = Initializer.zero_sum_incoming_check(WIE_init)
Woe_init = Initializer.zero_sum_incoming_check(WOE_init.T)
c = np.count_nonzero(Wee_init)
v = np.count_nonzero(Wei_init)
b = np.count_nonzero(Wie_init)
d = np.count_nonzero(Woe_init)
print('Network Initialized')
print('Number of connections in Wee %s , Wei %s, Wie %s Woe %s' %(c, v, b, d))
print('Shapes Wee %s Wei %s Wie %s Woe %s' % (Wee_init.shape, Wei_init.shape, Wie_init.shape, Woe_init.shape))
# Normalize the incoming weights
normalized_wee = Initializer.normalize_weight_matrix(Wee_init)
normalized_wei = Initializer.normalize_weight_matrix(Wei_init)
normalized_wie = Initializer.normalize_weight_matrix(Wie_init)
te_init, ti_init, to_init = sorn_init.initialize_threshold_matrix(Sorn.te_min, Sorn.te_max, Sorn.ti_min, Sorn.ti_max)
x_init, y_init, o_init = sorn_init.initialize_activity_vector(Sorn.ne, Sorn.ni,Sorn.no)
return Wee_init, Wei_init, Wie_init,Woe_init, te_init, ti_init, to_init,x_init, y_init, o_init
@staticmethod
def reorganize_network():
pass
class MatrixCollection(Sorn):
def __init__(self,phase, matrices = None):
super().__init__()
self.phase = phase
self.matrices = matrices
if self.phase == 'Plasticity' and self.matrices == None :
self.time_steps = Sorn.time_steps + 1 # Total training steps
self.Wee, self.Wei, self.Wie,self.Woe, self.Te, self.Ti, self.To, self.X, self.Y, self.O = [0] * self.time_steps, [0] * self.time_steps, \
[0] * self.time_steps, [0] * self.time_steps, \
[0] * self.time_steps, [0] * self.time_steps, \
[0] * self.time_steps, [0] * self.time_steps, \
[0] * self.time_steps, [0] * self.time_steps
wee, wei, wie, woe, te, ti, to, x, y, o = Plasticity.initialize_plasticity()
# Assign initial matrix to the master matrices
self.Wee[0] = wee
self.Wei[0] = wei
self.Wie[0] = wie
self.Woe[0] = woe
self.Te[0] = te
self.Ti[0] = ti
self.To[0] = to
self.X[0] = x
self.Y[0] = y
self.O[0] = o
elif self.phase == 'Plasticity' and self.matrices != None:
self.time_steps = Sorn.time_steps + 1 # Total training steps
self.Wee, self.Wei, self.Wie,self.Woe, self.Te, self.Ti,self.To, self.X, self.Y,self.O = [0] * self.time_steps, [0] * self.time_steps, \
[0] * self.time_steps, [0] * self.time_steps, \
[0] * self.time_steps, [0] * self.time_steps, \
[0] * self.time_steps, [0] * self.time_steps, [0] * self.time_steps, \
[0] * self.time_steps
# Assign matrices from plasticity phase to the new master matrices for training phase
self.Wee[0] = matrices['Wee']
self.Wei[0] = matrices['Wei']
self.Wie[0] = matrices['Wie']
self.Woe[0] = matrices['Woe']
self.Te[0] = matrices['Te']
self.Ti[0] = matrices['Ti']
self.To[0] = matrices['To']
self.X[0] = matrices['X']
self.Y[0] = matrices['Y']
self.O[0] = matrices['O']
elif self.phase == 'Training':
"""NOTE:
            time_steps here is different for the plasticity and training phases"""
self.time_steps = Sorn.time_steps + 1 # Total training steps
self.Wee, self.Wei, self.Wie,self.Woe, self.Te, self.Ti,self.To, self.X, self.Y,self.O = [0] * self.time_steps, [0] * self.time_steps, \
[0] * self.time_steps, [0] * self.time_steps, \
[0] * self.time_steps, [0] * self.time_steps, \
[0] * self.time_steps, [0] * self.time_steps, [0] * self.time_steps, \
[0] * self.time_steps
# Assign matrices from plasticity phase to new respective matrices for training phase
self.Wee[0] = matrices['Wee']
self.Wei[0] = matrices['Wei']
self.Wie[0] = matrices['Wie']
self.Woe[0] = matrices['Woe']
self.Te[0] = matrices['Te']
self.Ti[0] = matrices['Ti']
self.To[0] = matrices['To']
self.X[0] = matrices['X']
self.Y[0] = matrices['Y']
self.O[0] = matrices['O']
# @staticmethod
def weight_matrix(self, wee, wei, wie, woe, i):
# Get delta_weight from Plasticity.stdp
# i - training step
self.Wee[i + 1] = wee
self.Wei[i + 1] = wei
self.Wie[i + 1] = wie
self.Woe[i + 1] = woe
return self.Wee, self.Wei, self.Wie, self.Woe
# @staticmethod
def threshold_matrix(self, te, ti,to, i):
self.Te[i + 1] = te
self.Ti[i + 1] = ti
self.To[i + 1] = to
return self.Te, self.Ti, self.To
# @staticmethod
def network_activity_t(self, excitatory_net, inhibitory_net, output_net, i):
self.X[i + 1] = excitatory_net
self.Y[i + 1] = inhibitory_net
self.O[i + 1] = output_net
return self.X, self.Y, self.O
# @staticmethod
def network_activity_t_1(self, x, y,o, i):
x_1, y_1, o_1 = [0] * self.time_steps, [0] * self.time_steps, [0] * self.time_steps
x_1[i] = x
y_1[i] = y
o_1[i] = o
return x_1, y_1, o_1
class NetworkState(Plasticity):
"""The evolution of network states"""
def __init__(self, v_t):
super().__init__()
self.v_t = v_t
def incoming_drive(self,weights,activity_vector):
        # Broadcasting weight * activity vectors
incoming = weights* activity_vector
incoming = np.array(incoming.sum(axis=0))
return incoming
def excitatory_network_state(self, wee, wei, te, x, y,white_noise_e):
""" Activity of Excitatory neurons in the network"""
xt = x[:, 1]
xt = xt.reshape(self.ne, 1)
yt = y[:, 1]
yt = yt.reshape(self.ni, 1)
incoming_drive_e = np.expand_dims(self.incoming_drive(weights = wee,activity_vector=xt),1)
incoming_drive_i = np.expand_dims(self.incoming_drive(weights = wei,activity_vector=yt),1)
if self.v_t.shape[0] < self.ne:
inp = [0]*self.ne
inp[:len(self.v_t)] = self.v_t
self.v_t = inp.copy()
tot_incoming_drive = incoming_drive_e - incoming_drive_i + white_noise_e + np.expand_dims(np.asarray(self.v_t),1) - te
"""Heaviside step function"""
"""Implement Heaviside step function"""
heaviside_step = np.expand_dims([0.] * len(tot_incoming_drive),1)
heaviside_step[tot_incoming_drive > 0] = 1.
xt_next = np.asarray(heaviside_step.copy())
return xt_next
def inhibitory_network_state(self, wie, ti, x,white_noise_i):
# Activity of inhibitory neurons
wie = np.asarray(wie)
xt = x[:, 1]
xt = xt.reshape(Sorn.ne, 1)
incoming_drive_e = np.expand_dims(self.incoming_drive(weights = wie, activity_vector=xt),1)
tot_incoming_drive = incoming_drive_e + white_noise_i - ti
"""Implement Heaviside step function"""
heaviside_step = np.expand_dims([0.] * len(tot_incoming_drive),1)
heaviside_step[tot_incoming_drive > 0] = 1.
yt_next = np.asarray(heaviside_step.copy())
return yt_next
    def recurrent_drive(self, wee, wei, te, x, y,white_noise_e):
        """Network state due to recurrent drive received by each unit at time t+1"""
xt = x[:, 1]
xt = xt.reshape(self.ne, 1)
yt = y[:, 1]
yt = yt.reshape(self.ni, 1)
incoming_drive_e = np.expand_dims(self.incoming_drive(weights = wee,activity_vector=xt),1)
incoming_drive_i = np.expand_dims(self.incoming_drive(weights = wei,activity_vector=yt),1)
tot_incoming_drive = incoming_drive_e - incoming_drive_i + white_noise_e - te
"""Implement Heaviside step function"""
heaviside_step = np.expand_dims([0.] * len(tot_incoming_drive),1)
heaviside_step[tot_incoming_drive > 0] = 1.
xt_next = np.asarray(heaviside_step.copy())
return xt_next
def output_network_state(self,woe, to, x):
""" Output layer states
Args:
            woe (array): Connection weights between the recurrent network and the output layer
            to (array): Threshold of output layer neurons
x (array): Excitatory recurrent network states
"""
woe = np.asarray(woe)
xt = x[:, 1]
xt = xt.reshape(Sorn.ne, 1)
incoming_drive_o = np.expand_dims(self.incoming_drive(weights=woe, activity_vector=xt), 1)
tot_incoming_drive = incoming_drive_o - to
        # TODO: If there is only one output neuron, then use the Heaviside step function
if type(to) == list:
"""Winner takes all"""
ot_next = np.where(tot_incoming_drive == tot_incoming_drive.max(), tot_incoming_drive, 0.)
return ot_next
else:
"""Implement Heaviside step function"""
heaviside_step = np.expand_dims([0.] * len(tot_incoming_drive),1)
heaviside_step[tot_incoming_drive > 0] = 1.
return heaviside_step
```
### Helper class for training SORN
#### Build separate class to feed inputs to SORN with plasticity ON
```
class SimulateRMSorn(Sorn):
"""
Args:
inputs - one hot vector of inputs
Returns:
matrix_collection - collection of all weight matrices in dictionaries
"""
def __init__(self,phase,matrices,inputs,sequence_length, targets, reward_window_sizes, epochs):
super().__init__()
self.time_steps = np.shape(inputs)[0]*sequence_length * epochs
Sorn.time_steps = np.shape(inputs)[0]*sequence_length* epochs
# self.inputs = np.asarray(np.tile(inputs,(1,epochs)))
self.inputs = inputs
self.phase = phase
self.matrices = matrices
self.epochs = epochs
self.reward_window_sizes = reward_window_sizes
self.sequence_length = sequence_length
def train_sorn(self):
# Collect the network activity at all time steps
X_all = [0]*self.time_steps
Y_all = [0]*self.time_steps
R_all = [0]*self.time_steps
O_all = [0]*self.time_steps
Rewards,Mo,Mr = [],[],[]
frac_pos_active_conn = []
""" DONOT INITIALIZE WEIGHTS"""
matrix_collection = MatrixCollection(phase = self.phase, matrices = self.matrices)
time_steps_counter= 0
""" Generate white noise"""
white_noise_e = white_gaussian_noise(mu= 0., sigma = 0.04,t = Sorn.ne)
white_noise_i = white_gaussian_noise(mu= 0., sigma = 0.04,t = Sorn.ni)
# Buffers to get the resulting x, y and o vectors at the current time step and update the master matrix
x_buffer, y_buffer, o_buffer = np.zeros(( Sorn.ne, 2)), np.zeros((Sorn.ni, 2)), np.zeros(( Sorn.no, 2))
te_buffer, ti_buffer, to_buffer = np.zeros((Sorn.ne, 1)), np.zeros((Sorn.ni, 1)), np.zeros(( Sorn.no, 1))
# Get the matrices and rename them for ease of reading
Wee, Wei, Wie,Woe = matrix_collection.Wee, matrix_collection.Wei, matrix_collection.Wie, matrix_collection.Woe
Te, Ti,To = matrix_collection.Te, matrix_collection.Ti,matrix_collection.To
X, Y, O = matrix_collection.X, matrix_collection.Y, matrix_collection.O
i = 0
for k in tqdm.tqdm(range(self.inputs.shape[0])):
for j in range(self.sequence_length):
""" Fraction of active connections between E-E network"""
frac_pos_active_conn.append((Wee[i] > 0.0).sum())
network_state = NetworkState(self.inputs[k][j]) # Feed Input as an argument to the class
# Recurrent drive,excitatory, inhibitory and output network states
r = network_state.recurrent_drive(Wee[i], Wei[i], Te[i], X[i], Y[i], white_noise_e = 0.)
excitatory_state_xt_buffer = network_state.excitatory_network_state(Wee[i], Wei[i], Te[i], X[i], Y[i],white_noise_e = 0.)
inhibitory_state_yt_buffer = network_state.inhibitory_network_state(Wie[i], Ti[i], X[i],white_noise_i = 0.)
output_state_ot_buffer = network_state.output_network_state(Woe[i], To[i], X[i])
""" Update X and Y """
x_buffer[:, 0] = X[i][:, 1] # xt -->(becomes) xt_1
x_buffer[:, 1] = excitatory_state_xt_buffer.T # New_activation; x_buffer --> xt
y_buffer[:, 0] = Y[i][:, 1]
y_buffer[:, 1] = inhibitory_state_yt_buffer.T
o_buffer[:, 0] = O[i][:, 1]
o_buffer[:, 1] = output_state_ot_buffer.T
"""Plasticity phase"""
plasticity = Plasticity()
# Reward and mo, mr
current_reward = output_state_ot_buffer*targets[k][j]
Rewards.extend(current_reward)
mo, mr, best_reward, best_reward_window = plasticity.modulation_factor(Rewards, current_reward, self.reward_window_sizes)
print('Input %s | Target %s | predicted %s | mr %s, mo %s'%(self.inputs[k].tolist(), targets[k][j],output_state_ot_buffer, mr, mo))
Mo.append(mo)
Mr.append(mr)
# STDP, Intrinsic plasticity and Synaptic scaling
Wee_t = plasticity.stdp(Wee[i],x_buffer,mr, cutoff_weights = (0.0,1.0))
Woe_t = plasticity.ostdp(Woe[i],x_buffer,mo)
Te_t = plasticity.ip(Te[i],x_buffer)
Wee_t = Plasticity().ss(Wee_t)
Woe_t = Plasticity().ss(Woe_t)
"""Assign the matrices to the matrix collections"""
matrix_collection.weight_matrix(Wee_t, Wei[i], Wie[i],Woe_t, i)
matrix_collection.threshold_matrix(Te_t, Ti[i],To[i], i)
matrix_collection.network_activity_t(x_buffer, y_buffer,o_buffer, i)
X_all[i] = x_buffer[:,1]
Y_all[i] = y_buffer[:,1]
R_all[i] = r
O_all[i] = o_buffer[:,1]
i+=1
plastic_matrices = {'Wee':matrix_collection.Wee[-1],
'Wei': matrix_collection.Wei[-1],
'Wie':matrix_collection.Wie[-1],
'Woe':matrix_collection.Woe[-1],
                            'Te': matrix_collection.Te[-1], 'Ti': matrix_collection.Ti[-1], 'To': matrix_collection.To[-1],
                            'X': X[-1], 'Y': Y[-1], 'O': O[-1]}
return plastic_matrices,X_all,Y_all,R_all,frac_pos_active_conn
training_sequence = np.repeat(np.array([[[0,0,0,1], [0,0,1,0], [0,1,0,0], [1,0,0,0]],
[[1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,1]],
[[1,0,0,0], [0,0,1,0], [0,0,0,1], [0,1,0,0]],
                                        [[0,0,1,0], [1,0,0,0], [0,1,0,0], [0,0,0,1]]]),
repeats=1000, axis=0)
sequence_targets = np.repeat(np.array([1,0,0,0]),repeats=1000,axis=0)
input_str = ['1234','4321', '4213', '2431']
training_input = []
targets = []
for i in range(100):
idx = random.randint(0,3)
inp = input_str[idx]
if inp == '1234':
input_seq = [[0,0,0,1], [0,0,1,0], [0,1,0,0], [1,0,0,0]]
target = [1,1,1,1]
elif inp == '4321':
input_seq = [[1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,1]]
target = [0,0,0,0]
elif inp == '4213':
input_seq = [[1,0,0,0], [0,0,1,0], [0,0,0,1], [0,1,0,0]]
target = [0,0,0,0]
else:
        input_seq = [[0,0,1,0], [1,0,0,0], [0,1,0,0], [0,0,0,1]]
target = [0,0,0,0]
training_input.append(input_seq)
targets.append(target)
print(np.asarray(training_input).shape, targets)
train_plast_inp_mat,X_all_inp,Y_all_inp,R_all, frac_pos_active_conn = SimulateRMSorn(phase = 'Plasticity',
matrices = None,
inputs = np.asarray(training_input),sequence_length = 4, targets = targets,
reward_window_sizes = [1,5,10,20],
epochs = 1).train_sorn()
```
|
github_jupyter
|
```
!pip install transformers
from transformers import BartTokenizer, BartForConditionalGeneration, BartConfig
tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')
sequence = ("In May, Churchill was still generally unpopular with many Conservatives and probably most of the Labour Party. Chamberlain "
"remained Conservative Party leader until October when ill health forced his resignation. By that time, Churchill had won the "
"doubters over and his succession as party leader was a formality."
" "
"He began his premiership by forming a five-man war cabinet which included Chamberlain as Lord President of the Council, "
"Labour leader Clement Attlee as Lord Privy Seal (later as Deputy Prime Minister), Halifax as Foreign Secretary and Labour's "
"Arthur Greenwood as a minister without portfolio. In practice, these five were augmented by the service chiefs and ministers "
"who attended the majority of meetings. The cabinet changed in size and membership as the war progressed, one of the key "
"appointments being the leading trades unionist Ernest Bevin as Minister of Labour and National Service. In response to "
"previous criticisms that there had been no clear single minister in charge of the prosecution of the war, Churchill created "
"and took the additional position of Minister of Defence, making him the most powerful wartime Prime Minister in British "
"history. He drafted outside experts into government to fulfil vital functions, especially on the Home Front. These included "
"personal friends like Lord Beaverbrook and Frederick Lindemann, who became the government's scientific advisor."
" "
"At the end of May, with the British Expeditionary Force in retreat to Dunkirk and the Fall of France seemingly imminent, "
"Halifax proposed that the government should explore the possibility of a negotiated peace settlement using the still-neutral "
"Mussolini as an intermediary. There were several high-level meetings from 26 to 28 May, including two with the French "
"premier Paul Reynaud. Churchill's resolve was to fight on, even if France capitulated, but his position remained precarious "
"until Chamberlain resolved to support him. Churchill had the full support of the two Labour members but knew he could not "
"survive as Prime Minister if both Chamberlain and Halifax were against him. In the end, by gaining the support of his outer "
"cabinet, Churchill outmanoeuvred Halifax and won Chamberlain over. Churchill believed that the only option was to fight on "
"and his use of rhetoric hardened public opinion against a peaceful resolution and prepared the British people for a long war "
"– Jenkins says Churchill's speeches were 'an inspiration for the nation, and a catharsis for Churchill himself'."
" "
"His first speech as Prime Minister, delivered to the Commons on 13 May was the 'blood, toil, tears and sweat' speech. It was "
"little more than a short statement but, Jenkins says, 'it included phrases which have reverberated down the decades'.")
inputs = tokenizer([sequence], max_length=1024, return_tensors='pt')
summary_ids = model.generate(inputs['input_ids'])
summary = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids]
summary
from transformers import pipeline
sequence = ("In May, Churchill was still generally unpopular with many Conservatives and probably most of the Labour Party. Chamberlain "
"remained Conservative Party leader until October when ill health forced his resignation. By that time, Churchill had won the "
"doubters over and his succession as party leader was a formality."
" "
"He began his premiership by forming a five-man war cabinet which included Chamberlain as Lord President of the Council, "
"Labour leader Clement Attlee as Lord Privy Seal (later as Deputy Prime Minister), Halifax as Foreign Secretary and Labour's "
"Arthur Greenwood as a minister without portfolio. In practice, these five were augmented by the service chiefs and ministers "
"who attended the majority of meetings. The cabinet changed in size and membership as the war progressed, one of the key "
"appointments being the leading trades unionist Ernest Bevin as Minister of Labour and National Service. In response to "
"previous criticisms that there had been no clear single minister in charge of the prosecution of the war, Churchill created "
"and took the additional position of Minister of Defence, making him the most powerful wartime Prime Minister in British "
"history. He drafted outside experts into government to fulfil vital functions, especially on the Home Front. These included "
"personal friends like Lord Beaverbrook and Frederick Lindemann, who became the government's scientific advisor."
" "
"At the end of May, with the British Expeditionary Force in retreat to Dunkirk and the Fall of France seemingly imminent, "
"Halifax proposed that the government should explore the possibility of a negotiated peace settlement using the still-neutral "
"Mussolini as an intermediary. There were several high-level meetings from 26 to 28 May, including two with the French "
"premier Paul Reynaud. Churchill's resolve was to fight on, even if France capitulated, but his position remained precarious "
"until Chamberlain resolved to support him. Churchill had the full support of the two Labour members but knew he could not "
"survive as Prime Minister if both Chamberlain and Halifax were against him. In the end, by gaining the support of his outer "
"cabinet, Churchill outmanoeuvred Halifax and won Chamberlain over. Churchill believed that the only option was to fight on "
"and his use of rhetoric hardened public opinion against a peaceful resolution and prepared the British people for a long war "
"– Jenkins says Churchill's speeches were 'an inspiration for the nation, and a catharsis for Churchill himself'."
" "
"His first speech as Prime Minister, delivered to the Commons on 13 May was the 'blood, toil, tears and sweat' speech. It was "
"little more than a short statement but, Jenkins says, 'it included phrases which have reverberated down the decades'.")
summarizer = pipeline("summarization")
summarized = summarizer(sequence, min_length = 75, max_length=1024)
summarized
```
|
github_jupyter
|
# About This Notebook
* The following notebook utilizes the [generated outputs](https://www.kaggle.com/usaiprashanth/gptmodel-outputs) and performs some Exploratory Data Analysis
```
#loading the outputs
import joblib
withoutshuffle = joblib.load('../input/gptmodel-outputs/results (4)/withoutshuffle.pkl')
withshuffle = joblib.load('../input/gptmodel-outputs/results (3)/withshuffle.pkl')
data29 = joblib.load('../input/gptmodel-outputs/results (5)/data29.pkl')
```
* The objects `withshuffle`, `withoutshuffle` and `data29` are nested arrays with the following structure (see the short sketch below):
> `array[0]`: index of the document with respect to THE PILE dataset
> `array[1]`: length of the document
> `array[2]`: score of the document (number of correctly predicted labels)
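For instance (a minimal sketch, assuming the arrays were loaded with joblib as above), the three parallel arrays can be unpacked and inspected like this:
```
# Unpack the three parallel arrays: document indices, document lengths and scores.
doc_indices, doc_lengths, doc_scores = withoutshuffle[0], withoutshuffle[1], withoutshuffle[2]
print(len(doc_indices), doc_indices[0], doc_lengths[0], doc_scores[0])
```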
* The following two graphs compare the score of the model with and without shuffling the evaluation data
* More information about shuffling can be found [here](https://www.kaggle.com/usaiprashanth/gpt-1-3b-model?scriptVersionId=72760342) and [here](https://www.kaggle.com/usaiprashanth/gpt-1-3b-model?scriptVersionId=72761073)
```
import matplotlib.pyplot as plt
plt.plot(withshuffle[0],withshuffle[2],'r+')
plt.plot(withoutshuffle[0],withoutshuffle[2],'r+')
```
* My original interpretation of this idea (which has since been proved wrong) was that the order in which the data is evaluated would affect the evaluation loss of the model. This is not the case; the variation observed is due to the randomness involved in the model.
```
plt.plot(data29[0],data29[2],'r+')
```
* Dividing the arrays of the 0th and 29th shards into 1000 buckets and plotting the average score of each bucket
```
buckets = []
plt.rcParams["figure.figsize"] = (25,3)
import numpy as np
for i in range(0,10000,10):
buckets.append(np.nanmean(withoutshuffle[2][i:i+10]))
plt.plot(buckets)
```
* At least for the first 10,000 samples, there doesn't seem to be any difference in how the data is memorized with respect to its position in the dataset.
* However, it is worth noting that 10,000 samples is a very small sample for a dataset as large as [The Pile](https://pile.eleuther.ai/), and the results can differ significantly when evaluated on another shard of the dataset.
* This can be checked by plotting the bucketed version of data29 (outputs of the 29th shard of THE PILE)
```
buckets = []
plt.rcParams["figure.figsize"] = (25,3)
import numpy as np
for i in range(0,10000,10):
buckets.append(np.nanmean(data29[2][i:i+10]))
plt.plot(buckets)
#Finding means and variances
print(np.nanmean(withoutshuffle[2]),np.nanmean(data29[2]))
print(np.nanvar(withoutshuffle[2]),np.nanvar(data29[2]))
```
* At least for the gpt-neo-1.3B model, there doesn't seem to be any correlation between how data is memorized and its position within the training dataset
|
github_jupyter
|
```
from datascience import *
from datascience.predicates import are
path_data = '../../../data/'
import numpy as np
import matplotlib
matplotlib.use('Agg', warn=False)
%matplotlib inline
import matplotlib.pyplot as plots
plots.style.use('fivethirtyeight')
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
from urllib.request import urlopen
import re
def read_url(url):
return re.sub('\\s+', ' ', urlopen(url).read().decode())
```
# Plotting the classics
In this example, we will explore statistics for two classic novels: *The Adventures of Huckleberry Finn* by Mark Twain, and *Little Women* by Louisa May Alcott. The text of any book can be read by a computer at great speed. Books published before 1923 are currently in the *public domain*, meaning that everyone has the right to copy or use the text in any way. [Project Gutenberg](http://www.gutenberg.org/) is a website that publishes public domain books online. Using Python, we can load the text of these books directly from the web.
This example is meant to illustrate some of the broad themes of this text. Don't worry if the details of the program don't yet make sense. Instead, focus on interpreting the images generated below. Later sections of the text will describe most of the features of the Python programming language used below.
First, we read the text of both books into lists of chapters, called `huck_finn_chapters` and `little_women_chapters`. In Python, a name cannot contain any spaces, and so we will often use an underscore `_` to stand in for a space. The `=` in the lines below give a name on the left to the result of some computation described on the right. A *uniform resource locator* or *URL* is an address on the Internet for some content; in this case, the text of a book. The `#` symbol starts a comment, which is ignored by the computer but helpful for people reading the code.
```
# Read two books, fast!
huck_finn_url = 'https://www.inferentialthinking.com/chapters/01/3/huck_finn.txt'
huck_finn_text = read_url(huck_finn_url)
huck_finn_chapters = huck_finn_text.split('CHAPTER ')[44:]
little_women_url = 'https://www.inferentialthinking.com/chapters/01/3/little_women.txt'
little_women_text = read_url(little_women_url)
little_women_chapters = little_women_text.split('CHAPTER ')[1:]
```
While a computer cannot understand the text of a book, it can provide us with some insight into the structure of the text. The name `huck_finn_chapters` is currently bound to a list of all the chapters in the book. We can place them into a table to see how each chapter begins.
```
# Display the chapters of Huckleberry Finn in a table.
Table().with_column('Chapters', huck_finn_chapters)
```
Each chapter begins with a chapter number in Roman numerals, followed by the first sentence of the chapter. Project Gutenberg has printed the first word of each chapter in upper case.
|
github_jupyter
|
# House Price Prediction
<p><b>Status: <span style="color:orange;">In progress</span></b></p>
##### LOAD THE FEATURE DATA
```
import pandas as pd
import numpy as np
X = pd.read_csv('../../../data/preprocessed_data/X.csv', sep=',')
print ('Feature data, shape:\nX: {}'.format(X.shape))
X.head()
y = pd.read_csv('../../../data/preprocessed_data/y.csv', sep=',', header=None)
print ('Target data, shape:\ny: {}'.format(y.shape))
y.head()
```
##### SPLIT THE DATA
```
from sklearn.model_selection import train_test_split
# set the seed for reproducibility
np.random.seed(127)
# split the dataset into training and testing sets (features and targets)
X_train, X_test, y_train, y_test = train_test_split(X.values, y.values, test_size=0.2, random_state=13)
print('Data shapes:\n')
print('X_train : {}\ny_train : {}\n\nX_test : {}\ny_test : {}'.format(X_train.shape,
y_train.shape,
X_test.shape,
y_test.shape))
```
##### DEFINE NETWORK PARAMETERS
```
# define number of attributes
n_features = X_train.shape[1]
n_target = 1 # quantitative data
# count number of samples in each set of data
n_train = X_train.shape[0]
n_test = X_test.shape[0]
# define amount of neurons
n_layer_in = n_features # one input neuron per feature
n_layer_h1 = 5 # first hidden layer
n_layer_h2 = 5 # second hidden layer
n_layer_out = n_target # 1 neuron in output layer
sigma_init = 0.01 # For randomized initialization
```
##### RESET TENSORFLOW GRAPH IF THERE IS ANY
```
import tensorflow as tf
# this will set up a specific seed in order to control the output
# and get more homogeneous results through every model variation
def reset_graph(seed=127):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
reset_graph()
```
##### MODEL ARCHITECTURE
```
# create symbolic variables
X = tf.placeholder(tf.float32, [None, n_layer_in], name="input")
Y = tf.placeholder(tf.float32, [None, n_layer_out], name="output")
# deploy the variables that will store the weights
W = {
'W1': tf.Variable(tf.random_normal([n_layer_in, n_layer_h1], stddev = sigma_init), name='W1'),
'W2': tf.Variable(tf.random_normal([n_layer_h1, n_layer_h2], stddev = sigma_init), name='W2'),
'W3': tf.Variable(tf.random_normal([n_layer_h2, n_layer_out], stddev = sigma_init), name='W3')
}
# deploy the variables that will store the bias
b = {
'b1': tf.Variable(tf.random_normal([n_layer_h1]), name='b1'),
'b2': tf.Variable(tf.random_normal([n_layer_h2]), name='b2'),
'b3': tf.Variable(tf.random_normal([n_layer_out]), name='b3')
}
# this will create the model architecture and output the result
def model_MLP(_X, _W, _b):
with tf.name_scope('hidden_1'):
layer_h1 = tf.nn.selu(tf.add(tf.matmul(_X,_W['W1']), _b['b1']))
with tf.name_scope('hidden_2'):
layer_h2 = tf.nn.selu(tf.add(tf.matmul(layer_h1,_W['W2']), _b['b2']))
with tf.name_scope('layer_output'):
layer_out = tf.add(tf.matmul(layer_h2,_W['W3']), _b['b3'])
return layer_out # these are the predictions
with tf.name_scope("MLP"):
y_pred = model_MLP(X, W, b)
```
##### DEFINE LEARNING RATE
```
learning_rate = 0.4
# CHOOSE A DECAY METHOD HERE
model_decay = 'none' # [exponential | inverse_time | natural_exponential | polynomial | none]
global_step = tf.Variable(0, trainable=False)
decay_rate = 0.90
decay_step = 10000
if model_decay == 'exponential':
learning_rate = tf.train.exponential_decay(learning_rate, global_step, decay_step, decay_rate)
elif model_decay == 'inverse_time':
learning_rate = tf.train.inverse_time_decay(learning_rate, global_step, decay_step, decay_rate)
elif model_decay == 'natural_exponential':
learning_rate = tf.train.natural_exp_decay(learning_rate, global_step, decay_step, decay_rate)
elif model_decay == 'polynomial':
end_learning_rate = 0.001
learning_rate = tf.train.polynomial_decay(learning_rate, global_step, decay_step, end_learning_rate, power=0.5)
else:
decay_rate = 1.0
learning_rate = tf.train.exponential_decay(learning_rate, global_step, decay_step, decay_rate)
print('Decaying Learning Rate : ', model_decay)
```
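For reference, when `model_decay` is `'exponential'` (and also in the fallback branch, which reuses the same schedule with `decay_rate = 1.0`, i.e. a constant rate), `tf.train.exponential_decay` computes the standard schedule

$$\text{learning\_rate} \cdot \text{decay\_rate}^{\;\text{global\_step} / \text{decay\_step}}$$

so the rate shrinks smoothly by a factor of `decay_rate` over every `decay_step` optimizer steps.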
##### DEFINE MODEL TRAINING AND MEASURE PERFORMANCE
```
with tf.name_scope("loss"):
loss = tf.square(Y - y_pred) # squared error
#loss = tf.nn.softmax(logits=y_pred) # softmax
#loss = tf.nn.log_softmax(logits=y_pred) # log-softmax
#loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=y_pred, dim=-1) # cross-entropy
#loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=Y, logits=y_pred) # sigmoid-cross-entropy
#loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=Y, logits=y_pred) # sparse-softmax-cross-entropy
loss = tf.reduce_mean(loss, name='MSE')
with tf.name_scope("train"):
#optimizer = tf.train.GradientDescentOptimizer(learning_rate) # SGD
#optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,momentum=0.9) # MOMENTUM
#optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate) # ADAGRAD
optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate) # ADADELTA
#optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate, decay=1) # RMS
training_op = optimizer.minimize(loss, global_step=global_step)
# Create summaries
tf.summary.scalar("loss", loss)
tf.summary.scalar("learn_rate", learning_rate)
# Merge all summaries into a single op to generate the summary data
merged_summary_op = tf.summary.merge_all()
```
##### DEFINE DIRECTORIES FOR RESULTS
```
import sys
import shutil
from datetime import datetime
# set up the directory to store the results for tensorboard
now = datetime.utcnow().strftime('%Y%m%d%H%M%S')
root_ckpoint = 'tf_checkpoints'
root_logdir = 'tf_logs'
logdir = '{}/run-{}/'.format(root_logdir, now)
## Try to remove the old checkpoint tree; if it fails, print the error (try...except)
try:
shutil.rmtree(root_ckpoint)
except OSError as e:
print ("Error: %s - %s." % (e.filename, e.strerror))
```
##### EXECUTE THE MODEL
```
from datetime import datetime
# define some parameters
n_epochs = 40
display_epoch = 2 # checkpoint will also be created based on this
batch_size = 10
n_batches = int(n_train/batch_size)
# this will help to restore the model to a specific epoch
saver = tf.train.Saver(tf.global_variables())
# store the results through every epoch iteration
mse_train_list = []
mse_test_list = []
learning_list = []
prediction_results = []
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# write logs for tensorboard
summary_writer = tf.summary.FileWriter(logdir, graph=tf.get_default_graph())
for epoch in range(n_epochs):
for i in range(0, n_train, batch_size):
# create batches
X_batch = X_train[i:i+batch_size]
y_batch = y_train[i:i+batch_size]
# improve the model
_, _summary = sess.run([training_op, merged_summary_op], feed_dict={X:X_batch, Y:y_batch})
# Write logs at every iteration
summary_writer.add_summary(_summary)
# measure performance and display the results
if (epoch+1) % display_epoch == 0:
_mse_train = sess.run(loss, feed_dict={X: X_train, Y: y_train})
_mse_test = sess.run(loss, feed_dict={X: X_test, Y: y_test})
mse_train_list.append(_mse_train); mse_test_list.append(_mse_test)
learning_list.append(sess.run(learning_rate))
# Save model weights to disk for reproducibility
saver = tf.train.Saver(max_to_keep=15)
saver.save(sess, "{}/epoch{:04}.ckpt".format(root_ckpoint, (epoch+1)))
print("Epoch: {:04}\tTrainMSE: {:06.5f}\tTestMSE: {:06.5f}, Learning: {:06.7f}".format((epoch+1),
_mse_train,
_mse_test,
learning_list[-1]))
    # store the predicted values
prediction_results = sess.run(y_pred, feed_dict={X: X_test, Y: y_test})
predictions = sess.run(y_pred, feed_dict={X: X_test, Y: y_test})
# output comparative table
dataframe = pd.DataFrame(predictions, columns=['Prediction'])
dataframe['Target'] = y_test
dataframe['Difference'] = dataframe.Target - dataframe.Prediction
print('\nPrinting results :\n\n', dataframe)
```
##### VISUALIZE THE MODEL'S IMPROVEMENTS
```
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
# set up legend
blue_patch = mpatches.Patch(color='blue', label='Train MSE')
red_patch = mpatches.Patch(color='red', label='Test MSE')
plt.legend(handles=[blue_patch,red_patch])
plt.grid()
# plot the data
plt.plot(mse_train_list, color='blue')
plt.plot(mse_test_list, color='red')
plt.xlabel('epochs (x{})'.format(display_epoch))
plt.ylabel('MSE [minimize]');
```
##### LEARNING RATE EVOLUTION
```
or_patch = mpatches.Patch(color='orange', label='Learning rate')
plt.legend(handles=[or_patch])
plt.plot(learning_list, color='orange');
plt.xlabel('epochs (x{})'.format(display_epoch))
plt.ylabel('learning rate');
```
##### VISUALIZE THE RESULTS
```
plt.figure(figsize=(15,10))
# define legend
blue_patch = mpatches.Patch(color='blue', label='Prediction')
red_patch = mpatches.Patch(color='red', label='Expected Value')
green_patch = mpatches.Patch(color='green', label='Abs Error')
plt.legend(handles=[blue_patch,red_patch, green_patch])
# plot data
x_array = np.arange(len(prediction_results))
plt.scatter(x_array, prediction_results, color='blue')
plt.scatter(x_array, y_test, color='red')
abs_error = abs(y_test-prediction_results)
plt.plot(x_array, abs_error, color='green')
plt.grid()
# define legends
plt.xlabel('index'.format(display_epoch))
plt.ylabel('MEDV');
```
##### VISUALIZE TENSORBOARD
```
from IPython.display import clear_output, Image, display, HTML
# CHECK IT ON TENSORBOARD BY TYPING THIS LINE IN THE COMMAND PROMPT:
# tensorboard --logdir=tf_logs
def strip_consts(graph_def, max_const_size=32):
"""Strip large constant values from graph_def."""
strip_def = tf.GraphDef()
for n0 in graph_def.node:
n = strip_def.node.add()
n.MergeFrom(n0)
if n.op == 'Const':
tensor = n.attr['value'].tensor
size = len(tensor.tensor_content)
if size > max_const_size:
tensor.tensor_content = b"<stripped %d bytes>"%size
return strip_def
def show_graph(graph_def, max_const_size=32):
"""Visualize TensorFlow graph."""
if hasattr(graph_def, 'as_graph_def'):
graph_def = graph_def.as_graph_def()
strip_def = strip_consts(graph_def, max_const_size=max_const_size)
code = """
<script>
function load() {{
document.getElementById("{id}").pbtxt = {data};
}}
</script>
<link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
<div style="height:600px">
<tf-graph-basic id="{id}"></tf-graph-basic>
</div>
""".format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))
iframe = """
    <iframe seamless style="width:1200px;height:620px;border:0" srcdoc="{}"></iframe>
    """.format(code.replace('"', '&quot;'))
display(HTML(iframe))
show_graph(tf.get_default_graph())
```
## ----- PREPARE THE MODEL FOR FUTURE RESTORES -----
##### SAVED VARIABLE LIST
This is the list of variables that were saved in every checkpoint after training. Each checkpoint consists of three file types:
* `.data`: contains the variable values
* `.meta`: contains the graph structure
* `.index`: identifies the checkpoint
```
for i, var in enumerate(saver._var_list):
print('Var {}: {}'.format(i, var))
```
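To see these files on disk (a minimal sketch, assuming the training cell above has already run so that `root_ckpoint` still points at the populated `tf_checkpoints` directory):
```
import os
# Each saved epoch produces a .data-00000-of-00001, .meta and .index file,
# plus a single 'checkpoint' bookkeeping file for the whole directory.
print(sorted(os.listdir(root_ckpoint)))
```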
##### RESTORE TO CHECKPOINT
```
# select the epoch to be restored
epoch = 38
# Running a new session
print('Restoring model to Epoch {}\n'.format(epoch))
with tf.Session() as sess:
# Restore variables from disk
saver.restore(sess, '{}/epoch{:04}.ckpt'.format(root_ckpoint, epoch))
print('\nPrint expected values :')
print(y_test)
print('\nPrint predicted values :')
predictions = sess.run(y_pred, feed_dict={X: X_test})
print(predictions)
```
|
github_jupyter
|
# Word2Vec
**Learning Objectives**
1. Compile all steps into one function
2. Prepare training data for Word2Vec
3. Model and Training
4. Embedding lookup and analysis
## Introduction
Word2Vec is not a singular algorithm, rather, it is a family of model architectures and optimizations that can be used to learn word embeddings from large datasets. Embeddings learned through Word2Vec have proven to be successful on a variety of downstream natural language processing tasks.
Note: This notebook is based on [Efficient Estimation of Word Representations in Vector Space](https://arxiv.org/pdf/1301.3781.pdf) and
[Distributed
Representations of Words and Phrases and their Compositionality](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). It is not an exact implementation of the papers. Rather, it is intended to illustrate the key ideas.
These papers proposed two methods for learning representations of words:
* **Continuous Bag-of-Words Model** which predicts the middle word based on surrounding context words. The context consists of a few words before and after the current (middle) word. This architecture is called a bag-of-words model as the order of words in the context is not important.
* **Continuous Skip-gram Model** which predicts words within a certain range before and after the current word in the same sentence. A worked example of this is given below.
You'll use the skip-gram approach in this notebook. First, you'll explore skip-grams and other concepts using a single sentence for illustration. Next, you'll train your own Word2Vec model on a small dataset. This notebook also contains code to export the trained embeddings and visualize them in the [TensorFlow Embedding Projector](http://projector.tensorflow.org/).
Each learning objective will correspond to a __#TODO__ in the [student lab notebook](../labs/word2vec.ipynb) -- try to complete that notebook first before reviewing this solution notebook.
## Skip-gram and Negative Sampling
While a bag-of-words model predicts a word given the neighboring context, a skip-gram model predicts the context (or neighbors) of a word, given the word itself. The model is trained on skip-grams, which are n-grams that allow tokens to be skipped (see the diagram below for an example). The context of a word can be represented through a set of skip-gram pairs of `(target_word, context_word)` where `context_word` appears in the neighboring context of `target_word`.
Consider the following sentence of 8 words.
> The wide road shimmered in the hot sun.
The context words for each of the 8 words of this sentence are defined by a window size. The window size determines the span of words on either side of a `target_word` that can be considered a `context word`. Take a look at this table of skip-grams for target words based on different window sizes.
Note: For this tutorial, a window size of *n* implies n words on each side with a total window span of 2*n+1 words across a word.
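As a small illustration of these pairs (a plain-Python sketch using the window-size convention above, not part of the original tutorial code), the positive `(target_word, context_word)` pairs for a window size of 2 can be enumerated directly:
```
# Enumerate (target, context) skip-gram pairs for a window size of 2.
sentence = "The wide road shimmered in the hot sun"
words = sentence.lower().split()
window_size = 2
pairs = []
for i, target in enumerate(words):
    for j in range(max(0, i - window_size), min(len(words), i + window_size + 1)):
        if j != i:
            pairs.append((target, words[j]))
print(pairs[:6])
# [('the', 'wide'), ('the', 'road'), ('wide', 'the'), ('wide', 'road'), ('wide', 'shimmered'), ('road', 'the')]
```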

The training objective of the skip-gram model is to maximize the probability of predicting context words given the target word. For a sequence of words *w<sub>1</sub>, w<sub>2</sub>, ... w<sub>T</sub>*, the objective can be written as the average log probability

$$\frac{1}{T}\sum_{t=1}^{T}\ \sum_{-c \le j \le c,\ j \neq 0} \log p\left(w_{t+j} \mid w_{t}\right)$$

where `c` is the size of the training context. The basic skip-gram formulation defines this probability using the softmax function:

$$p\left(w_{O} \mid w_{I}\right) = \frac{\exp\left({v'_{w_{O}}}^{\top} v_{w_{I}}\right)}{\sum_{w=1}^{W} \exp\left({v'_{w}}^{\top} v_{w_{I}}\right)}$$

where *v* and *v'* are the target and context vector representations of words and *W* is the vocabulary size.
Computing the denominator of this formulation involves performing a full softmax over the entire vocabulary, which often contains a large number (10<sup>5</sup>-10<sup>7</sup>) of terms.
The [Noise Contrastive Estimation](https://www.tensorflow.org/api_docs/python/tf/nn/nce_loss) loss function is an efficient approximation for a full softmax. With an objective to learn word embeddings instead of modelling the word distribution, NCE loss can be [simplified](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) to use negative sampling.
The simplified negative sampling objective for a target word is to distinguish the context word from *num_ns* negative samples drawn from noise distribution *P<sub>n</sub>(w)* of words. More precisely, an efficient approximation of full softmax over the vocabulary is, for a skip-gram pair, to pose the loss for a target word as a classification problem between the context word and *num_ns* negative samples.
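For reference, the quantity that negative sampling maximizes for a single skip-gram pair *(w<sub>I</sub>, w<sub>O</sub>)*, with *k = num_ns* negatives drawn from the noise distribution *P<sub>n</sub>(w)*, is (as formulated in the Mikolov et al. paper linked above; it is shown here only as a reminder and is not computed explicitly in this notebook):

$$\log \sigma\left({v'_{w_O}}^{\top} v_{w_I}\right) + \sum_{i=1}^{k} \mathbb{E}_{w_i \sim P_n(w)}\left[\log \sigma\left(-{v'_{w_i}}^{\top} v_{w_I}\right)\right]$$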
A negative sample is defined as a (target_word, context_word) pair such that the context_word does not appear in the `window_size` neighborhood of the target_word. For the example sentence, these are a few potential negative samples (when `window_size` is 2).
```
(hot, shimmered)
(wide, hot)
(wide, sun)
```
In the next section, you'll generate skip-grams and negative samples for a single sentence. You'll also learn about subsampling techniques and train a classification model for positive and negative training examples later in the tutorial.
## Setup
```
# Use the chown command to change the ownership of repository to user.
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
!pip install -q tqdm
# You can use any Python source file as a module by executing an import statement in some other Python source file.
# The import statement combines two operations; it searches for the named module, then it binds the
# results of that search to a name in the local scope.
import io
import itertools
import numpy as np
import os
import re
import string
import tensorflow as tf
import tqdm
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Activation, Dense, Dot, Embedding, Flatten, GlobalAveragePooling1D, Reshape
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
```
Please check your tensorflow version using the cell below.
```
# Show the currently installed version of TensorFlow
print("TensorFlow version: ",tf.version.VERSION)
SEED = 42
AUTOTUNE = tf.data.experimental.AUTOTUNE
```
### Vectorize an example sentence
Consider the following sentence:
`The wide road shimmered in the hot sun.`
Tokenize the sentence:
```
sentence = "The wide road shimmered in the hot sun"
tokens = list(sentence.lower().split())
print(len(tokens))
```
Create a vocabulary to save mappings from tokens to integer indices.
```
vocab, index = {}, 1 # start indexing from 1
vocab['<pad>'] = 0 # add a padding token
for token in tokens:
if token not in vocab:
vocab[token] = index
index += 1
vocab_size = len(vocab)
print(vocab)
```
Create an inverse vocabulary to save mappings from integer indices to tokens.
```
inverse_vocab = {index: token for token, index in vocab.items()}
print(inverse_vocab)
```
Vectorize your sentence.
```
example_sequence = [vocab[word] for word in tokens]
print(example_sequence)
```
### Generate skip-grams from one sentence
The `tf.keras.preprocessing.sequence` module provides useful functions that simplify data preparation for Word2Vec. You can use the `tf.keras.preprocessing.sequence.skipgrams` to generate skip-gram pairs from the `example_sequence` with a given `window_size` from tokens in the range `[0, vocab_size)`.
Note: `negative_samples` is set to `0` here as batching negative samples generated by this function requires a bit of code. You will use another function to perform negative sampling in the next section.
```
window_size = 2
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
example_sequence,
vocabulary_size=vocab_size,
window_size=window_size,
negative_samples=0)
print(len(positive_skip_grams))
```
Take a look at few positive skip-grams.
```
for target, context in positive_skip_grams[:5]:
print(f"({target}, {context}): ({inverse_vocab[target]}, {inverse_vocab[context]})")
```
### Negative sampling for one skip-gram
The `skipgrams` function returns all positive skip-gram pairs by sliding over a given window span. To produce additional skip-gram pairs that would serve as negative samples for training, you need to sample random words from the vocabulary. Use the `tf.random.log_uniform_candidate_sampler` function to sample `num_ns` number of negative samples for a given target word in a window. You can call the function on one skip-gram's target word and pass the context word as the true class to exclude it from being sampled.
Key point: *num_ns* (number of negative samples per positive context word) between [5, 20] is [shown to work](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) best for smaller datasets, while *num_ns* between [2,5] suffices for larger datasets.
```
# Get target and context words for one positive skip-gram.
target_word, context_word = positive_skip_grams[0]
# Set the number of negative samples per positive context.
num_ns = 4
context_class = tf.reshape(tf.constant(context_word, dtype="int64"), (1, 1))
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class, # class that should be sampled as 'positive'
num_true=1, # each positive skip-gram has 1 positive context class
num_sampled=num_ns, # number of negative context words to sample
unique=True, # all the negative samples should be unique
range_max=vocab_size, # pick index of the samples from [0, vocab_size]
seed=SEED, # seed for reproducibility
name="negative_sampling" # name of this operation
)
print(negative_sampling_candidates)
print([inverse_vocab[index.numpy()] for index in negative_sampling_candidates])
```
### Construct one training example
For a given positive `(target_word, context_word)` skip-gram, you now also have `num_ns` negative sampled context words that do not appear in the window size neighborhood of `target_word`. Batch the `1` positive `context_word` and `num_ns` negative context words into one tensor. This produces a set of positive skip-grams (labelled as `1`) and negative samples (labelled as `0`) for each target word.
```
# Add a dimension so you can use concatenation (on the next step).
negative_sampling_candidates = tf.expand_dims(negative_sampling_candidates, 1)
# Concat positive context word with negative sampled words.
context = tf.concat([context_class, negative_sampling_candidates], 0)
# Label first context word as 1 (positive) followed by num_ns 0s (negative).
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Reshape target to shape (1,) and context and label to (num_ns+1,).
target = tf.squeeze(target_word)
context = tf.squeeze(context)
label = tf.squeeze(label)
```
Take a look at the context and the corresponding labels for the target word from the skip-gram example above.
```
print(f"target_index : {target}")
print(f"target_word : {inverse_vocab[target_word]}")
print(f"context_indices : {context}")
print(f"context_words : {[inverse_vocab[c.numpy()] for c in context]}")
print(f"label : {label}")
```
A tuple of `(target, context, label)` tensors constitutes one training example for training your skip-gram negative sampling Word2Vec model. Notice that the target is of shape `(1,)` while the context and label are of shape `(1+num_ns,)`.
```
print(f"target :", target)
print(f"context :", context )
print(f"label :", label )
```
### Summary
This picture summarizes the procedure of generating a training example from a sentence.

## Lab Task 1: Compile all steps into one function
### Skip-gram Sampling table
A large dataset means a larger vocabulary with a higher number of frequent words such as stopwords. Training examples obtained from sampling commonly occurring words (such as `the`, `is`, `on`) don't add much useful information for the model to learn from. [Mikolov et al.](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) suggest subsampling of frequent words as a helpful practice to improve embedding quality.
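For reference, the subsampling rule proposed in that paper discards each occurrence of a word $w_i$ with probability

$$P(w_i) = 1 - \sqrt{\frac{t}{f(w_i)}}$$

where $f(w_i)$ is the word's frequency in the corpus and $t$ is a chosen threshold (around $10^{-5}$ in the paper); the sampling table used below encodes the same idea as a per-rank keep probability.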
The `tf.keras.preprocessing.sequence.skipgrams` function accepts a sampling table argument to encode probabilities of sampling any token. You can use the `tf.keras.preprocessing.sequence.make_sampling_table` to generate a word-frequency rank based probabilistic sampling table and pass it to `skipgrams` function. Take a look at the sampling probabilities for a `vocab_size` of 10.
```
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(size=10)
print(sampling_table)
```
`sampling_table[i]` denotes the probability of sampling the i-th most common word in a dataset. The function assumes a [Zipf's distribution](https://en.wikipedia.org/wiki/Zipf%27s_law) of the word frequencies for sampling.
Key point: The `tf.random.log_uniform_candidate_sampler` already assumes that the vocabulary frequency follows a log-uniform (Zipf's) distribution. Using this distribution-weighted sampling also helps approximate the Noise Contrastive Estimation (NCE) loss with simpler loss functions for training a negative sampling objective.
### Generate training data
Compile all the steps described above into a function that can be called on a list of vectorized sentences obtained from any text dataset. Notice that the sampling table is built before sampling skip-gram word pairs. You will use this function in the later sections.
```
# Generates skip-gram pairs with negative sampling for a list of sequences
# (int-encoded sentences) based on window size, number of negative samples
# and vocabulary size.
def generate_training_data(sequences, window_size, num_ns, vocab_size, seed):
# Elements of each training example are appended to these lists.
targets, contexts, labels = [], [], []
# Build the sampling table for vocab_size tokens.
# TODO 1a
sampling_table = tf.keras.preprocessing.sequence.make_sampling_table(vocab_size)
# Iterate over all sequences (sentences) in dataset.
for sequence in tqdm.tqdm(sequences):
# Generate positive skip-gram pairs for a sequence (sentence).
positive_skip_grams, _ = tf.keras.preprocessing.sequence.skipgrams(
sequence,
vocabulary_size=vocab_size,
sampling_table=sampling_table,
window_size=window_size,
negative_samples=0)
# Iterate over each positive skip-gram pair to produce training examples
# with positive context word and negative samples.
# TODO 1b
for target_word, context_word in positive_skip_grams:
context_class = tf.expand_dims(
tf.constant([context_word], dtype="int64"), 1)
negative_sampling_candidates, _, _ = tf.random.log_uniform_candidate_sampler(
true_classes=context_class,
num_true=1,
num_sampled=num_ns,
unique=True,
range_max=vocab_size,
seed=SEED,
name="negative_sampling")
# Build context and label vectors (for one target word)
negative_sampling_candidates = tf.expand_dims(
negative_sampling_candidates, 1)
context = tf.concat([context_class, negative_sampling_candidates], 0)
label = tf.constant([1] + [0]*num_ns, dtype="int64")
# Append each element from the training example to global lists.
targets.append(target_word)
contexts.append(context)
labels.append(label)
return targets, contexts, labels
```
## Lab Task 2: Prepare training data for Word2Vec
With an understanding of how to work with one sentence for a skip-gram negative sampling based Word2Vec model, you can proceed to generate training examples from a larger list of sentences!
### Download text corpus
You will use a text file of Shakespeare's writing for this tutorial. Change the following line to run this code on your own data.
```
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
```
Read text from the file and take a look at the first few lines.
```
with open(path_to_file) as f:
lines = f.read().splitlines()
for line in lines[:20]:
print(line)
```
Use the non-empty lines to construct a `tf.data.TextLineDataset` object for the next steps.
```
# TODO 2a
text_ds = tf.data.TextLineDataset(path_to_file).filter(lambda x: tf.cast(tf.strings.length(x), bool))
```
### Vectorize sentences from the corpus
You can use the `TextVectorization` layer to vectorize sentences from the corpus. Learn more about using this layer in this [Text Classification](https://www.tensorflow.org/tutorials/keras/text_classification) tutorial. Notice from the first few sentences above that the text needs to be in one case and punctuation needs to be removed. To do this, define a `custom_standardization` function that can be used in the `TextVectorization` layer.
```
# We create a custom standardization function to lowercase the text and
# remove punctuation.
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
return tf.strings.regex_replace(lowercase,
'[%s]' % re.escape(string.punctuation), '')
# Define the vocabulary size and number of words in a sequence.
vocab_size = 4096
sequence_length = 10
# Use the text vectorization layer to normalize, split, and map strings to
# integers. Set output_sequence_length length to pad all samples to same length.
vectorize_layer = TextVectorization(
standardize=custom_standardization,
max_tokens=vocab_size,
output_mode='int',
output_sequence_length=sequence_length)
```
Call `adapt` on the text dataset to create vocabulary.
```
vectorize_layer.adapt(text_ds.batch(1024))
```
Once the state of the layer has been adapted to represent the text corpus, the vocabulary can be accessed with `get_vocabulary()`. This function returns a list of all vocabulary tokens sorted (descending) by their frequency.
```
# Save the created vocabulary for reference.
inverse_vocab = vectorize_layer.get_vocabulary()
print(inverse_vocab[:20])
```
The vectorize_layer can now be used to generate vectors for each element in the `text_ds`.
```
def vectorize_text(text):
text = tf.expand_dims(text, -1)
return tf.squeeze(vectorize_layer(text))
# Vectorize the data in text_ds.
text_vector_ds = text_ds.batch(1024).prefetch(AUTOTUNE).map(vectorize_layer).unbatch()
```
### Obtain sequences from the dataset
You now have a `tf.data.Dataset` of integer encoded sentences. To prepare the dataset for training a Word2Vec model, flatten the dataset into a list of sentence vector sequences. This step is required as you would iterate over each sentence in the dataset to produce positive and negative examples.
Note: Since the `generate_training_data()` defined earlier uses non-TF python/numpy functions, you could also use a `tf.py_function` or `tf.numpy_function` with `tf.data.Dataset.map()`.
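For illustration only, here is a minimal sketch of wrapping arbitrary Python logic with `tf.py_function` inside `Dataset.map()`; the helper `py_len` is a made-up example and not part of this tutorial's pipeline.
```
import numpy as np
import tensorflow as tf

def py_len(seq):
    # Arbitrary Python/NumPy logic, executed eagerly on each element.
    return np.int32(len(seq))

# Wrap the Python function so it can run inside the tf.data pipeline.
lengths_ds = text_vector_ds.map(
    lambda seq: tf.py_function(py_len, inp=[seq], Tout=tf.int32))
```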
```
sequences = list(text_vector_ds.as_numpy_iterator())
print(len(sequences))
```
Take a look at few examples from `sequences`.
```
for seq in sequences[:5]:
print(f"{seq} => {[inverse_vocab[i] for i in seq]}")
```
### Generate training examples from sequences
`sequences` is now a list of int encoded sentences. Just call the `generate_training_data()` function defined earlier to generate training examples for the Word2Vec model. To recap, the function iterates over each word from each sequence to collect positive and negative context words. The lengths of `targets`, `contexts`, and `labels` should be the same, representing the total number of training examples.
```
targets, contexts, labels = generate_training_data(
sequences=sequences,
window_size=2,
num_ns=4,
vocab_size=vocab_size,
seed=SEED)
print(len(targets), len(contexts), len(labels))
```
### Configure the dataset for performance
To perform efficient batching for the potentially large number of training examples, use the `tf.data.Dataset` API. After this step, you would have a `tf.data.Dataset` object of `(target_word, context_word), (label)` elements to train your Word2Vec model!
```
BATCH_SIZE = 1024
BUFFER_SIZE = 10000
dataset = tf.data.Dataset.from_tensor_slices(((targets, contexts), labels))
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
print(dataset)
```
Add `cache()` and `prefetch()` to improve performance.
```
dataset = dataset.cache().prefetch(buffer_size=AUTOTUNE)
print(dataset)
```
## Lab Task 3: Model and Training
The Word2Vec model can be implemented as a classifier to distinguish between true context words from skip-grams and false context words obtained through negative sampling. You can perform a dot product between the embeddings of target and context words to obtain predictions for labels and compute loss against true labels in the dataset.
### Subclassed Word2Vec Model
Use the [Keras Subclassing API](https://www.tensorflow.org/guide/keras/custom_layers_and_models) to define your Word2Vec model with the following layers:
* `target_embedding`: A `tf.keras.layers.Embedding` layer which looks up the embedding of a word when it appears as a target word. The number of parameters in this layer are `(vocab_size * embedding_dim)`.
* `context_embedding`: Another `tf.keras.layers.Embedding` layer which looks up the embedding of a word when it appears as a context word. The number of parameters in this layer are the same as those in `target_embedding`, i.e. `(vocab_size * embedding_dim)`.
* `dots`: A `tf.keras.layers.Dot` layer that computes the dot product of target and context embeddings from a training pair.
* `flatten`: A `tf.keras.layers.Flatten` layer to flatten the results of `dots` layer into logits.
With the subclassed model, you can define the `call()` function that accepts `(target, context)` pairs which can then be passed into their corresponding embedding layer. Reshape the `context_embedding` to perform a dot product with `target_embedding` and return the flattened result.
Key point: The `target_embedding` and `context_embedding` layers can be shared as well. You could also use a concatenation of both embeddings as the final Word2Vec embedding.
```
class Word2Vec(Model):
def __init__(self, vocab_size, embedding_dim):
super(Word2Vec, self).__init__()
self.target_embedding = Embedding(vocab_size,
embedding_dim,
input_length=1,
name="w2v_embedding", )
self.context_embedding = Embedding(vocab_size,
embedding_dim,
input_length=num_ns+1)
self.dots = Dot(axes=(3,2))
self.flatten = Flatten()
def call(self, pair):
target, context = pair
we = self.target_embedding(target)
ce = self.context_embedding(context)
dots = self.dots([ce, we])
return self.flatten(dots)
```
### Define loss function and compile model
For simplicity, you can use `tf.keras.losses.CategoricalCrossentropy` as an alternative to the negative sampling loss. If you would like to write your own custom loss function, you can also do so as follows:
``` python
def custom_loss(x_logit, y_true):
return tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=y_true)
```
It's time to build your model! Instantiate your Word2Vec class with an embedding dimension of 128 (you could experiment with different values). Compile the model with the `tf.keras.optimizers.Adam` optimizer.
```
# TODO 3a
embedding_dim = 128
word2vec = Word2Vec(vocab_size, embedding_dim)
word2vec.compile(optimizer='adam',
loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
```
Also define a callback to log training statistics for TensorBoard.
```
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="logs")
```
Train the model with `dataset` prepared above for some number of epochs.
```
word2vec.fit(dataset, epochs=20, callbacks=[tensorboard_callback])
```
TensorBoard now shows the Word2Vec model's accuracy and loss.
```
!tensorboard --bind_all --port=8081 --logdir logs
```
Run the following command in **Cloud Shell:**
<code>gcloud beta compute ssh --zone <instance-zone> <notebook-instance-name> --project <project-id> -- -L 8081:localhost:8081</code>
Make sure to replace `<instance-zone>`, `<notebook-instance-name>`, and `<project-id>`.
In Cloud Shell, click *Web Preview* > *Change Port* and insert port number *8081*. Click *Change and Preview* to open the TensorBoard.

**To quit the TensorBoard, click Kernel > Interrupt kernel**.
## Lab Task 4: Embedding lookup and analysis
Obtain the weights from the model using `get_layer()` and `get_weights()`. The `get_vocabulary()` function provides the vocabulary to build a metadata file with one token per line.
```
# TODO 4a
weights = word2vec.get_layer('w2v_embedding').get_weights()[0]
vocab = vectorize_layer.get_vocabulary()
```
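If you want to try the concatenation variant mentioned in the key point earlier, a minimal sketch (assuming the attribute and layer names from the `Word2Vec` class defined above) could look like this:
```
import numpy as np

# Embedding tables learned for the target and context roles.
target_weights = word2vec.get_layer('w2v_embedding').get_weights()[0]  # (vocab_size, embedding_dim)
context_weights = word2vec.context_embedding.get_weights()[0]          # (vocab_size, embedding_dim)

# One possible combined Word2Vec embedding: concatenate both tables per word.
combined_embeddings = np.concatenate([target_weights, context_weights], axis=1)
print(combined_embeddings.shape)  # (vocab_size, 2 * embedding_dim)
```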
Create and save the vectors and metadata file.
```
out_v = io.open('vectors.tsv', 'w', encoding='utf-8')
out_m = io.open('metadata.tsv', 'w', encoding='utf-8')
for index, word in enumerate(vocab):
if index == 0: continue # skip 0, it's padding.
vec = weights[index]
out_v.write('\t'.join([str(x) for x in vec]) + "\n")
out_m.write(word + "\n")
out_v.close()
out_m.close()
```
Download the `vectors.tsv` and `metadata.tsv` to analyze the obtained embeddings in the [Embedding Projector](https://projector.tensorflow.org/).
```
try:
from google.colab import files
files.download('vectors.tsv')
files.download('metadata.tsv')
except Exception as e:
pass
```
## Next steps
This tutorial has shown you how to implement a skip-gram Word2Vec model with negative sampling from scratch and visualize the obtained word embeddings.
* To learn more about word vectors and their mathematical representations, refer to these [notes](https://web.stanford.edu/class/cs224n/readings/cs224n-2019-notes01-wordvecs1.pdf).
* To learn more about advanced text processing, read the [Transformer model for language understanding](https://www.tensorflow.org/tutorials/text/transformer) tutorial.
* If you’re interested in pre-trained embedding models, you may also be interested in [Exploring the TF-Hub CORD-19 Swivel Embeddings](https://www.tensorflow.org/hub/tutorials/cord_19_embeddings_keras), or the [Multilingual Universal Sentence Encoder](https://www.tensorflow.org/hub/tutorials/cross_lingual_similarity_with_tf_hub_multilingual_universal_encoder)
* You may also like to train the model on a new dataset (there are many available in [TensorFlow Datasets](https://www.tensorflow.org/datasets)).
```
from scipy.sparse import diags
import random
import numpy as np
import scipy as sc
import pandas as pd
import csv
import scipy.linalg as spl
import matplotlib.pyplot as plt
from matplotlib import rc
rc('text', usetex=True)
import time
import sys
sys.path.insert(0, '../../python/')
from opt_utils import *
from grad_utils import *
from ks_utils import *
from simulation_utils import *
from cv_utils import *
%matplotlib inline
```
# Generate synthetic data
```
N = 10 # number of teams
T = 10 # number of seasons/rounds/years
tn = [1] * int(T * N * (N - 1)/2) # number of games between each pair of teams
```
### Gaussian Process
```
random.seed(0)
np.random.seed(0)
P_list = make_prob_matrix(T,N,r = 1,alpha = 1,mu = [0,0.2])
game_matrix_list = get_game_matrix_list_from_P(tn,P_list)
data = game_matrix_list # shape: T*N*N
```
## Oracle estimator
```
# vanilla BT
random.seed(0)
np.random.seed(0)
_, beta_oracle = gd_bt(data = P_list)
latent = beta_oracle
for i in range(N):
plt.plot(latent[:,i], label="team %d"%i)
plt.xlabel("season number")
plt.ylabel("latent parameter")
# plt.legend(loc='upper left', bbox_to_anchor=(1, 1.03, 1, 0))
```
## Kernel method
## $h = T^{-3/4}$
```
T**(-3/4)
T, N = data.shape[0:2]
ks_data = kernel_smooth(data,1/6 * T**(-1/5))
ks_data[1,:,:]
objective_pgd, beta_pgd = gd_bt(data = ks_data,verbose=True)
T, N = data.shape[0:2]
beta = beta_pgd.reshape((T,N))
f = plt.figure(1, figsize = (9,5))
ax = plt.subplot(111)
for i in range(N):
ax.plot(range(1,T + 1),beta[:,i],marker = '.',label = 'Team' + str(i),linewidth=1)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
# ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
# f.savefig("l2_sq_solution.pdf", bbox_inches='tight')
```
## LOOCV
```
start_time = time.time()
random.seed(0)
np.random.seed(0)
h_list = np.linspace(0.3, 0.01, 10)
# h_cv, nll_cv, beta_cv, prob_cv = cv_utils.loocv_ks(data, h_list, gd_bt, num_loocv = 200, return_prob = True, out = "notebook")
h_cv, nll_cv, beta_cv, prob_cv = loocv_ks(data, h_list, gd_bt, num_loocv = 200, return_prob = True, out = "notebook")
loo_nll_DBT, loo_prob_DBT = max(nll_cv), prob_cv[np.argmax(nll_cv)]
print("--- %s seconds ---" % (time.time() - start_time))
h_cv
f = plt.figure(1, figsize = (7,5))
size_ylabel = 20
size_xlabel = 30
size_tick = 20
nll_cv = nll_cv
plt.plot(h_list[::-1], nll_cv)
plt.xlabel(r'$h$',fontsize = size_xlabel); plt.ylabel(r"Averaged nll",fontsize = size_ylabel)
plt.tick_params(axis='both', which='major', labelsize=size_tick)
# f.savefig("cv_curve.pdf", bbox_inches='tight')
import time
start_time = time.time()
random.seed(0)
np.random.seed(0)
h = h_cv
nll_DBT, beta_DBT, prob_DBT = loo_DBT(data, h, gd_bt, num_loo = 200, return_prob = True, out = "notebook")
print("--- %s seconds ---" % (time.time() - start_time))
def get_winrate(data):
T, N = data.shape[:2]
winrate = np.sum(data, 2) / (np.sum(data,2) + np.sum(data,1))
return winrate
def loo_winrate(data,num_loo = 200):
indices = np.array(np.where(np.full(data.shape, True))).T
cum_match = np.cumsum(data.flatten())
loglikes_loo = 0
prob_loo = 0
for i in range(num_loo):
data_loo = data.copy()
rand_match = np.random.randint(np.sum(data))
rand_index = indices[np.min(np.where(cum_match >= rand_match)[0])]
data_loo[tuple(rand_index)] -= 1
winrate_loo = get_winrate(data = data_loo)
prob_loo += 1 - winrate_loo[rand_index[0],rand_index[1]]
return (-loglikes_loo/num_loo, prob_loo/num_loo)
# winrate
random.seed(0)
np.random.seed(0)
winrate = get_winrate(data)
loo_nll_wr, loo_prob_wr = loo_winrate(data)
loo_prob_wr
# vanilla BT
import time
start_time = time.time()
random.seed(0)
np.random.seed(0)
objective_vanilla_bt, beta_vanilla_bt = gd_bt(data = data,verbose = True)
loo_nll_vBT, loo_prob_vBT = loo_vBT(data,num_loo = 200)
print("--- %s seconds ---" % (time.time() - start_time))
loo_nll_vBT
loo_prob_vBT
rank_dif_estimator = [0] * 3
beta_all = [winrate,beta_vanilla_bt,beta_cv]
for i in range(len(rank_dif_estimator)):
betai = beta_all[i]
rank_dif_estimator[i] = np.mean(av_dif_rank(beta_oracle,betai))
rank_dif_estimator
df = pd.DataFrame({'estimator':['winrate','vanilla BT','DBT'],'average rank difference':rank_dif_estimator,
'LOO Prob':[loo_prob_wr,loo_prob_vBT,loo_prob_DBT],
'LOO nll':[loo_nll_wr,loo_nll_vBT,loo_nll_DBT]})
print(df.to_latex(index_names=True, escape=False, index=False,
column_format='c|c|c|c|', float_format="{:0.2f}".format,
header=True, bold_rows=True))
T, N = data.shape[0:2]
f = plt.figure(1, figsize = (10,8))
size_ylabel = 20
size_xlabel = 15
size_title = 15
size_tick = 13
size_legend = 15.4
font_title = "Times New Roman Bold"
random.seed(0)
np.random.seed(0)
color_matrix = c=np.random.rand(N,3)
beta = beta_oracle.reshape((T,N))
ax = plt.subplot(221)
for i in range(N):
ax.plot(range(1,T + 1),beta[:,i],c=color_matrix[i,:],marker = '.',label = 'Team' + str(i),linewidth=1)
ax.tick_params(axis='both', which='major', labelsize=size_tick)
plt.title(r"True $\beta^*$",fontsize = size_title)
plt.xlabel(r"$t$",fontsize = size_xlabel); plt.ylabel(r"${\beta}^*$",fontsize = size_ylabel,rotation = "horizontal")
bottom, top = plt.ylim()
beta = beta_cv.reshape((T,N))
ax = plt.subplot(222)
for i in range(N):
ax.plot(range(1,T + 1),beta[:,i],c=color_matrix[i,:],marker = '.',label = 'Team' + str(i),linewidth=1)
ax.tick_params(axis='both', which='major', labelsize=size_tick)
plt.title(r"Dynamic Bradley-Terry, Gaussian Kernel",fontsize = size_title)
plt.xlabel(r"$t$",fontsize = size_xlabel); plt.ylabel(r"$\hat{\beta}$",fontsize = size_ylabel,rotation = "horizontal")
# plt.ylim((bottom, top))
beta = winrate.reshape((T,N))
ax = plt.subplot(223)
for i in range(N):
ax.plot(range(1,T + 1),beta[:,i],c=color_matrix[i,:],marker = '.',label = 'Team' + str(i),linewidth=1)
ax.tick_params(axis='both', which='major', labelsize=size_tick)
plt.title(r"Win Rate",fontsize = size_title)
plt.xlabel("t",fontsize = size_xlabel); plt.ylabel(r"Win Rate",fontsize = 10,rotation = "vertical")
ax.legend(loc='lower left', fontsize = size_legend,labelspacing = 0.75,bbox_to_anchor=(-0.03,-0.6),ncol = 5)
beta = beta_vanilla_bt.reshape((T,N))
ax = plt.subplot(224)
for i in range(N):
ax.plot(range(1,T + 1),beta[:,i],c=color_matrix[i,:],marker = '.',label = 'Team' + str(i),linewidth=1)
ax.tick_params(axis='both', which='major', labelsize=size_tick)
plt.title(r"Original Bradley-Terry",fontsize = size_title)
plt.xlabel(r"$t$",fontsize = size_xlabel); plt.ylabel(r"$\hat{\beta}$",fontsize = size_ylabel,rotation = "horizontal")
plt.subplots_adjust(hspace = 0.3)
plt.show()
# f.savefig("compare.pdf", bbox_inches='tight')
```
## Repeated experiment
```
import time
start_time = time.time()
random.seed(0)
np.random.seed(0)
B = 20
loo_ks = 200
loo = 200
h_cv_list = []
rank_diff_DBT_list, loo_nll_DBT_list, loo_prob_DBT_list = [], [], []
rank_diff_wr_list, loo_nll_wr_list, loo_prob_wr_list = [], [], []
rank_diff_vBT_list, loo_nll_vBT_list, loo_prob_vBT_list = [], [], []
for b in range(B):
N = 10 # number of teams
T = 10 # number of seasons/rounds/years
tn = [1] * int(T * N * (N - 1)/2) # number of games between each pair of teams
[alpha,r] = [1,1]
##### get beta here #####
P_list = make_prob_matrix(T,N,r = 1,alpha = 1,mu = [0,0.2])
P_winrate = P_list.sum(axis=2)
game_matrix_list = get_game_matrix_list_from_P(tn,P_list)
data = game_matrix_list # shape: T*N*N
# true beta
_, beta_oracle = gd_bt(data = P_list)
# ks cv
h_list = np.linspace(0.15, 0.01, 10)
h_cv, nll_cv, beta_cv, prob_cv = loocv_ks(data, h_list, gd_bt, num_loocv = loo_ks, verbose = False,
return_prob = True, out = "notebook")
h_cv_list.append(h_cv)
loo_nll_DBT_list.append(max(nll_cv))
loo_prob_DBT_list.append(prob_cv[np.argmax(nll_cv)])
rank_diff_DBT_list.append(np.mean(av_dif_rank(beta_oracle,beta_cv)))
# # fixed h
# h_cv = 1/6 * T**(-1/5)
# nll_cv, beta_cv, prob_cv = loo_DBT(data, h_cv, gd_bt, num_loo = 200, return_prob = True, out = "notebook")
# h_cv_list.append(h_cv)
# loo_nll_DBT_list.append(nll_cv)
# loo_prob_DBT_list.append(prob_cv)
# rank_diff_DBT_list.append(np.mean(av_dif_rank(beta_oracle,beta_cv)))
winrate = get_winrate(data)
loo_nll_wr, loo_prob_wr = loo_winrate(data,num_loo = loo)
loo_nll_wr_list.append(loo_nll_wr)
loo_prob_wr_list.append(loo_prob_wr)
rank_diff_wr_list.append(np.mean(av_dif_rank(beta_oracle,winrate)))
objective_vanilla_bt, beta_vBT = gd_bt(data = data)
loo_nll_vBT, loo_prob_vBT = loo_vBT(data,num_loo = loo)
loo_nll_vBT_list.append(loo_nll_vBT)
loo_prob_vBT_list.append(loo_prob_vBT)
rank_diff_vBT_list.append(np.mean(av_dif_rank(beta_oracle,beta_vBT)))
print(str(b) + '-th repeat finished.')
print("--- %s seconds ---" % (time.time() - start_time))
rank_dif_estimator = [np.mean(rank_diff_wr_list),
np.mean(rank_diff_vBT_list),
np.mean(rank_diff_DBT_list)]
loo_prob_wr = np.mean(loo_prob_wr_list)
loo_prob_DBT = np.mean(loo_prob_DBT_list)
loo_prob_vBT = np.mean(loo_prob_vBT_list)
loo_nll_wr = np.mean(loo_nll_wr_list)
loo_nll_DBT = np.mean(loo_nll_DBT_list)
loo_nll_vBT = np.mean(loo_nll_vBT_list)
df = pd.DataFrame({'estimator':['winrate','vanilla BT','DBT'],'average rank difference':rank_dif_estimator,
'LOO Prob':[loo_prob_wr,loo_prob_vBT,loo_prob_DBT],
'LOO nll':[loo_nll_wr,loo_nll_vBT,loo_nll_DBT]})
print("--- %s seconds ---" % (time.time() - start_time))
print(df.to_latex(index_names=True, escape=False, index=False,
column_format='c|c|c|c|', float_format="{:0.2f}".format,
header=True, bold_rows=True))
T, N = data.shape[0:2]
f = plt.figure(1, figsize = (10,8))
size_ylabel = 20
size_xlabel = 15
size_title = 15
size_tick = 13
size_legend = 15.4
font_title = "Times New Roman Bold"
random.seed(0)
np.random.seed(0)
color_matrix = c=np.random.rand(N,3)
beta = beta_oracle.reshape((T,N))
ax = plt.subplot(221)
for i in range(N):
ax.plot(range(1,T + 1),beta[:,i],c=color_matrix[i,:],marker = '.',label = 'Team' + str(i),linewidth=1)
ax.tick_params(axis='both', which='major', labelsize=size_tick)
plt.title(r"Oracle $\beta^o$",fontsize = size_title)
plt.xlabel(r"$T$",fontsize = size_xlabel); plt.ylabel(r"${\beta}^o$",fontsize = size_ylabel,rotation = "horizontal")
# bottom, top = plt.ylim()
beta = beta_cv.reshape((T,N))
ax = plt.subplot(222)
for i in range(N):
ax.plot(range(1,T + 1),beta[:,i],c=color_matrix[i,:],marker = '.',label = 'Team' + str(i),linewidth=1)
ax.tick_params(axis='both', which='major', labelsize=size_tick)
plt.title(r"Dynamic Bradley-Terry, Gaussian Kernel",fontsize = size_title)
plt.xlabel(r"$T$",fontsize = size_xlabel); plt.ylabel(r"$\hat{\beta}$",fontsize = size_ylabel,rotation = "horizontal")
# plt.ylim((bottom, top))
beta = winrate.reshape((T,N))
ax = plt.subplot(223)
for i in range(N):
ax.plot(range(1,T + 1),beta[:,i],c=color_matrix[i,:],marker = '.',label = 'Team' + str(i),linewidth=1)
ax.tick_params(axis='both', which='major', labelsize=size_tick)
plt.title(r"Win Rate",fontsize = size_title)
plt.xlabel(r"$T$",fontsize = size_xlabel); plt.ylabel(r"Win Rate",fontsize = 10,rotation = "vertical")
# ax.legend(loc='lower left', fontsize = size_legend,labelspacing = 0.75,bbox_to_anchor=(-0.03,-0.6),ncol = 5)
beta = beta_vBT.reshape((T,N))
ax = plt.subplot(224)
for i in range(N):
ax.plot(range(1,T + 1),beta[:,i],c=color_matrix[i,:],marker = '.',label = 'Team' + str(i),linewidth=1)
ax.tick_params(axis='both', which='major', labelsize=size_tick)
plt.title(r"Vanilla Bradley-Terry",fontsize = size_title)
plt.xlabel(r"$T$",fontsize = size_xlabel); plt.ylabel(r"$\hat{\beta}$",fontsize = size_ylabel,rotation = "horizontal")
plt.subplots_adjust(hspace = 0.3)
plt.show()
f.savefig("compare_beta_NT10_n1_ag.pdf", bbox_inches='tight')
loo_prob_DBT_list
f = plt.figure(1, figsize = (16,8))
size_ylabel = 20
size_xlabel = 15
size_title = 15
size_tick = 13
size_legend = 15.4
font_title = "Times New Roman Bold"
random.seed(0)
np.random.seed(0)
color_list = ['red','blue','green']
x_range = [i for i in range(B)]
ax = plt.subplot(311)
ax.plot(x_range,rank_diff_wr_list,c=color_list[0],marker = '.',label = 'win rate',linewidth=1)
ax.plot(x_range,rank_diff_vBT_list,c=color_list[1],marker = '.',label = 'vBT',linewidth=1)
ax.plot(x_range,rank_diff_DBT_list,c=color_list[2],marker = '.',label = 'DBT',linewidth=1)
ax.tick_params(axis='both', which='major', labelsize=size_tick)
plt.title(r"average rank difference over 20 repeats (agnostic.N,T=10,n=1)",fontsize = size_title)
plt.xlabel(r"Repeat",fontsize = size_xlabel); plt.ylabel(r"ave. rank diff.",fontsize = size_ylabel,rotation = "vertical")
ax.legend(loc='upper left', fontsize = size_legend,labelspacing = 0.75,ncol = 1)
ax = plt.subplot(312)
ax.plot(x_range,loo_nll_vBT_list,c=color_list[1],marker = '.',label = 'vBT',linewidth=1)
ax.plot(x_range,loo_nll_DBT_list,c=color_list[2],marker = '.',label = 'DBT',linewidth=1)
ax.tick_params(axis='both', which='major', labelsize=size_tick)
plt.title(r"LOO nll over 20 repeats (agnostic.N,T=10,n=1)",fontsize = size_title)
plt.xlabel(r"Repeat",fontsize = size_xlabel); plt.ylabel(r"LOO nll",fontsize = size_ylabel,rotation = "vertical")
ax.legend(loc='upper left', fontsize = size_legend,labelspacing = 0.75,ncol = 1)
ax = plt.subplot(313)
ax.plot(x_range,loo_prob_wr_list,c=color_list[0],marker = '.',label = 'win rate',linewidth=1)
ax.plot(x_range,loo_prob_vBT_list,c=color_list[1],marker = '.',label = 'vBT',linewidth=1)
ax.plot(x_range,loo_prob_DBT_list,c=color_list[2],marker = '.',label = 'DBT',linewidth=1)
ax.tick_params(axis='both', which='major', labelsize=size_tick)
plt.title(r"LOO prob over 20 repeats (agnostic.N,T=10,n=1)",fontsize = size_title)
plt.xlabel(r"Repeat",fontsize = size_xlabel); plt.ylabel(r"LOO prob",fontsize = size_ylabel,rotation = "vertical")
ax.legend(loc='upper left', fontsize = size_legend,labelspacing = 0.75,ncol = 1)
plt.subplots_adjust(hspace = 0.6)
plt.show()
f.savefig("perform_NT10_n1_ag.pdf", bbox_inches='tight')
```
```
import glob
import os
import librosa
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
def gen_mfcc_fn(fn, mfcc_window_size, mfcc_stride_size):
X, sample_rate = librosa.load(fn, sr=None, mono=True)
if sample_rate != 44100:
return
mfcc = librosa.feature.mfcc(X, sample_rate,
n_fft=int(mfcc_window_size * sample_rate),
hop_length=int(mfcc_stride_size * sample_rate))
return mfcc.T
def generate_mfccs_for_gmm(parent_dir,
sub_dirs,
file_ext='*.wav',
mfcc_window_size=0.02, mfcc_stride_size=0.01):
mfccs = np.empty((0, 20))
for label, sub_dir in enumerate(sub_dirs):
for fn in glob.glob(os.path.join(parent_dir, sub_dir, file_ext)):
mfcc = gen_mfcc_fn(fn, mfcc_window_size, mfcc_stride_size)
if mfcc is None:
continue
mfccs = np.vstack([mfccs, mfcc])
return mfccs
parent_dir = './UrbanSound8K/audio/'
tr_sub_dirs = ['fold%d'% d for d in range(1, 2)]
mfccs_for_gmm = generate_mfccs_for_gmm(parent_dir, tr_sub_dirs)
print(mfccs_for_gmm.shape)
from sklearn.mixture import GaussianMixture
gmm = GaussianMixture(n_components=64, verbose=10)
gmm.fit(mfccs_for_gmm)
print(mfccs_for_gmm[0].shape)
y = gmm.predict_proba(mfccs_for_gmm[:1])
print(y[0].shape)
import pickle
pickle.dump(gmm, open('gaussian_mixture_model.pkl', 'wb'))
gmm_bak = pickle.load(open('gaussian_mixture_model.pkl', 'rb'))
gmm_bak
sound_class_table = {
'air_conditioner' : 0,
'car_horn' : 1,
'children_playing' : 2,
'dog_bark' : 3,
'drilling' : 4,
'engine_idling' : 5,
'gun_shot' : 6,
'jackhammer' : 7,
'siren' : 8,
'street_music' : 9
}
def segment_window(audio_len, segment_len, segment_stride):
start = 0
while start < audio_len:
yield start, start + segment_len
start += segment_stride
def generate_labels(fn, target_class):
return 1 if int(fn.split('-')[-3]) == sound_class_table[target_class] \
else -1
def generate_F_features(parent_dir,
sub_dirs,
num_segment_needed,
target_class,
file_ext='*.wav',
mfcc_window_size=0.02,
mfcc_stride_size=0.01):
F_features, labels = np.empty((0, 64)), np.array([])
for label, sub_dir in enumerate(sub_dirs):
for fn in glob.glob(os.path.join(parent_dir, sub_dir, file_ext)):
X, sample_rate = librosa.load(fn, sr=None, mono=True)
if sample_rate != 44100:
continue
segment_len = int(sample_rate * 0.1)
segment_stride = int(sample_rate * 0.05)
# file_F_features = np.empty((0, 64))
for start, end in segment_window(X.size, segment_len, segment_stride):
segment_mfccs = librosa.feature.mfcc(X[start:end], sample_rate,
n_fft=int(mfcc_window_size * sample_rate),
hop_length=int(mfcc_stride_size * sample_rate))
segment_F_features = np.sum(gmm.predict_proba(segment_mfccs.T), axis=0) \
/ (segment_mfccs.shape[1])
F_features = np.vstack([F_features, segment_F_features])
labels = np.append(labels, generate_labels(fn, target_class))
if labels.shape[0] >= num_segment_needed:
return np.array(F_features), np.array(labels, dtype=np.int)
# F_features.append(file_F_features)
print("Finished!")
return np.array(F_features), np.array(labels, dtype=np.int)
def extract_test_fn_labels(fn, duration, target_class):
label_file_path = fn.replace('wav', 'txt')
with open(label_file_path) as fd:
lines = fd.readlines()
time_sections_with_label = list(map(lambda x: (float(x[0]), float(x[1]), x[2]), map(lambda x : x.split(), lines)))
time_intervals = np.arange(0.0, duration, 0.05)
labels = np.zeros((time_intervals.shape[0]), dtype=np.int)
for idx, t in enumerate(time_intervals):
labels[idx] = -1
for time_section in time_sections_with_label:
if t < time_section[0] or t > time_section[1]:
continue
if time_section[2] == target_class:
labels[idx] = 1
break
return labels
def gen_test_fn_features(fn):
X, sample_rate = librosa.load(fn, sr=None, mono=True)
if sample_rate != 44100:
return X, sample_rate, None
segment_len = int(sample_rate * 0.1)
segment_stride = int(sample_rate * 0.05)
print(fn)
file_F_features = np.empty((0, 64))
for start, end in segment_window(X.size, segment_len, segment_stride):
segment_mfccs = librosa.feature.mfcc(X[start:end], sample_rate,
n_fft=int(0.02 * sample_rate),
hop_length=int(0.01 * sample_rate))
segment_F_features = np.sum(gmm.predict_proba(segment_mfccs.T), axis=0) \
/ (segment_mfccs.shape[1])
file_F_features = np.vstack([file_F_features, segment_F_features])
return X, sample_rate, file_F_features
def gen_testing_data_for_svm(target_class, parent_dir = '.',
sub_dirs = ['soundscapes_5_events_sub'],
file_ext='*.wav'):
F_features, labels = [], []
for label, sub_dir in enumerate(sub_dirs):
for fn in glob.glob(os.path.join(parent_dir, sub_dir, file_ext)):
X, sample_rate, file_F_features = gen_test_fn_features(fn)
if file_F_features is None:
continue
fn_labels = extract_test_fn_labels(fn, X.size/sample_rate, target_class)
labels.append(fn_labels)
F_features.append(file_F_features)
print("Finished!")
return F_features, labels
# def gen_testing_data_for_svm(target_class, parent_dir = '.',
# sub_dirs = ['soundscapes_5_events_sub'],
# file_ext='*.wav'):
# F_features, labels = [], []
# fns = []
# for label, sub_dir in enumerate(sub_dirs):
# for fn in glob.glob(os.path.join(parent_dir, sub_dir, file_ext)):
# X, sample_rate, file_F_features = gen_test_fn_features(fn)
# if file_F_features is None:
# continue
# fns.append(fn)
# print(fn)
# # fn_labels = extract_test_fn_labels(fn, X.size/sample_rate, target_class)
# # labels.append(fn_labels)
# F_features.append(file_F_features)
# print("Finished!")
# return F_features, fns
def gen_training_data_for_svm(num_target_class_segment, target_class):
parent_dir = './UrbanSound8K/ByClass'
F_features_target_class, labels_target_class = generate_F_features(parent_dir,
[target_class],
num_target_class_segment,
target_class)
F_features_non_target_class = np.empty((0, 64))
labels_non_target_class = np.array([])
for k, _ in sound_class_table.items():
if k == target_class:
continue
tmp_F_features, tmp_labels = generate_F_features(parent_dir,
[k],
int(num_target_class_segment/9),
target_class)
F_features_non_target_class = np.vstack([F_features_non_target_class, tmp_F_features])
labels_non_target_class = np.append(labels_non_target_class, tmp_labels)
return np.vstack([F_features_non_target_class, F_features_target_class]), \
np.append(labels_non_target_class, labels_target_class)
X_all, y_all = gen_training_data_for_svm(1800, target_class='air_conditioner')
print(X_all.shape)
print(y_all.shape)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X_all, y_all, stratify=y_all, train_size=0.85)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
from sklearn.svm import SVC
clf = SVC(kernel='rbf', C=100, gamma=10, probability=True)
clf.fit(X_train, y_train)
print("Training set score: {:.3f}".format(clf.score(X_train, y_train)))
print("Test set score: {:.3f}".format(clf.score(X_test, y_test)))
from sklearn.metrics import confusion_matrix
print(clf.classes_)
confusion_matrix(y_test, clf.predict(X_test))
import pickle
pickle.dump(clf, open('./sound_detectors/air_conditioner_detector.pkl', 'wb'))
F_features_test, labels_test = gen_testing_data_for_svm(target_class='air_conditioner',
parent_dir='./soundscapes',
sub_dirs=['air_conditioner'])
np.savetxt("./sound_detector_test_data/siren_test_features.csv", np.array(F_features_test), delimiter=",")
np.savetxt("./sound_detector_test_data/siren_test_labels.csv", np.array(labels_test), delimiter=",")
print(np.array(F_features_test).shape)
print(np.array(labels_test).shape)
from sklearn.metrics import recall_score, precision_score, f1_score, accuracy_score
recall_scores = []
precision_scores = []
f1_scores = []
accuracy_scores = []
for x, y in zip(F_features_test, labels_test):
preds = clf.predict(x)
recall_scores.append(recall_score(y, preds))
precision_scores.append(precision_score(y, preds))
f1_scores.append(f1_score(y, preds))
accuracy_scores.append(accuracy_score(y, preds))
plt.plot(recall_scores)
plt.show()
plt.plot(precision_scores)
plt.show()
plt.plot(f1_scores)
plt.show()
plt.plot(accuracy_scores)
plt.show()
# 0.696400072903
# 0.697101319222
# 0.669004671073
# 0.8357
print(np.mean(recall_scores))
print(np.mean(precision_scores))
print(np.mean(f1_scores))
print(np.mean(accuracy_scores))
print(len(F_features_test))
# Note: `fns` (the list of test file names) is only returned by the commented-out
# variant of gen_testing_data_for_svm above; it is assumed to be available here.
print(len(fns))
preds = list(map(lambda d: clf.predict(d), F_features_test))
precisions = list(map(lambda d: d.tolist().count(1)/len(d), preds))
print(len(precisions))
plt.plot(precisions)
fs = list(filter(lambda p: p[0]>=.9, zip(precisions, fns)))
print(len(fs))
print(fs)
import os
for f in fs:
    # os.system('cp %s ./hownoisy/data/ByClass/air_conditioner' % (f[1]))
    pass  # keep the loop body syntactically valid while the copy command stays commented out
```
# Handling Missing Data
The difference between data found in many tutorials and data in the real world is that real-world data is rarely clean and homogeneous.
In particular, many interesting datasets will have some amount of data missing.
To make matters even more complicated, different data sources may indicate missing data in different ways.
In this section, we will discuss some general considerations for missing data, discuss how Pandas chooses to represent it, and demonstrate some built-in Pandas tools for handling missing data in Python.
We'll refer to missing data in general as the following values:
* *null*
* *NaN*
* *NA*
## Trade-Offs in Missing Data Conventions
There are a number of schemes that have been developed to indicate the presence of missing data in a table or DataFrame.
Generally, they revolve around one of two strategies: using a *mask* that globally indicates missing values, or choosing a *sentinel value* that indicates a missing entry.
In the masking approach, the mask might be an entirely separate Boolean array, or it may involve appropriation of one bit in the data representation to locally indicate the null status of a value.
In the sentinel approach, the sentinel value could be some data-specific convention, such as indicating a missing integer value with -9999 or some rare bit pattern, or it could be a more global convention, such as indicating a missing floating-point value with NaN (Not a Number), a special value which is part of the IEEE floating-point specification.
None of these approaches is without trade-offs: use of a separate mask array requires allocation of an additional Boolean array, which adds overhead in both storage and computation. A sentinel value reduces the range of valid values that can be represented, and may require extra (often non-optimized) logic in CPU and GPU arithmetic. Common special values like NaN are not available for all data types.
As in most cases where no universally optimal choice exists, different languages and systems use different conventions.
For example, the R language uses reserved bit patterns within each data type as sentinel values indicating missing data, while the SciDB system uses an extra byte attached to every cell to indicate an NA state.
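As a concrete NumPy-level illustration of the two conventions, here is a minimal sketch; the masked-array example is purely illustrative and is not how Pandas stores its data:
```
import numpy as np
import numpy.ma as ma

# Masking approach: a separate Boolean array marks which entries are missing.
masked = ma.masked_array([1, 2, 3, 4], mask=[False, True, False, False])
print(masked.sum())         # 8 -- the masked entry is skipped

# Sentinel approach: a special in-band value (NaN) marks missing entries.
sentinel = np.array([1.0, np.nan, 3.0, 4.0])
print(np.nansum(sentinel))  # 8.0 -- NaN-aware aggregation ignores the sentinel
```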
## Missing Data in Pandas
The way in which Pandas handles missing values is constrained by its reliance on the NumPy package, which does not have a built-in notion of NA values for non-floating-point data types.
Pandas could have followed R's lead in specifying bit patterns for each individual data type to indicate nullness, but this approach turns out to be rather unwieldy.
While R contains four basic data types, NumPy supports *far* more than this: for example, while R has a single integer type, NumPy supports *fourteen* basic integer types once you account for available precisions, signedness, and endianness of the encoding.
Reserving a specific bit pattern in all available NumPy types would lead to an unwieldy amount of overhead in special-casing various operations for various types, likely even requiring a new fork of the NumPy package. Further, for the smaller data types (such as 8-bit integers), sacrificing a bit to use as a mask will significantly reduce the range of values it can represent.
NumPy does have support for masked arrays – that is, arrays that have a separate Boolean mask array attached for marking data as "good" or "bad."
Pandas could have derived from this, but the overhead in storage, computation, and code maintenance makes that an unattractive choice.
With these constraints in mind, Pandas chose to use sentinels for missing data, and further chose to use two already-existing Python null values: the special floating-point ``NaN`` value, and the Python ``None`` object.
This choice has some side effects, as we will see, but in practice ends up being a good compromise in most cases of interest.
### ``None``: Pythonic missing data
The first sentinel value used by Pandas is ``None``, a Python singleton object that is often used for missing data in Python code.
Because it is a Python object, ``None`` cannot be used in any arbitrary NumPy/Pandas array, but only in arrays with data type ``'object'`` (i.e., arrays of Python objects):
```
import numpy as np
import pandas as pd
vals1 = np.array([1, None, 3, 4])
vals1
```
This ``dtype=object`` means that the best common type representation NumPy could infer for the contents of the array is that they are Python objects.
While this kind of object array is useful for some purposes, any operations on the data will be done at the Python level, with much more overhead than the typically fast operations seen for arrays with native types:
```
for dtype in ['object', 'int']:
print("dtype =", dtype)
%timeit np.arange(1E6, dtype=dtype).sum()
print()
```
The use of Python objects in an array also means that if you perform aggregations like ``sum()`` or ``min()`` across an array with a ``None`` value, you will generally get an error:
```
vals1.sum()
```
This reflects the fact that addition between an integer and ``None`` is undefined.
### ``NaN``: Missing numerical data
The other missing data representation, ``NaN`` (acronym for *Not a Number*), is different; it is a special floating-point value recognized by all systems that use the standard IEEE floating-point representation:
```
vals2 = np.array([1, np.nan, 3, 4])
vals2.dtype
```
Notice that NumPy chose a native floating-point type for this array: this means that unlike the object array from before, this array supports fast operations pushed into compiled code.
You should be aware that ``NaN`` is a bit like a data virus–it infects any other object it touches.
Regardless of the operation, the result of arithmetic with ``NaN`` will be another ``NaN``:
```
1 + np.nan
0 * np.nan
```
Note that this means that aggregates over the values are well defined (i.e., they don't result in an error) but not always useful:
```
vals2.sum(), vals2.min(), vals2.max()
```
NumPy does provide some special aggregations that will ignore these missing values:
```
np.nansum(vals2), np.nanmin(vals2), np.nanmax(vals2)
```
Keep in mind that ``NaN`` is specifically a floating-point value; there is no equivalent NaN value for integers, strings, or other types.
### NaN and None in Pandas
``NaN`` and ``None`` both have their place, and Pandas is built to handle the two of them nearly interchangeably, converting between them where appropriate:
```
pd.Series([1, np.nan, 2, None])
```
For types that don't have an available sentinel value, Pandas automatically type-casts when NA values are present.
For example, if we set a value in an integer array to ``np.nan``, it will automatically be upcast to a floating-point type to accommodate the NA:
```
x = pd.Series(range(2), dtype=int)
x
x[0] = None
x
```
Notice that in addition to casting the integer array to floating point, Pandas automatically converts the ``None`` to a ``NaN`` value.
(Be aware that there is a proposal to add a native integer NA to Pandas in the future; as of this writing, it has not been included).
While this type of magic may feel a bit hackish compared to the more unified approach to NA values in domain-specific languages like R, the Pandas sentinel/casting approach works quite well in practice and in my experience only rarely causes issues.
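(In recent Pandas versions this proposal has since landed as the optional nullable ``Int64`` dtype; the following minimal sketch assumes a recent Pandas release and is not used elsewhere in this section.)
```
# Nullable integer dtype: keeps integers intact instead of upcasting to float.
s = pd.Series([1, None, 3], dtype="Int64")
print(s)        # the missing entry is shown as <NA>
print(s.dtype)  # Int64
```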
The following table lists the upcasting conventions in Pandas when NA values are introduced:
|Typeclass | Conversion When Storing NAs | NA Sentinel Value |
|--------------|-----------------------------|------------------------|
| ``floating`` | No change | ``np.nan`` |
| ``object`` | No change | ``None`` or ``np.nan`` |
| ``integer`` | Cast to ``float64`` | ``np.nan`` |
| ``boolean`` | Cast to ``object`` | ``None`` or ``np.nan`` |
Keep in mind that in Pandas, string data is always stored with an ``object`` dtype.
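For instance, here is a minimal sketch of the ``boolean`` row of this table, using the classic NumPy-backed dtypes described above (recent Pandas versions may emit a deprecation warning for this assignment):
```
b = pd.Series([True, False, True])
print(b.dtype)  # bool
b[0] = None     # introducing an NA upcasts the Series to object dtype
print(b.dtype)  # object
```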
## Operating on Null Values
As we have seen, Pandas treats ``None`` and ``NaN`` as essentially interchangeable for indicating missing or null values.
To facilitate this convention, there are several useful methods for detecting, removing, and replacing null values in Pandas data structures.
They are:
- ``isnull()``: Generate a boolean mask indicating missing values
- ``notnull()``: Opposite of ``isnull()``
- ``dropna()``: Return a filtered version of the data
- ``fillna()``: Return a copy of the data with missing values filled or imputed
We will conclude this section with a brief exploration and demonstration of these routines.
### Detecting null values
Pandas data structures have two useful methods for detecting null data: ``isnull()`` and ``notnull()``.
Either one will return a Boolean mask over the data. For example:
```
data = pd.Series([1, np.nan, 'hello', None])
data.isnull()
```
Boolean masks can be used directly as a ``Series`` or ``DataFrame`` index:
```
data[data.notnull()]
```
The ``isnull()`` and ``notnull()`` methods produce similar Boolean results for ``DataFrame``s.
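For example, a quick sketch on a throwaway ``DataFrame``:
```
df_small = pd.DataFrame({'a': [1, np.nan], 'b': ['x', None]})
df_small.isnull()
```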
### Dropping null values
In addition to the masking used before, there are the convenience methods, ``dropna()``
(which removes NA values) and ``fillna()`` (which fills in NA values). For a ``Series``,
the result is straightforward:
```
data.dropna()
```
Notice that by default ``dropna()`` leaves the original ``DataFrame``/``Series`` untouched and returns a filtered copy. To drop NAs from the original object, pass the argument ``inplace=True``; this has to be used with caution.
```
data
data.dropna(inplace=True)
data
```
For a ``DataFrame``, there are more options.
Consider the following ``DataFrame``:
```
df = pd.DataFrame([[1, np.nan, 2],
[2, 3, 5],
[np.nan, 4, 6]])
df
```
We cannot drop single values from a ``DataFrame``; we can only drop full rows or full columns.
Depending on the application, you might want one or the other, so ``dropna()`` gives a number of options for a ``DataFrame``.
By default, ``dropna()`` will drop all rows in which *any* null value is present:
```
df.dropna()
```
Alternatively, you can drop NA values along a different axis; ``axis=1`` drops all columns containing a null value:
```
df.dropna(axis='columns')
```
But this drops some good data as well; you might rather be interested in dropping rows or columns with *all* NA values, or a majority of NA values.
This can be specified through the ``how`` or ``thresh`` parameters, which allow fine control of the number of nulls to allow through.
The default is ``how='any'``, such that any row or column (depending on the ``axis`` keyword) containing a null value will be dropped.
You can also specify ``how='all'``, which will only drop rows/columns that are *all* null values:
```
df[3] = np.nan
df
df.dropna(axis='columns', how='all')
```
For finer-grained control, the ``thresh`` parameter lets you specify a minimum number of non-null values for the row/column to be kept:
```
df.dropna(axis='rows', thresh=3)
```
Here the first and last row have been dropped, because they contain only two non-null values.
### Filling null values
Sometimes rather than dropping NA values, you'd rather replace them with a valid value.
This value might be a single number like zero, or it might be some sort of imputation or interpolation from the good values.
You could do this in-place using the ``isnull()`` method as a mask, but because it is such a common operation Pandas provides the ``fillna()`` method, which returns a copy of the array with the null values replaced.
Consider the following ``Series``:
```
data = pd.Series([1, np.nan, 2, None, 3], index=list('abcde'))
data
```
We can fill NA entries with a single value, such as zero:
```
data.fillna(0)
```
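For comparison, here is a minimal sketch of the manual masking approach mentioned above, operating on a copy so the original ``Series`` stays untouched:
```
# Equivalent to data.fillna(0), written with an explicit Boolean mask.
filled = data.copy()
filled[filled.isnull()] = 0
filled
```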
We can specify a forward-fill to propagate the previous value forward:
```
# forward-fill
data.fillna(method='ffill')
```
Or we can specify a back-fill to propagate the next values backward:
```
# back-fill
data.fillna(method='bfill')
```
For ``DataFrame``s, the options are similar, but we can also specify an ``axis`` along which the fills take place:
```
df
df.fillna(method='ffill', axis=1)
```
Notice that if a previous value is not available during a forward fill, the NA value remains.
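If you also want such leading NAs filled, one option (a sketch, not the only approach) is to chain a forward fill with a backward fill:
```
# Forward-fill along columns, then back-fill anything that is still NA.
df.fillna(method='ffill', axis=1).fillna(method='bfill', axis=1)
```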
## Dependencies
```
import os, random, warnings
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from transformers import TFDistilBertModel
from tokenizers import BertWordPieceTokenizer
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers, metrics, losses
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dense, Input, Dropout, GlobalAveragePooling1D, Concatenate
def seed_everything(seed=0):
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
SEED = 0
seed_everything(SEED)
warnings.filterwarnings("ignore")
# Auxiliary functions
def plot_metrics(history, metric_list):
fig, axes = plt.subplots(len(metric_list), 1, sharex='col', figsize=(20, len(metric_list) * 5))
axes = axes.flatten()
for index, metric in enumerate(metric_list):
axes[index].plot(history[metric], label='Train %s' % metric)
axes[index].plot(history['val_%s' % metric], label='Validation %s' % metric)
axes[index].legend(loc='best', fontsize=16)
axes[index].set_title(metric)
plt.xlabel('Epochs', fontsize=16)
sns.despine()
plt.show()
def jaccard(str1, str2):
a = set(str1.lower().split())
b = set(str2.lower().split())
c = a.intersection(b)
return float(len(c)) / (len(a) + len(b) - len(c))
def evaluate_model(train_set, validation_set):
train_set['jaccard'] = train_set.apply(lambda x: jaccard(x['selected_text'], x['prediction']), axis=1)
validation_set['jaccard'] = validation_set.apply(lambda x: jaccard(x['selected_text'], x['prediction']), axis=1)
print('Train set Jaccard: %.3f' % train_set['jaccard'].mean())
print('Validation set Jaccard: %.3f' % validation_set['jaccard'].mean())
print('\nMetric by sentiment')
for sentiment in train_df['sentiment'].unique():
print('\nSentiment == %s' % sentiment)
print('Train set Jaccard: %.3f' % train_set[train_set['sentiment'] == sentiment]['jaccard'].mean())
print('Validation set Jaccard: %.3f' % validation_set[validation_set['sentiment'] == sentiment]['jaccard'].mean())
# Transformer inputs
def get_start_end(text, selected_text, offsets, max_seq_len):
# find the intersection between text and selected text
idx_start, idx_end = None, None
for index in (i for i, c in enumerate(text) if c == selected_text[0]):
if text[index:index + len(selected_text)] == selected_text:
idx_start = index
idx_end = index + len(selected_text)
break
intersection = [0] * len(text)
if idx_start != None and idx_end != None:
for char_idx in range(idx_start, idx_end):
intersection[char_idx] = 1
targets = np.zeros(len(offsets))
for i, (o1, o2) in enumerate(offsets):
if sum(intersection[o1:o2]) > 0:
targets[i] = 1
# OHE targets
target_start = np.zeros(len(offsets))
target_end = np.zeros(len(offsets))
targets_nonzero = np.nonzero(targets)[0]
if len(targets_nonzero) > 0:
target_start[targets_nonzero[0]] = 1
target_end[targets_nonzero[-1]] = 1
return target_start, target_end
def preprocess(text, selected_text, context, tokenizer, max_seq_len):
context_encoded = tokenizer.encode(context)
context_encoded = context_encoded.ids[1:-1]
encoded = tokenizer.encode(text)
encoded.pad(max_seq_len)
encoded.truncate(max_seq_len)
input_ids = encoded.ids
offsets = encoded.offsets
attention_mask = encoded.attention_mask
token_type_ids = ([0] * 3) + ([1] * (max_seq_len - 3))
input_ids = [101] + context_encoded + [102] + input_ids
# update input ids and attentions masks size
input_ids = input_ids[:-3]
attention_mask = [1] * 3 + attention_mask[:-3]
target_start, target_end = get_start_end(text, selected_text, offsets, max_seq_len)
x = [np.asarray(input_ids, dtype=np.int32),
np.asarray(attention_mask, dtype=np.int32),
np.asarray(token_type_ids, dtype=np.int32)]
y = [np.asarray(target_start, dtype=np.int32),
np.asarray(target_end, dtype=np.int32)]
return (x, y)
def get_data(df, tokenizer, MAX_LEN):
x_input_ids = []
x_attention_masks = []
x_token_type_ids = []
y_start = []
y_end = []
for row in df.itertuples():
x, y = preprocess(getattr(row, "text"), getattr(row, "selected_text"), getattr(row, "sentiment"), tokenizer, MAX_LEN)
x_input_ids.append(x[0])
x_attention_masks.append(x[1])
x_token_type_ids.append(x[2])
y_start.append(y[0])
y_end.append(y[1])
x_train = [np.asarray(x_input_ids), np.asarray(x_attention_masks), np.asarray(x_token_type_ids)]
y_train = [np.asarray(y_start), np.asarray(y_end)]
return x_train, y_train
def decode(pred_start, pred_end, text, tokenizer):
offset = tokenizer.encode(text).offsets
if pred_end >= len(offset):
pred_end = len(offset)-1
decoded_text = ""
for i in range(pred_start, pred_end+1):
decoded_text += text[offset[i][0]:offset[i][1]]
if (i+1) < len(offset) and offset[i][1] < offset[i+1][0]:
decoded_text += " "
return decoded_text
```
# Load data
```
train_df = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/train.csv')
print('Train samples: %s' % len(train_df))
display(train_df.head())
```
# Preprocess
```
train_df['text'].fillna('', inplace=True)
train_df['selected_text'].fillna('', inplace=True)
train_df["text"] = train_df["text"].apply(lambda x: x.lower())
train_df["selected_text"] = train_df["selected_text"].apply(lambda x: x.lower())
train_df['text'] = train_df['text'].astype(str)
train_df['selected_text'] = train_df['selected_text'].astype(str)
```
# Model parameters
```
MAX_LEN = 128
BATCH_SIZE = 64
EPOCHS = 10
LEARNING_RATE = 1e-5
ES_PATIENCE = 2
base_path = '/kaggle/input/qa-transformers/distilbert/'
base_model_path = base_path + 'distilbert-base-uncased-distilled-squad-tf_model.h5'
config_path = base_path + 'distilbert-base-uncased-distilled-squad-config.json'
vocab_path = base_path + 'bert-large-uncased-vocab.txt'
model_path = 'model.h5'
```
# Tokenizer
```
tokenizer = BertWordPieceTokenizer(vocab_path, lowercase=True)
tokenizer.save('./')
```
# Train/validation split
```
train, validation = train_test_split(train_df, test_size=0.2, random_state=SEED)
x_train, y_train = get_data(train, tokenizer, MAX_LEN)
x_valid, y_valid = get_data(validation, tokenizer, MAX_LEN)
print('Train set size: %s' % len(x_train[0]))
print('Validation set size: %s' % len(x_valid[0]))
```
# Model
```
def model_fn():
input_ids = Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
attention_mask = Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')
token_type_ids = Input(shape=(MAX_LEN,), dtype=tf.int32, name='token_type_ids')
base_model = TFDistilBertModel.from_pretrained(base_model_path, config=config_path, name="base_model")
sequence_output = base_model({'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids})
last_state = sequence_output[0]
x = GlobalAveragePooling1D()(last_state)
y_start = Dense(MAX_LEN, activation='softmax', name='y_start')(x)
y_end = Dense(MAX_LEN, activation='softmax', name='y_end')(x)
model = Model(inputs=[input_ids, attention_mask, token_type_ids], outputs=[y_start, y_end])
model.compile(optimizers.Adam(lr=LEARNING_RATE),
loss=losses.CategoricalCrossentropy(),
metrics=[metrics.CategoricalAccuracy()])
return model
model = model_fn()
model.summary()
```
# Train
```
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE,
restore_best_weights=True, verbose=1)
history = model.fit(x_train, y_train,
validation_data=(x_valid, y_valid),
callbacks=[es],
epochs=EPOCHS,
verbose=1).history
model.save_weights(model_path)
```
# Model loss graph
```
sns.set(style="whitegrid")
plot_metrics(history, metric_list=['loss', 'y_start_loss', 'y_end_loss', 'y_start_categorical_accuracy', 'y_end_categorical_accuracy'])
```
# Model evaluation
```
train_preds = model.predict(x_train)
valid_preds = model.predict(x_valid)
train['start'] = train_preds[0].argmax(axis=-1)
train['end'] = train_preds[1].argmax(axis=-1)
train['prediction'] = train.apply(lambda x: decode(x['start'], x['end'], x['text'], tokenizer), axis=1)
train["prediction"] = train["prediction"].apply(lambda x: '.' if x.strip() == '' else x)
validation['start'] = valid_preds[0].argmax(axis=-1)
validation['end'] = valid_preds[1].argmax(axis=-1)
validation['prediction'] = validation.apply(lambda x: decode(x['start'], x['end'], x['text'], tokenizer), axis=1)
validation["prediction"] = validation["prediction"].apply(lambda x: '.' if x.strip() == '' else x)
evaluate_model(train, validation)
```
# Visualize predictions
```
print('Train set')
display(train.head(10))
print('Validation set')
display(validation.head(10))
```
<div class="alert alert-block alert-info">
Section of the book chapter: <b>5.3 Model Selection, Optimization and Evaluation</b>
</div>
# 5. Model Selection and Evaluation
**Table of Contents**
* [5.1 Hyperparameter Optimization](#5.1-Hyperparameter-Optimization)
* [5.2 Model Evaluation](#5.2-Model-Evaluation)
**Learnings:**
- how to optimize machine learning (ML) models with grid search, random search and Bayesian optimization,
- how to evaluate ML models.
### Packages
```
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
# ignore warnings
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.ensemble import RandomForestRegressor
import utils
```
### Read in Data
**Dataset:** Felix M. Riese and Sina Keller, "Hyperspectral benchmark dataset on soil moisture", Dataset, Zenodo, 2018. [DOI:10.5281/zenodo.1227836](http://doi.org/10.5281/zenodo.1227836) and [GitHub](https://github.com/felixriese/hyperspectral-soilmoisture-dataset)
**Introducing paper:** Felix M. Riese and Sina Keller, “Introducing a Framework of Self-Organizing Maps for Regression of Soil Moisture with Hyperspectral Data,” in IGARSS 2018 - 2018 IEEE International Geoscience and Remote Sensing Symposium, Valencia, Spain, 2018, pp. 6151-6154. [DOI:10.1109/IGARSS.2018.8517812](https://doi.org/10.1109/IGARSS.2018.8517812)
```
X_train, X_test, y_train, y_test = utils.get_xy_split()
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
```
### Fix Random State
```
np.random.seed(42)
```
***
## 5.1 Hyperparameter Optimization
Content:
- [5.1.1 Grid Search](#5.1.1-Grid-Search)
- [5.1.2 Randomized Search](#5.1.2-Randomized-Search)
- [5.1.3 Bayesian Optimization](#5.1.3-Bayesian-Optimization)
### 5.1.1 Grid Search
```
# NBVAL_IGNORE_OUTPUT
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
# example model: support vector regressor
model = SVR(kernel="rbf")
# define parameter grid to be tested
params = {
"C": np.logspace(-4, 4, 9),
"gamma": np.logspace(-4, 4, 9)}
# set up grid search and run it on the data
gs = GridSearchCV(model, params)
%timeit gs.fit(X_train, y_train)
print("R2 score = {0:.2f} %".format(gs.score(X_test, y_test)*100))
```
### 5.1.2 Randomized Search
```
# NBVAL_IGNORE_OUTPUT
from sklearn.svm import SVR
from sklearn.model_selection import RandomizedSearchCV
# example model: support vector regressor
model = SVR(kernel="rbf")
# define parameter grid to be tested
params = {
"C": np.logspace(-4, 4, 9),
"gamma": np.logspace(-4, 4, 9)}
# set up randomized search and run it on the data
gsr = RandomizedSearchCV(model, params, n_iter=15, refit=True)
%timeit gsr.fit(X_train, y_train)
print("R2 score = {0:.2f} %".format(gsr.score(X_test, y_test)*100))
```
### 5.1.3 Bayesian Optimization
Implementation: [github.com/fmfn/BayesianOptimization](https://github.com/fmfn/BayesianOptimization)
```
# NBVAL_IGNORE_OUTPUT
from sklearn.svm import SVR
from bayes_opt import BayesianOptimization
# define function to be optimized
def opt_func(C, gamma):
model = SVR(C=C, gamma=gamma)
return model.fit(X_train, y_train).score(X_test, y_test)
# set bounded region of parameter space
pbounds = {'C': (1e-5, 1e4), 'gamma': (1e-5, 1e4)}
# define optimizer
optimizer = BayesianOptimization(
f=opt_func,
pbounds=pbounds,
random_state=1)
# optimize
%time optimizer.maximize(init_points=2, n_iter=15)
print("R2 score = {0:.2f} %".format(optimizer.max["target"]*100))
```
***
## 5.2 Model Evaluation
Content:
- [5.2.1 Generate Exemplary Data](#5.2.1-Generate-Exemplary-Data)
- [5.2.2 Plot the Data](#5.2.2-Plot-the-Data)
- [5.2.3 Evaluation Metrics](#5.2.3-Evaluation-Metrics)
```
import sklearn.metrics as me
```
### 5.2.1 Generate Exemplary Data
```
### generate example data
np.random.seed(1)
# define x grid
x_grid = np.linspace(0, 10, 11)
y_model = x_grid*0.5
# define first dataset without outlier
y1 = np.array([y + np.random.normal(scale=0.2) for y in y_model])
# define second dataset with outlier
y2 = np.copy(y1)
y2[9] = 0.5
# define third dataset with higher variance
y3 = np.array([y + np.random.normal(scale=1.0) for y in y_model])
```
### 5.2.2 Plot the Data
```
# plot example data
fig, (ax1, ax2, ax3) = plt.subplots(1,3, figsize=(12,4))
fontsize = 18
titleweight = "bold"
titlepad = 10
scatter_label = "Data"
scatter_alpha = 0.7
scatter_s = 100
ax1.scatter(x_grid, y1, label=scatter_label, alpha=scatter_alpha, s=scatter_s)
ax1.set_title("(a) Low var.", fontsize=fontsize, fontweight=titleweight, pad=titlepad)
ax2.scatter(x_grid, y2, label=scatter_label, alpha=scatter_alpha, s=scatter_s)
ax2.set_title("(b) Low var. + outlier", fontsize=fontsize, fontweight=titleweight, pad=titlepad)
ax3.scatter(x_grid, y3, label=scatter_label, alpha=scatter_alpha, s=scatter_s)
ax3.set_title("(c) Higher var.", fontsize=fontsize, fontweight=titleweight, pad=titlepad)
for i, ax in enumerate([ax1, ax2, ax3]):
i += 1
# red line
ax.plot(x_grid, y_model, label="Model", c="tab:red", linestyle="dashed", linewidth=4, alpha=scatter_alpha)
# x-axis cosmetics
ax.set_xlabel("x in a.u.", fontsize=fontsize)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(fontsize)
# y-axis cosmetics
if i != 1:
ax.set_yticklabels([])
else:
ax.set_ylabel("y in a.u.", fontsize=fontsize, rotation=90)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(fontsize)
ax.set_xlim(-0.5, 10.5)
ax.set_ylim(-0.5, 6.5)
# ax.set_title("Example "+str(i), fontsize=fontsize)
if i == 2:
ax.legend(loc=2, fontsize=fontsize*1.0, frameon=True)
plt.tight_layout()
plt.savefig("plots/metrics_plot.pdf", bbox_inches="tight")
```
### 5.2.3 Evaluation Metrics
```
# calculating the metrics
for i, y in enumerate([y1, y2, y3]):
print("Example", i+1)
print("- MAE = {:.2f}".format(me.mean_absolute_error(y_model, y)))
print("- MSE = {:.2f}".format(me.mean_squared_error(y_model, y)))
print("- RMSE = {:.2f}".format(np.sqrt(me.mean_squared_error(y_model, y))))
print("- R2 = {:.2f}%".format(me.r2_score(y_model, y)*100))
print("-"*20)
```
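As a quick cross-check (a small sketch, not part of the original chapter), the same metrics can be recomputed from their definitions with plain NumPy, reusing `y_model` and `y1` from above:
```
# recompute the metrics for the first example by hand
residuals = y1 - y_model
mae_manual = np.mean(np.abs(residuals))                   # mean absolute error
mse_manual = np.mean(residuals**2)                        # mean squared error
rmse_manual = np.sqrt(mse_manual)                         # root mean squared error
ss_res = np.sum(residuals**2)                             # residual sum of squares
ss_tot = np.sum((y_model - np.mean(y_model))**2)          # total sum of squares
r2_manual = 1.0 - ss_res/ss_tot                           # coefficient of determination
print("MAE = {:.2f}, MSE = {:.2f}, RMSE = {:.2f}, R2 = {:.2f}%".format(
    mae_manual, mse_manual, rmse_manual, r2_manual*100))
```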
|
github_jupyter
|
```
import numpy as np
import matplotlib.pyplot as plt
from sympy import Symbol, integrate
%matplotlib notebook
```
### Smooth local paths
We will use cubic spirals to generate smooth local paths. Without loss of generality, let the normalized path parameter $x$ smoothly change from 0 to 1 and impose the following condition on the curvature
$\kappa = f'(x) = K(x(1-x))^n $
This ensures the curvature vanishes at the beginning and end of the path. Integrating, the yaw changes as
$\theta(x) = \int_0^x f'(u)\,du$
With $n = 1$ we get a cubic spiral, with $n=2$ a quintic spiral, and so on. Let us use the sympy package to find the family of spirals
1. Declare $x$ a Symbol
2. You want to find the integral of $f'(x)$
3. You can choose $K$ so that all coefficients are integers
Verify if $\theta(0) = 0$ and $\theta(1) = 1$
```
K = 30  # choose for cubic/quintic
n = 2  # choose for cubic/quintic
x = Symbol('x')  # declare as Symbol
print(integrate(K*(x*(1-x))**n, x)) # complete the expression
#write function to compute a cubic spiral
#input/ output can be any theta
def cubic_spiral(theta_i, theta_f, n=10):
x = np.linspace(0, 1, num=n)
theta = (-2*x**3 + 3*x**2) * (theta_f-theta_i) + theta_i
return theta
# pass
def quintic_spiral(theta_i, theta_f, n=10):
x = np.linspace(0, 1, num=n)
theta = (6*x**5 - 15*x**4 + 10*x**3)* (theta_f-theta_i) + theta_i
return theta
# pass
def circular_spiral(theta_i, theta_f, n=10):
x = np.linspace(0, 1, num=n)
theta = x* (theta_f-theta_i) + theta_i
return theta
```
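As a quick check of the endpoint condition mentioned above (a small verification sketch, not part of the original exercise), the normalized spirals should start at 0, end at 1, and increase monotonically:
```
# verify theta(0) = 0 and theta(1) = 1 for the normalized spirals
for name, spiral in [("cubic", cubic_spiral),
                     ("quintic", quintic_spiral),
                     ("circular", circular_spiral)]:
    th = spiral(0.0, 1.0, n=101)
    print(f"{name:9s} theta(0) = {th[0]:.3f}, theta(1) = {th[-1]:.3f}, "
          f"monotone = {np.all(np.diff(th) >= 0)}")
```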
### Plotting
Plot cubic, quintic spirals along with how $\theta$ will change when moving in a circular arc. Remember circular arc is when $\omega $ is constant
```
theta_i = 1.57
theta_f = 0
n = 10
x = np.linspace(0, 1, num=n)
plt.figure()
plt.plot(x,circular_spiral(theta_i, theta_f, n),label='Circular')
plt.plot(x,cubic_spiral(theta_i, theta_f, n), label='Cubic')
plt.plot(x,quintic_spiral(theta_i, theta_f, n), label='Quintic')
plt.grid()
plt.legend()
```
## Trajectory
Using the spirals, convert them to trajectories $\{(x_i,y_i,\theta_i)\}$. Remember the unicycle model
$dx = v\cos \theta dt$
$dy = v\sin \theta dt$
$\theta$ is given by the spiral functions you just wrote. Use cumsum() in numpy to calculate the trajectory points $\{(x_i, y_i)\}$
What happens when you change $v$?
```
v = 1
dt = 0.1
theta_i = 1.57
theta_f = 0
n = 100
theta_cubic = cubic_spiral(theta_i, theta_f, n)
theta_quintic = quintic_spiral(theta_i, theta_f, int(n+(23/1000)*n))
theta_circular = circular_spiral(theta_i, theta_f, int(n-(48/1000)*n))
# print(theta)
def trajectory(v,dt,theta):
dx = v*np.cos(theta) *dt
dy = v*np.sin(theta) *dt
# print(dx)
x = np.cumsum(dx)
y = np.cumsum(dy)
return x,y
# plot trajectories for circular/ cubic/ quintic
plt.figure()
plt.plot(*trajectory(v,dt,theta_circular), label='Circular')
plt.plot(*trajectory(v,dt,theta_cubic), label='Cubic')
plt.plot(*trajectory(v,dt,theta_quintic), label='Quintic')
plt.grid()
plt.legend()
```
## Symmetric poses
We have been doing only examples with $|\theta_i - \theta_f| = \pi/2$.
What about other orientation changes? Given below is an array of terminal angles (they are in degrees!). Start from 0 deg and plot the family of trajectories
```
dt = 0.1
thetas = [15, 30, 45, 60, 90, 120, 150, 180] #convert to radians
plt.figure()
for tf in thetas:
t = cubic_spiral(0, np.deg2rad(tf),50)
x = np.cumsum(np.cos(t)*dt)
y = np.cumsum(np.sin(t)*dt)
plt.plot(x, y, label=f'0 to {tf} degree')
plt.grid()
plt.legend()
# On the same plot, move from 180 to 180 - theta
#thetas =
plt.figure()
for tf in thetas:
t = cubic_spiral(np.pi, np.pi-np.deg2rad(tf),50)
x = np.cumsum(np.cos(t)*dt)
y = np.cumsum(np.sin(t)*dt)
plt.plot(x, y, label=f'180 to {180-tf} degree')
plt.grid()
plt.legend()
```
Modify your code to print the following for the positive terminal angles $\{\theta_f\}$
1. Final x, y position in corresponding trajectory: $x_f, y_f$
2. $\frac{y_f}{x_f}$ and $\tan \frac{\theta_f}{2}$
What do you notice?
What happens when $v$ is doubled?
```
dt = 0.1
thetas = [15, 30, 45, 60, 90, 120, 150, 180] #convert to radians
# plt.figure()
for tf in thetas:
t = cubic_spiral(0, np.deg2rad(tf),50)
x = np.cumsum(np.cos(t)*dt)
y = np.cumsum(np.sin(t)*dt)
print(f'tf: {tf} x_f : {x[-1]} y_f: {y[-1]} y_f/x_f : {y[-1]/x[-1]} tan (theta_f/2) : {np.tan(np.deg2rad(tf)/2)}')
```
These are called *symmetric poses*. With this spiral-fitting approach, only symmetric poses can be reached.
In order to move between any 2 arbitrary poses, you will have to find an intermediate pose that is pair-wise symmetric to the start and the end pose.
What should be the intermediate pose? There are infinite possibilities. We would have to formulate it as an optimization problem. As they say, that has to be left for another time!
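The relationship printed above ($y_f/x_f = \tan(\theta_f/2)$, with the start heading at 0) says that the chord from the start pose to the end pose is oriented along the average of the two headings. Below is a small sketch of a checker for that condition; the helper name `is_symmetric` and the tolerance are illustrative choices, not part of the original exercise.
```
def is_symmetric(pose_a, pose_b, tol=1e-3):
    """Check the symmetric-pose condition: the chord joining the two
    positions is oriented along the mean of the two headings."""
    xa, ya, tha = pose_a
    xb, yb, thb = pose_b
    chord_angle = np.arctan2(yb - ya, xb - xa)
    mean_heading = 0.5*(tha + thb)
    # wrap the difference to [-pi, pi] before comparing
    diff = np.arctan2(np.sin(chord_angle - mean_heading),
                      np.cos(chord_angle - mean_heading))
    return abs(diff) < tol

# example: the 0 -> 90 degree cubic-spiral endpoint above is symmetric with the start pose
t = cubic_spiral(0, np.deg2rad(90), 50)
x = np.cumsum(np.cos(t)*dt)
y = np.cumsum(np.sin(t)*dt)
print(is_symmetric((0.0, 0.0, 0.0), (x[-1], y[-1], np.deg2rad(90))))
```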
```
```
|
github_jupyter
|
## Add cancer analysis
Analysis of results from `run_add_cancer_classification.py`.
We hypothesized that adding cancers in a principled way (e.g. by similarity to the target cancer) would lead to improved performance relative to both a single-cancer model (using only the target cancer type), and a pan-cancer model using all cancer types without regard for similarity to the target cancer.
Script parameters:
* RESULTS_DIR: directory to read experiment results from
* IDENTIFIER: {gene}\_{cancer_type} target identifier to plot results for
```
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pancancer_evaluation.config as cfg
import pancancer_evaluation.utilities.analysis_utilities as au
RESULTS_DIR = os.path.join(cfg.repo_root, 'add_cancer_results', 'add_cancer')
```
### Load data
```
add_cancer_df = au.load_add_cancer_results(RESULTS_DIR, load_cancer_types=True)
print(add_cancer_df.shape)
add_cancer_df.sort_values(by=['gene', 'holdout_cancer_type']).head()
# load data from previous single-cancer and pan-cancer experiments
# this is to put the add cancer results in the context of our previous results
pancancer_dir = os.path.join(cfg.results_dir, 'pancancer')
pancancer_dir2 = os.path.join(cfg.results_dir, 'vogelstein_s1_results', 'pancancer')
single_cancer_dir = os.path.join(cfg.results_dir, 'single_cancer')
single_cancer_dir2 = os.path.join(cfg.results_dir, 'vogelstein_s1_results', 'single_cancer')
single_cancer_df1 = au.load_prediction_results(single_cancer_dir, 'single_cancer')
single_cancer_df2 = au.load_prediction_results(single_cancer_dir2, 'single_cancer')
single_cancer_df = pd.concat((single_cancer_df1, single_cancer_df2))
print(single_cancer_df.shape)
single_cancer_df.head()
pancancer_df1 = au.load_prediction_results(pancancer_dir, 'pancancer')
pancancer_df2 = au.load_prediction_results(pancancer_dir2, 'pancancer')
pancancer_df = pd.concat((pancancer_df1, pancancer_df2))
print(pancancer_df.shape)
pancancer_df.head()
single_cancer_comparison_df = au.compare_results(single_cancer_df,
identifier='identifier',
metric='aupr',
correction=True,
correction_alpha=0.001,
verbose=False)
pancancer_comparison_df = au.compare_results(pancancer_df,
identifier='identifier',
metric='aupr',
correction=True,
correction_alpha=0.001,
verbose=False)
experiment_comparison_df = au.compare_results(single_cancer_df,
pancancer_df=pancancer_df,
identifier='identifier',
metric='aupr',
correction=True,
correction_alpha=0.05,
verbose=False)
experiment_comparison_df.sort_values(by='corr_pval').head(n=10)
```
### Plot change in performance as cancers are added
```
IDENTIFIER = 'BRAF_COAD'
# IDENTIFIER = 'EGFR_ESCA'
# IDENTIFIER = 'EGFR_LGG'
# IDENTIFIER = 'KRAS_CESC'
# IDENTIFIER = 'PIK3CA_ESCA'
# IDENTIFIER = 'PIK3CA_STAD'
# IDENTIFIER = 'PTEN_COAD'
# IDENTIFIER = 'PTEN_BLCA'
# IDENTIFIER = 'TP53_OV'
# IDENTIFIER = 'NF1_GBM'
GENE = IDENTIFIER.split('_')[0]
gene_df = add_cancer_df[(add_cancer_df.gene == GENE) &
(add_cancer_df.data_type == 'test') &
(add_cancer_df.signal == 'signal')].copy()
# make seaborn treat x axis as categorical
gene_df.num_train_cancer_types = gene_df.num_train_cancer_types.astype(str)
gene_df.loc[(gene_df.num_train_cancer_types == '-1'), 'num_train_cancer_types'] = 'all'
sns.set({'figure.figsize': (14, 6)})
sns.pointplot(data=gene_df, x='num_train_cancer_types', y='aupr', hue='identifier',
order=['0', '1', '2', '4', 'all'])
plt.legend(bbox_to_anchor=(1.15, 0.5), loc='center right', borderaxespad=0., title='Cancer type')
plt.title('Adding cancer types by confusion matrix similarity, {} mutation prediction'.format(GENE), size=13)
plt.xlabel('Number of added cancer types', size=13)
plt.ylabel('AUPR', size=13)
id_df = add_cancer_df[(add_cancer_df.identifier == IDENTIFIER) &
(add_cancer_df.data_type == 'test') &
(add_cancer_df.signal == 'signal')].copy()
# make seaborn treat x axis as categorical
id_df.num_train_cancer_types = id_df.num_train_cancer_types.astype(str)
id_df.loc[(id_df.num_train_cancer_types == '-1'), 'num_train_cancer_types'] = 'all'
sns.set({'figure.figsize': (14, 6)})
cat_order = ['0', '1', '2', '4', 'all']
sns.pointplot(data=id_df, x='num_train_cancer_types', y='aupr', hue='identifier',
order=cat_order)
plt.legend([],[], frameon=False)
plt.title('Adding cancer types by confusion matrix similarity, {} mutation prediction'.format(IDENTIFIER),
size=13)
plt.xlabel('Number of added cancer types', size=13)
plt.ylabel('AUPR', size=13)
# annotate points with cancer types
def label_points(x, y, cancer_types, gene, ax):
a = pd.DataFrame({'x': x, 'y': y, 'cancer_types': cancer_types})
for i, point in a.iterrows():
if gene in ['TP53', 'PIK3CA'] and point['x'] == 4:
ax.text(point['x']+0.05,
point['y']+0.005,
str(point['cancer_types'].replace(' ', '\n')),
bbox=dict(facecolor='none', edgecolor='black', boxstyle='round'),
ha='left', va='center')
else:
ax.text(point['x']+0.05,
point['y']+0.005,
str(point['cancer_types'].replace(' ', '\n')),
bbox=dict(facecolor='none', edgecolor='black', boxstyle='round'))
cat_to_loc = {c: i for i, c in enumerate(cat_order)}
group_id_df = (
id_df.groupby(['num_train_cancer_types', 'train_cancer_types'])
.mean()
.reset_index()
)
label_points([cat_to_loc[c] for c in group_id_df.num_train_cancer_types],
group_id_df.aupr,
group_id_df.train_cancer_types,
GENE,
plt.gca())
```
### Plot gene/cancer type "best model" performance vs. single/pan-cancer models
```
id_df = add_cancer_df[(add_cancer_df.identifier == IDENTIFIER) &
(add_cancer_df.data_type == 'test')].copy()
best_num = (
id_df[id_df.signal == 'signal']
.groupby('num_train_cancer_types')
.mean()
.reset_index()
.sort_values(by='aupr', ascending=False)
.iloc[0, 0]
)
print(best_num)
best_id_df = (
id_df.loc[id_df.num_train_cancer_types == best_num, :]
.drop(columns=['num_train_cancer_types', 'how_to_add', 'train_cancer_types'])
)
best_id_df['train_set'] = 'best_add'
sc_id_df = (
id_df.loc[id_df.num_train_cancer_types == 1, :]
.drop(columns=['num_train_cancer_types', 'how_to_add', 'train_cancer_types'])
)
sc_id_df['train_set'] = 'single_cancer'
pc_id_df = (
id_df.loc[id_df.num_train_cancer_types == -1, :]
.drop(columns=['num_train_cancer_types', 'how_to_add', 'train_cancer_types'])
)
pc_id_df['train_set'] = 'pancancer'
all_id_df = pd.concat((sc_id_df, best_id_df, pc_id_df), sort=False)
all_id_df.head()
sns.set()
sns.boxplot(data=all_id_df, x='train_set', y='aupr', hue='signal', hue_order=['signal', 'shuffled'])
plt.title('{}, single/best/pancancer predictors'.format(IDENTIFIER))
plt.xlabel('Training data')
plt.ylabel('AUPR')
plt.legend(title='Signal')
print('Single cancer significance: {}'.format(
single_cancer_comparison_df.loc[single_cancer_comparison_df.identifier == IDENTIFIER, 'reject_null'].values[0]
))
print('Pan-cancer significance: {}'.format(
pancancer_comparison_df.loc[pancancer_comparison_df.identifier == IDENTIFIER, 'reject_null'].values[0]
))
# Q2: where is this example in the single vs. pan-cancer volcano plot?
# see pancancer only experiments for an example of this sort of thing
experiment_comparison_df['nlog10_p'] = -np.log10(experiment_comparison_df.corr_pval)  # -log10 of the corrected p-value, to match the axis label below
sns.set({'figure.figsize': (8, 6)})
sns.scatterplot(data=experiment_comparison_df, x='delta_mean', y='nlog10_p',
hue='reject_null', alpha=0.3)
plt.xlabel('AUPRC(pancancer) - AUPRC(single cancer)')
plt.ylabel(r'$-\log_{10}($adjusted p-value$)$')
plt.title('Highlight {} in pancancer vs. single-cancer comparison'.format(IDENTIFIER))
def highlight_id(x, y, val, ax, id_to_plot):
a = pd.concat({'x': x, 'y': y, 'val': val}, axis=1)
for i, point in a.iterrows():
if point['val'] == id_to_plot:
ax.scatter(point['x'], point['y'], color='red', marker='+', s=100)
highlight_id(experiment_comparison_df.delta_mean,
experiment_comparison_df.nlog10_p,
experiment_comparison_df.identifier,
plt.gca(),
IDENTIFIER)
```
Overall, these results weren't quite as convincing as we were expecting. Although there are a few gene/cancer type combinations where there is a clear improvement when one or two relevant cancer types are added, overall there isn't much change in many cases (see first line plots of multiple cancer types).
Biologically speaking, this isn't too surprising for a few reasons:
* Some genes aren’t drivers in certain cancer types
* Some genes have very cancer-specific effects
* Some genes (e.g. TP53) have very well-preserved effects across all cancers
We think there could be room for improvement as far as cancer type selection (some of the cancers chosen don't make a ton of sense), but overall we're a bit skeptical that this approach will lead to models that generalize better than a single-cancer model in most cases.
|
github_jupyter
|
# Reshaping & Tidy Data
> Structuring datasets to facilitate analysis [(Wickham 2014)](http://www.jstatsoft.org/v59/i10/paper)
So, you've sat down to analyze a new dataset.
What do you do first?
In episode 11 of [Not So Standard Deviations](https://www.patreon.com/NSSDeviations?ty=h), Hilary and Roger discussed their typical approaches.
I'm with Hilary on this one, you should make sure your data is tidy.
Before you do any plots, filtering, transformations, summary statistics, regressions...
Without a tidy dataset, you'll be fighting your tools to get the result you need.
With a tidy dataset, it's relatively easy to do all of those.
Hadley Wickham kindly summarized tidiness as a dataset where
1. Each variable forms a column
2. Each observation forms a row
3. Each type of observational unit forms a table
And today we'll only concern ourselves with the first two.
As quoted at the top, this really is about facilitating analysis: going as quickly as possible from question to answer.
```
%matplotlib inline
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
if int(os.environ.get("MODERN_PANDAS_EPUB", 0)):
import prep # noqa
pd.options.display.max_rows = 10
sns.set(style='ticks', context='talk')
```
## NBA Data
[This](http://stackoverflow.com/questions/22695680/python-pandas-timedelta-specific-rows) StackOverflow question asked about calculating the number of days of rest NBA teams have between games.
The answer would have been difficult to compute with the raw data.
After transforming the dataset to be tidy, we're able to quickly get the answer.
We'll grab some NBA game data from basketball-reference.com using pandas' `read_html` function, which returns a list of DataFrames.
```
fp = 'data/nba.csv'
if not os.path.exists(fp):
tables = pd.read_html("http://www.basketball-reference.com/leagues/NBA_2016_games.html")
games = tables[0]
games.to_csv(fp)
else:
games = pd.read_csv(fp, index_col=0)
games.head()
```
Side note: pandas' `read_html` is pretty good. On simple websites it almost always works.
It provides a couple parameters for controlling what gets selected from the webpage if the defaults fail.
I'll always use it first, before moving on to [BeautifulSoup](https://www.crummy.com/software/BeautifulSoup/) or [lxml](http://lxml.de/) if the page is more complicated.
As you can see, we have a bit of general munging to do before tidying.
Each month slips in an extra row of mostly NaNs, the column names aren't too useful, and we have some dtypes to fix up.
```
column_names = {'Date': 'date', 'Start (ET)': 'start',
'Unnamed: 2': 'box', 'Visitor/Neutral': 'away_team',
'PTS': 'away_points', 'Home/Neutral': 'home_team',
'PTS.1': 'home_points', 'Unnamed: 7': 'n_ot'}
games = (games.rename(columns=column_names)
.dropna(thresh=4)
[['date', 'away_team', 'away_points', 'home_team', 'home_points']]
.assign(date=lambda x: pd.to_datetime(x['date'], format='%a, %b %d, %Y'))
.set_index('date', append=True)
.rename_axis(["game_id", "date"])
.sort_index())
games.head()
```
A quick aside on that last block.
- `dropna` has a `thresh` argument: rows with fewer than `thresh` non-missing values are dropped. We used it to remove the "Month headers" that slipped into the table.
- `assign` can take a callable. This lets us refer to the DataFrame in the previous step of the chain. Otherwise we would have to assign `temp_df = games.dropna()...` And then do the `pd.to_datetime` on that.
- `set_index` has an `append` keyword. We keep the original index around since it will be our unique identifier per game.
- We use `.rename_axis` to set the index names (this behavior is new in pandas 0.18; before `.rename_axis` only took a mapping for changing labels).
The Question:
> **How many days of rest did each team get between each game?**
Whether or not your dataset is tidy depends on your question. Given our question, what is an observation?
In this case, an observation is a `(team, game)` pair, which we don't have yet. Rather, we have two observations per row, one for home and one for away. We'll fix that with `pd.melt`.
`pd.melt` works by taking observations that are spread across columns (`away_team`, `home_team`), and melting them down into one column with multiple rows. However, we don't want to lose the metadata (like `game_id` and `date`) that is shared between the observations. By including those columns as `id_vars`, the values will be repeated as many times as needed to stay with their observations.
```
tidy = pd.melt(games.reset_index(),
id_vars=['game_id', 'date'], value_vars=['away_team', 'home_team'],
value_name='team')
tidy.head()
```
The DataFrame `tidy` meets our rules for tidiness: each variable is in a column, and each observation (`team`, `date` pair) is on its own row.
Now the translation from question ("How many days of rest between games") to operation ("date of today's game - date of previous game - 1") is direct:
```
# For each team... get number of days between games
tidy.groupby('team')['date'].diff().dt.days - 1
```
That's the essence of tidy data, the reason why it's worth considering what shape your data should be in.
It's about setting yourself up for success so that the answers naturally flow from the data (just kidding, it's usually still difficult. But hopefully less so).
Let's assign that back into our DataFrame
```
tidy['rest'] = tidy.sort_values('date').groupby('team').date.diff().dt.days - 1
tidy.dropna().head()
```
To show the inverse of `melt`, let's take `rest` values we just calculated and place them back in the original DataFrame with a `pivot_table`.
```
by_game = (pd.pivot_table(tidy, values='rest',
index=['game_id', 'date'],
columns='variable')
.rename(columns={'away_team': 'away_rest',
'home_team': 'home_rest'}))
df = pd.concat([games, by_game], axis=1)
df.dropna().head()
```
One somewhat subtle point: an "observation" depends on the question being asked.
So really, we have two tidy datasets, `tidy` for answering team-level questions, and `df` for answering game-level questions.
One potentially interesting question is "what was each team's average days of rest, at home and on the road?" With a tidy dataset (the DataFrame `tidy`, since it's team-level), `seaborn` makes this easy (more on seaborn in a future post):
```
sns.set(style='ticks', context='paper')
g = sns.FacetGrid(tidy, col='team', col_wrap=6, hue='team', size=2)
g.map(sns.barplot, 'variable', 'rest');
```
An example of a game-level statistic is the distribution of rest differences in games:
```
df['home_win'] = df['home_points'] > df['away_points']
df['rest_spread'] = df['home_rest'] - df['away_rest']
df.dropna().head()
delta = (by_game.home_rest - by_game.away_rest).dropna().astype(int)
ax = (delta.value_counts()
.reindex(np.arange(delta.min(), delta.max() + 1), fill_value=0)
.sort_index()
.plot(kind='bar', color='k', width=.9, rot=0, figsize=(12, 6))
)
sns.despine()
ax.set(xlabel='Difference in Rest (Home - Away)', ylabel='Games');
```
Or the win percent by rest difference
```
fig, ax = plt.subplots(figsize=(12, 6))
sns.barplot(x='rest_spread', y='home_win', data=df.query('-3 <= rest_spread <= 3'),
color='#4c72b0', ax=ax)
sns.despine()
```
## Stack / Unstack
Pandas has two useful methods for quickly converting from wide to long format (`stack`) and long to wide (`unstack`).
```
rest = (tidy.groupby(['date', 'variable'])
.rest.mean()
.dropna())
rest.head()
```
`rest` is in a "long" form since we have a single column of data, with multiple "columns" of metadata (in the MultiIndex). We use `.unstack` to move from long to wide.
```
rest.unstack().head()
```
`unstack` moves a level of a MultiIndex (innermost by default) up to the columns.
`stack` is the inverse.
```
rest.unstack().stack()
```
With `.unstack` you can move between those APIs that expect their data in long format and those APIs that work with wide-format data. For example, `DataFrame.plot()` works with wide-form data, one line per column.
```
with sns.color_palette() as pal:
b, g = pal.as_hex()[:2]
ax=(rest.unstack()
.query('away_team < 7')
.rolling(7)
.mean()
.plot(figsize=(12, 6), linewidth=3, legend=False))
ax.set(ylabel='Rest (7 day MA)')
ax.annotate("Home", (rest.index[-1][0], 1.02), color=g, size=14)
ax.annotate("Away", (rest.index[-1][0], 0.82), color=b, size=14)
sns.despine()
```
The most convenient form will depend on exactly what you're doing.
When interacting with databases you'll often deal with long form data.
Pandas' `DataFrame.plot` often expects wide-form data, while `seaborn` often expects long-form data. Regressions will expect wide-form data. Either way, it's good to be comfortable with `stack` and `unstack` (and MultiIndexes) to quickly move between the two.
## Mini Project: Home Court Advantage?
We've gone to all that work tidying our dataset, let's put it to use.
What's the effect (in terms of probability to win) of being
the home team?
### Step 1: Create an outcome variable
We need to create an indicator for whether the home team won.
Add it as a column called `home_win` in `games`.
```
df['home_win'] = df.home_points > df.away_points
```
### Step 2: Find the win percent for each team
In the 10-minute literature review I did on the topic, it seems like people include a team-strength variable in their regressions.
I suppose that makes sense; if stronger teams happened to play against weaker teams at home more often than away, it'd look like the home-effect is stronger than it actually is.
We'll do a terrible job of controlling for team strength by calculating each team's win percent and using that as a predictor.
It'd be better to use some kind of independent measure of team strength, but this will do for now.
We'll use a similar `melt` operation as earlier, only now with the `home_win` variable we just created.
```
wins = (
pd.melt(df.reset_index(),
id_vars=['game_id', 'date', 'home_win'],
value_name='team', var_name='is_home',
value_vars=['home_team', 'away_team'])
.assign(win=lambda x: x.home_win == (x.is_home == 'home_team'))
.groupby(['team', 'is_home'])
.win
.agg({'n_wins': 'sum', 'n_games': 'count', 'win_pct': 'mean'})
)
wins.head()
```
Pause for visualization, because why not
```
g = sns.FacetGrid(wins.reset_index(), hue='team', size=7, aspect=.5, palette=['k'])
g.map(sns.pointplot, 'is_home', 'win_pct').set(ylim=(0, 1));
```
(It'd be great if there was a library built on top of matplotlib that auto-labeled each point decently well. Apparently this is a difficult problem to do in general).
```
g = sns.FacetGrid(wins.reset_index(), col='team', hue='team', col_wrap=5, size=2)
g.map(sns.pointplot, 'is_home', 'win_pct')
```
Those two graphs show that most teams have a higher win-percent at home than away. So we can continue to investigate.
Let's aggregate over home / away to get an overall win percent per team.
```
win_percent = (
# Use sum(games) / sum(games) instead of mean
# since I don't know if teams play the same
# number of games at home as away
wins.groupby(level='team', as_index=True)
.apply(lambda x: x.n_wins.sum() / x.n_games.sum())
)
win_percent.head()
win_percent.sort_values().plot.barh(figsize=(6, 12), width=.85, color='k')
plt.tight_layout()
sns.despine()
plt.xlabel("Win Percent")
```
Is there a relationship between overall team strength and their home-court advantage?
```
plt.figure(figsize=(8, 5))
(wins.win_pct
.unstack()
.assign(**{'Home Win % - Away %': lambda x: x.home_team - x.away_team,
'Overall %': lambda x: (x.home_team + x.away_team) / 2})
.pipe((sns.regplot, 'data'), x='Overall %', y='Home Win % - Away %')
)
sns.despine()
plt.tight_layout()
```
Let's get the team strength back into `df`.
You could use `pd.merge`, but I prefer `.map` when joining a `Series`.
```
df = df.assign(away_strength=df['away_team'].map(win_percent),
home_strength=df['home_team'].map(win_percent),
point_diff=df['home_points'] - df['away_points'],
rest_diff=df['home_rest'] - df['away_rest'])
df.head()
import statsmodels.formula.api as sm
df['home_win'] = df.home_win.astype(int) # for statsmodels
mod = sm.logit('home_win ~ home_strength + away_strength + home_rest + away_rest', df)
res = mod.fit()
res.summary()
```
The strength variables both have large coefficients (really we should be using some independent measure of team strength here, `win_percent` is showing up on the left and right side of the equation). The rest variables don't seem to matter as much.
With `.assign` we can quickly explore variations in formula.
```
(sm.Logit.from_formula('home_win ~ strength_diff + rest_spread',
df.assign(strength_diff=df.home_strength - df.away_strength))
.fit().summary())
mod = sm.Logit.from_formula('home_win ~ home_rest + away_rest', df)
res = mod.fit()
res.summary()
```
Overall, we're not seeing too much support for rest mattering, but we got to see some more tidy data.
That's it for today.
Next time we'll look at data visualization.
|
github_jupyter
|
# The Tractable Buffer Stock Model
<p style="text-align: center;"><small><small><small>Generator: BufferStockTheory-make/notebooks_byname</small></small></small></p>
The [TractableBufferStock](http://www.econ2.jhu.edu/people/ccarroll/public/LectureNotes/Consumption/TractableBufferStock/) model is a (relatively) simple framework that captures all of the qualitative, and many of the quantitative features of optimal consumption in the presence of labor income uncertainty.
```
# This cell has a bit of (uninteresting) initial setup.
import matplotlib.pyplot as plt
import numpy as np
import HARK
from time import clock
from copy import deepcopy
mystr = lambda number : "{:.3f}".format(number)
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from HARK.utilities import plotFuncs
# Import the model from the toolkit
from HARK.ConsumptionSaving.TractableBufferStockModel import TractableConsumerType
```
The key assumption behind the model's tractability is that there is only a single, stark form of uncertainty: So long as an employed consumer remains employed, that consumer's labor income $P$ will rise at a constant rate $\Gamma$:
\begin{align}
P_{t+1} &= \Gamma P_{t}
\end{align}
But, between any period and the next, there is constant hazard $p$ that the consumer will transition to the "unemployed" state. Unemployment is irreversible, like retirement or disability. When unemployed, the consumer receives a fixed amount of income (for simplicity, zero). (See the [linked handout](http://www.econ2.jhu.edu/people/ccarroll/public/LectureNotes/Consumption/TractableBufferStock/) for details of the model).
Defining $G$ as the growth rate of aggregate wages/productivity, we assume that idiosyncratic wages grow by $\Gamma = G/(1-\mho)$ where $(1-\mho)^{-1}$ is the growth rate of idiosyncratic productivity ('on-the-job learning', say). (This assumption about the relation between idiosyncratic income growth and idiosyncratic risk means that an increase in $\mho$ is a mean-preserving spread in human wealth; again see [the lecture notes](http://www.econ2.jhu.edu/people/ccarroll/public/LectureNotes/Consumption/TractableBufferStock/)).
Under CRRA utility $u(C) = \frac{C^{1-\rho}}{1-\rho}$, the problem can be normalized by $P$. Using lower case for normalized varibles (e.g., $c = C/P$), the normalized problem can be expressed by the Bellman equation:
\begin{eqnarray*}
v_t({m}_t) &=& \max_{{c}_t} ~ U({c}_t) + \beta \Gamma^{1-\rho} \overbrace{\mathbb{E}[v_{t+1}^{\bullet}]}^{=p v_{t+1}^{u}+(1-p)v_{t+1}^{e}} \\
& s.t. & \\
{m}_{t+1} &=& (m_{t}-c_{t})\mathcal{R} + \mathbb{1}_{t+1},
\end{eqnarray*}
where $\mathcal{R} = R/\Gamma$, and $\mathbb{1}_{t+1} = 1$ if the consumer is employed (and zero if unemployed).
Under plausible parameter values the model has a target level of $\check{m} = M/P$ (market resources to permanent income) with an analytical solution that exhibits plausible relationships among all of the parameters.
Defining $\gamma = \log \Gamma$ and $r = \log R$, the handout shows that an approximation of the target is given by the formula:
\begin{align}
\check{m} & = 1 + \left(\frac{1}{(\gamma-r)+(1+(\gamma/\mho)(1-(\gamma/\mho)(\rho-1)/2))}\right)
\end{align}
```
# Define a parameter dictionary and representation of the agents for the tractable buffer stock model
TBS_dictionary = {'UnempPrb' : .00625, # Prob of becoming unemployed; working life of 1/UnempProb = 160 qtrs
'DiscFac' : 0.975, # Intertemporal discount factor
'Rfree' : 1.01, # Risk-free interest factor on assets
'PermGroFac' : 1.0025, # Permanent income growth factor (uncompensated)
'CRRA' : 2.5} # Coefficient of relative risk aversion
MyTBStype = TractableConsumerType(**TBS_dictionary)
```
## Target Wealth
Whether the model exhibits a "target" or "stable" level of the wealth-to-permanent-income ratio for employed consumers depends on whether the 'Growth Impatience Condition' (the GIC) holds:
\begin{align}\label{eq:GIC}
\left(\frac{(R \beta (1-\mho))^{1/\rho}}{\Gamma}\right) & < 1
\\ \left(\frac{(R \beta (1-\mho))^{1/\rho}}{G (1-\mho)}\right) &< 1
\\ \left(\frac{(R \beta)^{1/\rho}}{G} (1-\mho)^{\frac{1-\rho}{\rho}}\right) &< 1
\end{align}
and recall (from [PerfForesightCRRA](http://econ.jhu.edu/people/ccarroll/public/lecturenotes/consumption/PerfForesightCRRA/)) that the perfect foresight 'Growth Impatience Factor' is
\begin{align}\label{eq:PFGIC}
\left(\frac{(R \beta)^{1/\rho}}{G}\right) &< 1
\end{align}
so since $\mho > 0$, uncertainty makes it harder to be 'impatient.' To understand this, think of someone who, in the perfect foresight model, was 'poised': Exactly on the knife edge between patience and impatience. Now add a precautionary saving motive; that person will now (to some degree) be pushed off the knife edge in the direction of 'patience.' So, in the presence of uncertainty, the conditions on parameters other than $\mho$ must be stronger in order to guarantee 'impatience' in the sense of wanting to spend enough for your wealth to decline _despite_ the extra precautionary motive.
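Before building the interactive plot, it may help to evaluate the two impatience factors written above at the default parameters; this is just a numerical sketch (not part of the HARK toolkit), using $\Gamma = G/(1-\mho)$ as defined earlier.
```
# Evaluate the Growth Impatience Factors at the default parameter values
R = TBS_dictionary['Rfree']
beta = TBS_dictionary['DiscFac']
rho = TBS_dictionary['CRRA']
G = TBS_dictionary['PermGroFac']
mho = TBS_dictionary['UnempPrb']
Gamma = G/(1 - mho)                            # idiosyncratic growth factor
GIF_TBS = (R*beta*(1 - mho))**(1/rho)/Gamma    # left-hand side of the first GIC line
GIF_PF = (R*beta)**(1/rho)/G                   # perfect-foresight Growth Impatience Factor
print('Growth Impatience Factor (with unemployment risk):', GIF_TBS)
print('Growth Impatience Factor (perfect foresight)     :', GIF_PF)
print('GIC satisfied:', GIF_TBS < 1)
```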
```
# Define a function that plots the employed consumption function and sustainable consumption function
# for given parameter values
def makeTBSplot(DiscFac,CRRA,Rfree,PermGroFac,UnempPrb,mMax,mMin,cMin,cMax,plot_emp,plot_ret,plot_mSS,show_targ):
MyTBStype.DiscFac = DiscFac
MyTBStype.CRRA = CRRA
MyTBStype.Rfree = Rfree
MyTBStype.PermGroFac = PermGroFac
MyTBStype.UnempPrb = UnempPrb
try:
MyTBStype.solve()
except:
print('Unable to solve; parameter values may be too close to their limiting values')
plt.xlabel('Market resources ${m}_t$')
plt.ylabel('Consumption ${c}_t$')
plt.ylim([cMin,cMax])
plt.xlim([mMin,mMax])
m = np.linspace(mMin,mMax,num=100,endpoint=True)
if plot_emp:
c = MyTBStype.solution[0].cFunc(m)
c[m==0.] = 0.
plt.plot(m,c,'-b')
if plot_mSS:
plt.plot([mMin,mMax],[(MyTBStype.PermGroFacCmp/MyTBStype.Rfree + mMin*(1.0-MyTBStype.PermGroFacCmp/MyTBStype.Rfree)),(MyTBStype.PermGroFacCmp/MyTBStype.Rfree + mMax*(1.0-MyTBStype.PermGroFacCmp/MyTBStype.Rfree))],'--k')
if plot_ret:
c = MyTBStype.solution[0].cFunc_U(m)
plt.plot(m,c,'-g')
if show_targ:
mTarg = MyTBStype.mTarg
cTarg = MyTBStype.cTarg
targ_label = r'$\left(\frac{1}{(\gamma-r)+(1+(\gamma/\mho)(1-(\gamma/\mho)(\rho-1)/2))}\right) $' #+ mystr(mTarg) + '\n$\check{c}^* = $ ' + mystr(cTarg)
plt.annotate(targ_label,xy=(0.0,0.0),xytext=(0.2,0.1),textcoords='axes fraction',fontsize=18)
plt.plot(mTarg,cTarg,'ro')
plt.annotate('↙️ m target',(mTarg,cTarg),xytext=(0.25,0.2),ha='left',textcoords='offset points')
plt.show()
return None
# Define widgets to control various aspects of the plot
# Define a slider for the discount factor
DiscFac_widget = widgets.FloatSlider(
min=0.9,
max=0.99,
step=0.0002,
value=TBS_dictionary['DiscFac'], # Default value
continuous_update=False,
readout_format='.4f',
description='$\\beta$')
# Define a slider for relative risk aversion
CRRA_widget = widgets.FloatSlider(
min=1.0,
max=5.0,
step=0.01,
value=TBS_dictionary['CRRA'], # Default value
continuous_update=False,
readout_format='.2f',
description='$\\rho$')
# Define a slider for the interest factor
Rfree_widget = widgets.FloatSlider(
min=1.01,
max=1.04,
step=0.0001,
value=TBS_dictionary['Rfree'], # Default value
continuous_update=False,
readout_format='.4f',
description='$R$')
# Define a slider for permanent income growth
PermGroFac_widget = widgets.FloatSlider(
min=1.00,
max=1.015,
step=0.0002,
value=TBS_dictionary['PermGroFac'], # Default value
continuous_update=False,
readout_format='.4f',
description='$G$')
# Define a slider for unemployment (or retirement) probability
UnempPrb_widget = widgets.FloatSlider(
min=0.000001,
max=TBS_dictionary['UnempPrb']*2, # Go up to twice the default value
step=0.00001,
value=TBS_dictionary['UnempPrb'],
continuous_update=False,
readout_format='.5f',
description='$\\mho$')
# Define a text box for the lower bound of {m}_t
mMin_widget = widgets.FloatText(
value=0.0,
step=0.1,
description='$m$ min',
disabled=False)
# Define a text box for the upper bound of {m}_t
mMax_widget = widgets.FloatText(
value=50.0,
step=0.1,
description='$m$ max',
disabled=False)
# Define a text box for the lower bound of {c}_t
cMin_widget = widgets.FloatText(
value=0.0,
step=0.1,
description='$c$ min',
disabled=False)
# Define a text box for the upper bound of {c}_t
cMax_widget = widgets.FloatText(
value=1.5,
step=0.1,
description='$c$ max',
disabled=False)
# Define a check box for whether to plot the employed consumption function
plot_emp_widget = widgets.Checkbox(
value=True,
description='Plot employed $c$ function',
disabled=False)
# Define a check box for whether to plot the retired consumption function
plot_ret_widget = widgets.Checkbox(
value=False,
description='Plot retired $c$ function',
disabled=False)
# Define a check box for whether to plot the sustainable consumption line
plot_mSS_widget = widgets.Checkbox(
value=True,
description='Plot sustainable $c$ line',
disabled=False)
# Define a check box for whether to show the target annotation
show_targ_widget = widgets.Checkbox(
value=True,
description = 'Show target $(m,c)$',
disabled = False)
# Make an interactive plot of the tractable buffer stock solution
# To make some of the widgets not appear, replace X_widget with fixed(desired_fixed_value) in the arguments below.
interact(makeTBSplot,
DiscFac = DiscFac_widget,
CRRA = CRRA_widget,
Rfree = Rfree_widget,
PermGroFac = PermGroFac_widget,
UnempPrb = UnempPrb_widget,
mMin = mMin_widget,
mMax = mMax_widget,
cMin = cMin_widget,
cMax = cMax_widget,
show_targ = show_targ_widget,
plot_emp = plot_emp_widget,
plot_ret = plot_ret_widget,
plot_mSS = plot_mSS_widget,
);
```
# PROBLEM
Your task is to make a simplified slider that involves only $\beta$.
First, create a variable `betaMax` equal to the value of $\beta$ at which the Growth Impatience Factor is exactly equal to 1 (that is, the consumer is exactly on the border between patience and impatience). (Hint: The formula for this is [here](http://www.econ2.jhu.edu/people/ccarroll/public/LectureNotes/Consumption/TractableBufferStock/#GIFMax)).
Next, create a slider/'widget' like the one above, but where all variables except $\beta$ are set to their default values, and the slider takes $\beta$ from 0.05 below its default value up to `betaMax - 0.01`. (The numerical solution algorithm becomes unstable when the GIC is too close to being violated, so you don't want to go all the way up to `betaMax`.)
Explain the logic of the result that you see.
(Hint: You do not need to copy and paste (then edit) the entire contents of the cell that creates the widgets above; you only need to modify the `DiscFac_widget`)
```
# Define a slider for the discount factor
my_rho = TBS_dictionary['CRRA']
my_R = TBS_dictionary['Rfree']
my_upsidedownOmega = TBS_dictionary['UnempPrb']  # unemployment probability, i.e. mho
my_Gamma = TBS_dictionary['PermGroFac']/(1 - my_upsidedownOmega)  # Gamma = G/(1 - mho)
# betaMax solves (R*beta*(1 - mho))^(1/rho) / Gamma = 1
betaMax = (my_Gamma**my_rho)/(my_R*(1 - my_upsidedownOmega))
DiscFac_widget = widgets.FloatSlider(
min=TBS_dictionary['DiscFac']-0.05,
max=betaMax-0.01,
step=0.0002,
value=TBS_dictionary['DiscFac'], # Default value
continuous_update=False,
readout_format='.4f',
description='$\\beta$')
interact(makeTBSplot,
DiscFac = DiscFac_widget,
CRRA = fixed(TBS_dictionary['CRRA']),
Rfree = fixed(TBS_dictionary['Rfree']),
PermGroFac = fixed(TBS_dictionary['PermGroFac']),
UnempPrb = fixed(TBS_dictionary['UnempPrb']),
mMin = mMin_widget,
mMax = mMax_widget,
cMin = cMin_widget,
cMax = cMax_widget,
show_targ = show_targ_widget,
plot_emp = plot_emp_widget,
plot_ret = plot_ret_widget,
plot_mSS = plot_mSS_widget,
);
```
# The target level of market resources increases with patience (higher $\beta$), and so does consumption at the target, because patience is rewarded by the returns on savings.
|
github_jupyter
|
```
import h5py
import keras
import numpy as np
import os
import random
import sys
import tensorflow as tf
sys.path.append("../src")
import localmodule
# Define constants.
dataset_name = localmodule.get_dataset_name()
models_dir = localmodule.get_models_dir()
units = localmodule.get_units()
n_input_hops = 104
n_filters = [24, 48, 48]
kernel_size = [5, 5]
pool_size = [2, 4]
n_hidden_units = 64
# Define and compile Keras model.
# NB: the original implementation of Justin Salamon in ICASSP 2017 relies on
# glorot_uniform initialization for all layers, and the optimizer is a
# stochastic gradient descent (SGD) with a fixed learning rate of 0.1.
# Instead, we use a he_normal initialization for the layers followed
# by rectified linear units (see He ICCV 2015), and replace the SGD by
# the Adam adaptive stochastic optimizer (see Kingma ICLR 2014).
model = keras.models.Sequential()
# Layer 1
bn = keras.layers.normalization.BatchNormalization(
input_shape=(128, n_input_hops, 1))
model.add(bn)
conv1 = keras.layers.Convolution2D(n_filters[0], kernel_size,
padding="same", kernel_initializer="he_normal", activation="relu")
model.add(conv1)
pool1 = keras.layers.MaxPooling2D(pool_size=pool_size)
model.add(pool1)
# Layer 2
conv2 = keras.layers.Convolution2D(n_filters[1], kernel_size,
padding="same", kernel_initializer="he_normal", activation="relu")
model.add(conv2)
pool2 = keras.layers.MaxPooling2D(pool_size=pool_size)
model.add(pool2)
# Layer 3
conv3 = keras.layers.Convolution2D(n_filters[2], kernel_size,
padding="same", kernel_initializer="he_normal", activation="relu")
model.add(conv3)
# Layer 4
drop1 = keras.layers.Dropout(0.5)
model.add(drop1)
flatten = keras.layers.Flatten()
model.add(flatten)
dense1 = keras.layers.Dense(n_hidden_units,
kernel_initializer="he_normal", activation="relu",
kernel_regularizer=keras.regularizers.l2(0.01))
model.add(dense1)
# Layer 5
# We put a single output instead of 43 in the original paper, because this
# is binary classification instead of multilabel classification.
drop2 = keras.layers.Dropout(0.5)
model.add(drop2)
dense2 = keras.layers.Dense(1,
kernel_initializer="normal", activation="sigmoid",
kernel_regularizer=keras.regularizers.l2(0.0002))
model.add(dense2)
# Compile model, print model summary.
metrics = ["accuracy"]
#model.compile(loss="binary_crossentropy", optimizer="sgd", metrics=metrics)
#model.compile(loss="mse", optimizer="adam", metrics=metrics)
model.compile(loss="mse", optimizer="sgd", metrics=metrics)
#model.summary()
# Train model.
fold_units = ["unit01"]
augs = ["original"]
aug_dict = localmodule.get_augmentations()
data_dir = localmodule.get_data_dir()
dataset_name = localmodule.get_dataset_name()
logmelspec_name = "_".join([dataset_name, "logmelspec"])
logmelspec_dir = os.path.join(data_dir, logmelspec_name)
original_dir = os.path.join(logmelspec_dir, "original")
n_hops = 104
Xs = []
ys = []
for unit_str in units[:2]:
unit_name = "_".join([dataset_name, "original", unit_str])
unit_path = os.path.join(original_dir, unit_name + ".hdf5")
lms_container = h5py.File(unit_path, "r")
lms_group = lms_container["logmelspec"]
keys = list(lms_group.keys())
for key in keys:
X = lms_group[key]
X_width = X.shape[1]
first_col = int((X_width-n_hops) / 2)
last_col = int((X_width+n_hops) / 2)
X = X[:, first_col:last_col]
X = np.array(X)[np.newaxis, :, :, np.newaxis]
Xs.append(X)
ys.append(np.float32(key.split("_")[3]))
X = np.concatenate(Xs, axis=0)
y = np.array(ys)
X.shape
# MSE, ADAM
model.fit(X[:,:,:,:], y[:], epochs=1, verbose=True)
print(model.evaluate(X, y))
# MSE, SGD
model.fit(X[:,:,:,:], y[:], epochs=1, verbose=True)
print(model.evaluate(X, y))
# MSE, SGD
model.fit(X[:,:,:,:], y[:], epochs=1, verbose=True)
print(model.evaluate(X, y))
# BCE, SGD
model.fit(X[:,:,:,:], y[:], epochs=1, verbose=True)
print(model.evaluate(X, y))
# BCE, ADAM
model.fit(X[:,:,:,:], y[:], epochs=1, verbose=True)
print(model.evaluate(X, y))
m = keras.models.Sequential()
m.add(keras.layers.Dense(1, input_shape=(1,)))
X = np.array([[0.0], [1.0]])
y = np.array([0.0, 1.0])
m.compile(optimizer="sgd", loss="binary_crossentropy")
print(m.layers[0].get_weights())
m.fit(X, y, epochs=500, verbose=False)
print(m.predict(X))
print(m.layers[0].get_weights())
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout
# Generate dummy data
neg_X = np.random.randn(500, 2) + np.array([-2.0, 1.0])
pos_X = np.random.randn(500, 2) + np.array([1.0, -2.0])
X = np.concatenate((neg_X, pos_X), axis=0)
neg_Y = np.zeros((500,))
pos_Y = np.ones((500,))
Y = np.concatenate((neg_Y, pos_Y), axis=0)
model = Sequential()
model.add(Dense(10, input_dim=2, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
print(model.layers[0].get_weights())
model.fit(X, Y, epochs=20, batch_size=100, verbose=False)
print(model.layers[0].get_weights())
from matplotlib import pyplot as plt
%matplotlib inline
plt.figure()
plt.plot(neg_X[:, 0], neg_X[:, 1], '+');
plt.plot(pos_X[:, 0], pos_X[:, 1], '+');
```
|
github_jupyter
|
For all numerical experiments, we will be using the Chambolle-Pock primal-dual algorithm - details can be found on:
1. [A First-order Primal-dual Algorithm for Convex Problems with Applications to Imaging](https://link.springer.com/article/10.1007/s10851-010-0251-1), A. Chambolle, T. Pock, Journal of Mathematical Imaging and Vision (2011). [PDF](https://hal.archives-ouvertes.fr/hal-00490826/document)
2. [Recovering Piecewise Smooth Multichannel Images by Minimization of Convex Functionals with Total Generalized Variation Penalty](https://link.springer.com/chapter/10.1007/978-3-642-54774-4_3), K. Bredies, Efficient algorithms for global optimization methods in computer vision (2014). [PDF](https://imsc.uni-graz.at/mobis/publications/SFB-Report-2012-006.pdf)
3. [Second Order Total Generalized Variation (TGV) for MRI](https://onlinelibrary.wiley.com/doi/full/10.1002/mrm.22595), F. Knoll, K. Bredies, T. Pock, R. Stollberger (2010). [PDF](https://onlinelibrary.wiley.com/doi/epdf/10.1002/mrm.22595)
In order to compute the spatially dependent regularization weights, we follow:
4. [Dualization and Automatic Distributed Parameter Selection of Total Generalized Variation via Bilevel Optimization](https://arxiv.org/pdf/2002.05614.pdf), M. Hintermüller, K. Papafitsoros, C.N. Rautenberg, H. Sun, arXiv preprint, (2020)
# Huber Total Variation Denoising
We are solving the discretized version of the following minimization problem
\begin{equation}\label{L2-TV}
\min_{u} \int_{\Omega} (u-f)^{2}dx + \alpha \int_{\Omega} \varphi_{\gamma}(\nabla u)dx
\end{equation}
where $\varphi_{\gamma}:\mathbb{R}^{d}\to \mathbb{R}^{+}$ is the Huber function
\begin{equation}
\varphi_{\gamma}(v)=
\begin{cases}
|v|-\frac{1}{2}\gamma & \text{ if } |v|\ge \gamma,\\
\frac{1}{2\gamma}|v|^{2}& \text{ if } |v|< \gamma.\\
\end{cases}
\end{equation}
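For intuition, the Huber function $\varphi_\gamma$ can be evaluated directly on a grid; the short sketch below is only illustrative and is independent of the provided `Tutorial_Codes` implementation. For small $\gamma$ it is close to $|v|$, while a larger $\gamma$ widens the quadratic region around the origin.
```
import numpy as np
import matplotlib.pyplot as plt

def huber(v, gamma):
    # scalar Huber function applied to the magnitude |v|
    v = np.abs(v)
    return np.where(v >= gamma, v - 0.5*gamma, v**2/(2*gamma))

t = np.linspace(-2, 2, 401)
for g in [0.01, 0.5, 1.0]:
    plt.plot(t, huber(t, g), label=r'$\gamma = %.2f$' % g)
plt.legend()
plt.title(r'Huber function $\varphi_\gamma$')
plt.show()
```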
## Import data...
```
import scipy.io as sio
import matplotlib.pyplot as plt
import numpy as np
mat_contents = sio.loadmat('tutorial1_classical_reg_methods/parrot')
clean=mat_contents['parrot']
f=mat_contents['parrot_noisy_01']
plt.figure(figsize = (7,7))
imgplot2 = plt.imshow(clean)
imgplot2.set_cmap('gray')
plt.figure(figsize = (7,7))
imgplot2 = plt.imshow(f)
imgplot2.set_cmap('gray')
from tutorial1_classical_reg_methods.Tutorial_Codes import psnr, reproject, dxm, dym, dxp, dyp, function_TGV_denoising_CP, P_a_Huber, function_HuberTV_denoising_CP
```
## Task 1
Choose different values for $\alpha$ and $\gamma$ and interpret your results:
- Fix $\gamma$ small, e.g. $\gamma=0.01$ and play with the values of $\alpha$. What do you observe for large $\alpha$? What for small?
- Fix $\alpha$ and play with the values of $\gamma$. What do you observe for large $\gamma$? What for small?
```
alpha=0.085
gamma=0.001
uTV = function_HuberTV_denoising_CP(f,clean, alpha, gamma,1000)
uTikhonov = function_HuberTV_denoising_CP(f,clean, 5, 2,1000)
```
# Total Generalized Variation Denoising
We are solving the discretized version of the following minimization problem
\begin{equation}\label{L2-TGV}
\min_{u} \int_{\Omega} (u-f)^{2}dx + TGV_{\alpha,\beta}(u)
\end{equation}
where
\begin{equation}
TGV_{\alpha,\beta}(u)=\min_{w} \alpha \int_{\Omega} |\nabla u-w|dx + \beta \int_{\Omega} |Ew|dx
\end{equation}
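Here $Ew$ denotes the symmetrized derivative of the vector field $w$, $Ew = \tfrac{1}{2}(\nabla w + \nabla w^{T})$ (see reference 3 above). The snippet below is a rough finite-difference sketch of this operator, independent of the discretization used in the provided codes.
```
import numpy as np

def sym_grad(w1, w2):
    # E(w) = 0.5*(grad w + grad w^T) for a 2D vector field w = (w1, w2);
    # np.gradient differentiates along axis 0 (rows, "y") and axis 1 (columns, "x")
    w1_y, w1_x = np.gradient(w1)
    w2_y, w2_x = np.gradient(w2)
    e11 = w1_x                     # d w1 / dx
    e22 = w2_y                     # d w2 / dy
    e12 = 0.5*(w1_y + w2_x)        # symmetric off-diagonal entry
    return e11, e22, e12

w1 = np.random.randn(32, 32)
w2 = np.random.randn(32, 32)
e11, e22, e12 = sym_grad(w1, w2)
# pointwise Frobenius norm |Ew|, counting the off-diagonal entry twice
Ew_norm = np.sqrt(e11**2 + e22**2 + 2*e12**2)
print(Ew_norm.shape)
```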
## Task 2a
Choose different values for $\alpha$ and $\beta$ and solve the TGV denoising minimization problem.
- What happens for small $\alpha$ and large $\beta$?
- What happens for large $\alpha$ and small $\beta$?
- What happens where both parameters are small/large?
- Try to find the combination of parameters that gives the highest PSNR value.
```
#alpha=0.085
#beta=0.15
alpha=0.085
beta=0.15
uTGV = function_TGV_denoising_CP(f,clean, alpha, beta, 500)
```
## Task 2b
Import the following spatially dependent regularization weights, which are taken from this work:
- [Dualization and Automatic Distributed Parameter Selection of Total Generalized Variation via Bilevel Optimization](https://arxiv.org/pdf/2002.05614.pdf), M. Hintermüller, K. Papafitsoros, C.N. Rautenberg, H. Sun, arXiv preprint, (2020)
```
weight_contents = sio.loadmat('tutorial1_classical_reg_methods/spatial_dependent_weights')
alpha_spatial=weight_contents['TGV_alpha_spatial']
beta_spatial=weight_contents['TGV_beta_spatial']
#plt.figure(figsize = (7,7))
#imgplot2 = plt.imshow(alpha_spatial)
#imgplot2.set_cmap('gray')
#plt.figure(figsize = (7,7))
#imgplot2 = plt.imshow(beta_spatial)
#imgplot2.set_cmap('gray')
from mpl_toolkits.mplot3d import Axes3D
(n,m)=alpha_spatial.shape
x=range(n)
y=range(m)
X, Y = np.meshgrid(x, y)
halpha = plt.figure(figsize = (7,7))
h_alpha = halpha.add_subplot(111, projection='3d')
h_alpha.plot_surface(X, Y, alpha_spatial)
hbeta = plt.figure(figsize = (7,7))
h_beta = hbeta.add_subplot(111, projection='3d')
h_beta.plot_surface(X, Y, beta_spatial)
hclean = plt.figure(figsize = (7,7))
h_clean = hclean.add_subplot(111, projection='3d')
h_clean.plot_surface(X, Y, clean)
```
And run again the algorithm with this weight:
```
uTGVspatial = function_TGV_denoising_CP(f,clean, alpha_spatial, beta_spatial, 500)
```
Now you can see all the reconstructions together:
```
plt.rcParams['figure.figsize'] = np.array([4, 3])*3
plt.rcParams['figure.dpi'] = 120
fig, axs = plt.subplots(ncols=3, nrows=2)
# remove ticks from plot
for ax in axs.flat:
ax.set(xticks=[], yticks=[])
axs[0,0].imshow(clean, cmap='gray')
axs[0,0].set(xlabel='Clean')
axs[0,1].imshow(f, cmap='gray')
axs[0,1].set(xlabel='Noisy, PSNR = ' + str(np.around(psnr(f, clean),decimals=2)))
axs[0,2].imshow(uTikhonov, cmap='gray')
axs[0,2].set(xlabel='Tikhonov, PSNR = ' + str(np.around(psnr(uTikhonov, clean),decimals=2)))
axs[1,0].imshow(uTV, cmap='gray')
axs[1,0].set(xlabel='TV, PSNR = ' + str(np.around(psnr(uTV, clean),decimals=2)))
axs[1,1].imshow(uTGV, cmap='gray')
axs[1,1].set(xlabel = 'TGV, PSNR = ' + str(np.around(psnr(uTGV, clean),decimals=2)))
axs[1,2].imshow(uTGVspatial, cmap='gray')
axs[1,2].set(xlabel = 'TGV spatial, PSNR = ' + str(np.around(psnr(uTGVspatial, clean),decimals=2)))
```
# TV and TGV MRI reconstruction
Here we will be solving the discretized version of the following minimization problem
\begin{equation}
\min_{u} \int_{\Omega} (S \circ F u-g)^{2}dx + \alpha TV(u)
\end{equation}
and
\begin{equation}
\min_{u} \int_{\Omega} (S \circ F u-g)^{2}dx + TGV_{\alpha,\beta}(u)
\end{equation}
The code for the examples below was kindly provided by Clemens Sirotenko.
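Before loading the data, it may help to spell out the measurement operator: $S \circ F$ samples the (unitary) 2D Fourier transform on a binary mask, and its adjoint zero-fills the missing frequencies before transforming back. The sketch below is only illustrative; the names `forward_op` and `adjoint_op` are mine and are not the functions imported from `Tutorial_Codes`.
```
import numpy as np

def forward_op(u, mask):
    # S o F: keep only the Fourier coefficients where mask == 1
    return mask*np.fft.fft2(u, norm='ortho')

def adjoint_op(k, mask):
    # adjoint of S o F: zero-fill the missing frequencies, then inverse FFT
    return np.fft.ifft2(mask*k, norm='ortho')

# quick adjoint test: <forward_op(u), k> should equal <u, adjoint_op(k)>
rng = np.random.default_rng(0)
u = rng.standard_normal((8, 8))
k = rng.standard_normal((8, 8)) + 1j*rng.standard_normal((8, 8))
mask = (rng.random((8, 8)) > 0.5).astype(float)
lhs = np.vdot(forward_op(u, mask), k)
rhs = np.vdot(u, adjoint_op(k, mask))
print(abs(lhs - rhs))
```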
## Import data
```
from tutorial1_classical_reg_methods.Tutorial_Codes import normalize, subsampling, subsampling_transposed, compute_differential_operators, function_TV_MRI_CP, function_TGV_MRI_CP
from scipy import sparse
import scipy.sparse.linalg
image=np.load('tutorial1_classical_reg_methods/img_example.npy')
image=np.abs(image[:,:,3])
image = normalize(image)
plt.figure(figsize = (7,7))
imgplot2 = plt.imshow(image)
imgplot2.set_cmap('gray')
```
## Simulate noisy data and subsampled data
Create noisy data $S \circ F x + \varepsilon = y^{\delta}$, where $x$ is the clean image and $\varepsilon \sim \mathcal{N}(0,\sigma^2)$ is normally distributed, centered complex noise.
```
mask = np.ones(np.shape(image))
mask[:,1:-1:3] = 0
Fx = np.fft.fft2(image,norm='ortho') #ortho means that the fft2 is unitary
(M,N) = image.shape
rate = 0.039 ##noise rate
noise = np.random.randn(M,N) + (1j)*np.random.randn(M,N) #cmplx noise
distorted_full = Fx + rate*noise
distorted = subsampling(distorted_full, mask)
zero_filling = np.real(np.fft.ifft2(subsampling_transposed(distorted, mask), norm = 'ortho'))
plt.figure(figsize = (7,7))
imgplot2 = plt.imshow(mask)
imgplot2.set_cmap('gray')
plt.figure(figsize = (7,7))
imgplot2 = plt.imshow(zero_filling)
imgplot2.set_cmap('gray')
```
## TV MRI reconstruction
```
x_0 = zero_filling
data = distorted
alpha = 0.025
tau = 1/np.sqrt(12)
sigma = tau
h = 1
max_it = 3000
tol = 1e-4 # algorithm stops if |x_k - x_{k+1}| < tol
x_TV = function_TV_MRI_CP(data,image,mask,x_0,tau,sigma,h,max_it,tol,alpha)
plt.figure(figsize = (7,7))
imgplot2 = plt.imshow(x_TV)
imgplot2.set_cmap('gray')
```
## TGV MRI reconstruction
```
alpha = 0.02
beta = 0.035
x_0 = zero_filling
data = distorted
tau = 1/np.sqrt(12)
sigma = tau
lambda_prox = 1
h = 1
tol = 1e-4
max_it = 2500
x_TGV = function_TGV_MRI_CP(data,image, mask,x_0,tau,sigma,lambda_prox,h,max_it,tol,beta,alpha)
plt.figure(figsize = (7,7))
imgplot2 = plt.imshow(x_TGV)
imgplot2.set_cmap('gray')
```
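The step sizes $\tau = \sigma = 1/\sqrt{12}$ used in both reconstructions are presumably chosen to satisfy the standard Chambolle-Pock convergence condition
\begin{equation}
\tau \sigma \|K\|^{2} \leq 1,
\end{equation}
where $K$ denotes the dualized (gradient-type) operator. With $\tau\sigma = 1/12$ this holds whenever $\|K\|^{2} \leq 12$, a commonly quoted bound for the discretized second-order TGV operator; for the TV problem it is conservative, since $\|\nabla\|^{2} \leq 8/h^{2}$.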
Now you can see all the reconstructions together:
```
plt.rcParams['figure.figsize'] = np.array([2, 2])*5
plt.rcParams['figure.dpi'] = 120
fig, axs = plt.subplots(ncols=2, nrows=2)
# remove ticks from plot
for ax in axs.flat:
ax.set(xticks=[], yticks=[])
axs[0,0].imshow(normalize(image), cmap='gray')
axs[0,0].set(xlabel='Clean Image')
axs[1,0].imshow(normalize(x_TV), cmap='gray')
axs[1,0].set(xlabel='TV Reconstruction, PSNR = ' + str(np.around(psnr(x_TV, image),decimals=2)))
axs[0,1].imshow(normalize(x_0), cmap='gray')
axs[0,1].set(xlabel = 'Zero Filling Solution , PSNR = ' + str(np.around(psnr(x_0, image),decimals=2)))
axs[1,1].imshow(normalize(x_TGV), cmap='gray')
axs[1,1].set(xlabel='TGV Reconstruction , PSNR = ' + str(np.around(psnr(x_TGV, image),decimals=2)))
```
|
github_jupyter
|
# Figure. X Inactivation
```
import cPickle
import datetime
import glob
import os
import random
import re
import subprocess
import cdpybio as cpb
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pybedtools as pbt
import scipy.stats as stats
import seaborn as sns
import statsmodels.api as sm
import statsmodels.formula.api as smf
import statsmodels as sms
import cardipspy as cpy
import ciepy
%matplotlib inline
%load_ext rpy2.ipython
import socket
if socket.gethostname() == 'fl-hn1' or socket.gethostname() == 'fl-hn2':
pbt.set_tempdir('/frazer01/home/cdeboever/tmp')
outdir = os.path.join(ciepy.root, 'output',
'figure_x_inactivation')
cpy.makedir(outdir)
private_outdir = os.path.join(ciepy.root, 'private_output',
'figure_x_inactivation')
cpy.makedir(private_outdir)
plt.rcParams['font.sans-serif'] = ['Arial']
plt.rcParams['font.size'] = 8
fn = os.path.join(ciepy.root, 'output', 'input_data', 'rsem_tpm.tsv')
tpm = pd.read_table(fn, index_col=0)
fn = os.path.join(ciepy.root, 'output', 'input_data', 'rnaseq_metadata.tsv')
rna_meta = pd.read_table(fn, index_col=0)
fn = os.path.join(ciepy.root, 'output', 'input_data', 'subject_metadata.tsv')
subject_meta = pd.read_table(fn, index_col=0)
fn = os.path.join(ciepy.root, 'output', 'input_data', 'wgs_metadata.tsv')
wgs_meta = pd.read_table(fn, index_col=0)
gene_info = pd.read_table(cpy.gencode_gene_info, index_col=0)
genes = pbt.BedTool(cpy.gencode_gene_bed)
fn = os.path.join(ciepy.root, 'output', 'input_data', 'cnvs.tsv')
cnvs = pd.read_table(fn, index_col=0)
fn = os.path.join(ciepy.root, 'output', 'x_inactivation', 'x_ase_exp.tsv')
x_exp = pd.read_table(fn, index_col=0)
fn = os.path.join(ciepy.root, 'output', 'x_inactivation', 'expression_densities.tsv')
pdfs = pd.read_table(fn, index_col=0)
pdfs.columns = ['No ASE', 'ASE']
fn = os.path.join(ciepy.root, 'output', 'input_data',
'mbased_major_allele_freq.tsv')
maj_af = pd.read_table(fn, index_col=0)
fn = os.path.join(ciepy.root, 'output', 'input_data',
'mbased_p_val_ase.tsv')
ase_pval = pd.read_table(fn, index_col=0)
locus_p = pd.Panel({'major_allele_freq':maj_af, 'p_val_ase':ase_pval})
locus_p = locus_p.swapaxes(0, 2)
snv_fns = glob.glob(os.path.join(ciepy.root, 'private_output', 'input_data', 'mbased_snv',
'*_snv.tsv'))
count_fns = glob.glob(os.path.join(ciepy.root, 'private_output', 'input_data', 'allele_counts',
'*mbased_input.tsv'))
snv_res = {}
for fn in snv_fns:
snv_res[os.path.split(fn)[1].split('_')[0]] = pd.read_table(fn, index_col=0)
count_res = {}
for fn in count_fns:
count_res[os.path.split(fn)[1].split('_')[0]] = pd.read_table(fn, index_col=0)
snv_p = pd.Panel(snv_res)
# We'll keep female subjects with no CNVs on the X chromosome.
sf = subject_meta[subject_meta.sex == 'F']
meta = sf.merge(rna_meta, left_index=True, right_on='subject_id')
s = set(meta.subject_id) & set(cnvs.ix[cnvs.chr == 'chrX', 'subject_id'])
meta = meta[meta.subject_id.apply(lambda x: x not in s)]
meta = meta.ix[[x for x in snv_p.items if x in meta.index]]
snv_p = snv_p.ix[meta.index]
locus_p = locus_p.ix[meta.index]
# Filter and take log.
tpm_f = tpm[meta[meta.sex == 'F'].index]
tpm_f = tpm_f[(tpm_f != 0).sum(axis=1) > 0]
log_tpm = np.log10(tpm_f + 1)
# Mean center.
log_tpm_c = (log_tpm.T - log_tpm.mean(axis=1)).T
# Variance normalize.
log_tpm_n = (log_tpm_c.T / log_tpm_c.std(axis=1)).T
single = locus_p.ix['071ca248-bcb1-484d-bff2-3aefc84f8688', :, :].dropna()
x_single = single[gene_info.ix[single.index, 'chrom'] == 'chrX']
notx_single = single[gene_info.ix[single.index, 'chrom'] != 'chrX']
t = locus_p.ix[:, :, 'major_allele_freq']
x_all = locus_p.ix[:, set(t.index) & set(gene_info[gene_info.chrom == 'chrX'].index), :]
notx_all = locus_p.ix[:, set(t.index) & set(gene_info[gene_info.chrom != 'chrX'].index), :]
genes_to_plot = ['XIST', 'TSIX']
t = pd.Series(gene_info.index, index=gene_info.gene_name)
exp = log_tpm_n.ix[t[genes_to_plot]].T
exp.columns = genes_to_plot
exp = exp.ix[x_all.items].sort_values(by='XIST', ascending=False)
sns.set_style('white')
```
## Paper
```
n = x_exp.shape[0]
print('Plotting mean expression for {} X chromosome genes.'.format(n))
n = sum(x_exp.mean_sig_exp < x_exp.mean_not_sig_exp)
print('{} of {} ({:.2f}%) genes had higher expression for samples without ASE.'.format(
n, x_exp.shape[0], n / float(x_exp.shape[0]) * 100))
fig = plt.figure(figsize=(6.85, 9), dpi=300)
gs = gridspec.GridSpec(1, 1)
ax = fig.add_subplot(gs[0, 0])
ax.text(0, 1, 'Figure 6',
size=16, va='top', )
ciepy.clean_axis(ax)
ax.set_xticks([])
ax.set_yticks([])
gs.tight_layout(fig, rect=[0, 0.90, 0.5, 1])
gs = gridspec.GridSpec(1, 1)
ax = fig.add_subplot(gs[0, 0])
ax.scatter(x_exp.mean_sig_exp, x_exp.mean_not_sig_exp, alpha=0.4, color='grey', s=10)
ax.set_ylabel('Mean expression,\nno ASE', fontsize=8)
ax.set_xlabel('Mean expression, ASE', fontsize=8)
xmin,xmax = ax.get_xlim()
ymin,ymax = ax.get_ylim()
plt.plot([min(xmin, ymin), max(xmax, ymax)], [min(xmin, ymin), max(xmax, ymax)], color='black', ls='--')
ax.set_xlim(-1, 1.75)
ax.set_ylim(-1, 1.75)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(0)
for t in ax.get_xticklabels() + ax.get_yticklabels():
t.set_fontsize(8)
gs.tight_layout(fig, rect=[0.02, 0.79, 0.32, 0.95])
gs = gridspec.GridSpec(1, 1)
ax = fig.add_subplot(gs[0, 0])
ax.hist(x_single.major_allele_freq, bins=np.arange(0.5, 1.05, 0.05), color='grey')
ax.set_xlim(0.5, 1)
ax.set_ylabel('Number of genes', fontsize=8)
ax.set_xlabel('Allelic imbalance fraction', fontsize=8)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_yticks(np.arange(0, 20, 4))
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(0)
for t in ax.get_xticklabels() + ax.get_yticklabels():
t.set_fontsize(8)
gs.tight_layout(fig, rect=[0.33, 0.79, 0.66, 0.95])
gs = gridspec.GridSpec(1, 1)
ax = fig.add_subplot(gs[0, 0])
ax.hist(notx_single.major_allele_freq, bins=np.arange(0.5, 1.05, 0.05), color='grey')
ax.set_xlim(0.5, 1)
ax.set_ylabel('Number of genes', fontsize=8)
ax.set_xlabel('Allelic imbalance fraction', fontsize=8)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(0)
ax.yaxis.set_major_formatter(ciepy.comma_format)
for t in ax.get_xticklabels() + ax.get_yticklabels():
t.set_fontsize(8)
gs.tight_layout(fig, rect=[0.66, 0.79, 1, 0.95])
gs = gridspec.GridSpec(1, 4, width_ratios=[0.5, 1.2, 3, 3])
ax = fig.add_subplot(gs[0, 0])
passage_im = ax.imshow(np.array([meta.ix[exp.index, 'passage'].values]).T,
aspect='auto', interpolation='nearest',
cmap=sns.palettes.cubehelix_palette(light=.95, as_cmap=True))
ciepy.clean_axis(ax)
ax.set_xlabel('Passage', fontsize=8)
ax = fig.add_subplot(gs[0, 1])
# Make norm.
vmin = np.floor(exp.min().min())
vmax = np.ceil(exp.max().max())
vmax = max([vmax, abs(vmin)])
vmin = vmax * -1
exp_norm = mpl.colors.Normalize(vmin, vmax)
exp_im = ax.imshow(exp, aspect='auto', interpolation='nearest',
norm=exp_norm, cmap=plt.get_cmap('RdBu_r'))
ciepy.clean_axis(ax)
ax.set_xticks([0, 1])
ax.set_xticklabels(exp.columns, fontsize=8)
for t in ax.get_xticklabels():
t.set_fontstyle('italic')
#t.set_rotation(30)
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(0)
percent_norm = mpl.colors.Normalize(0, 1)
ax = fig.add_subplot(gs[0, 2])
r = x_all.ix[:, :, 'major_allele_freq'].apply(lambda z: pd.cut(z[z.isnull() == False],
bins=np.arange(0.5, 1.05, 0.05)))
r = r.apply(lambda z: z.value_counts())
r = (r.T / r.max(axis=1)).T
x_ase_im = ax.imshow(r.ix[exp.index], aspect='auto', interpolation='nearest',
norm=percent_norm, cmap=sns.palettes.cubehelix_palette(start=0, rot=-0.5, as_cmap=True))
ciepy.clean_axis(ax)
xmin,xmax = ax.get_xlim()
ax.set_xticks(np.arange(xmin, xmax + 1, 2))
ax.set_xticklabels(np.arange(0.5, 1.05, 0.1), fontsize=8)#, rotation=30)
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(0)
ax.set_xlabel('Allelic imbalance fraction', fontsize=8)
ax = fig.add_subplot(gs[0, 3])
r = notx_all.ix[:, :, 'major_allele_freq'].apply(lambda z: pd.cut(z[z.isnull() == False],
bins=np.arange(0.5, 1.05, 0.05)))
r = r.apply(lambda z: z.value_counts())
r = (r.T / r.max(axis=1)).T
not_x_ase_im = ax.imshow(r.ix[exp.index], aspect='auto', interpolation='nearest',
norm=percent_norm, cmap=sns.palettes.cubehelix_palette(start=0, rot=-0.5, as_cmap=True))
ciepy.clean_axis(ax)
xmin,xmax = ax.get_xlim()
ax.set_xticks(np.arange(xmin, xmax + 1, 2))
ax.set_xticklabels(np.arange(0.5, 1.05, 0.1), fontsize=8)#, rotation=30)
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(0)
ax.set_xlabel('Allelic imbalance fraction', fontsize=8)
gs.tight_layout(fig, rect=[0, 0.45, 0.8, 0.8])
gs = gridspec.GridSpec(2, 2)
# Plot colormap for gene expression.
ax = fig.add_subplot(gs[0:2, 0])
cb = plt.colorbar(mappable=exp_im, cax=ax)
cb.solids.set_edgecolor("face")
cb.outline.set_linewidth(0)
for l in ax.get_yticklines():
l.set_markersize(0)
cb.set_label('$\log$ TPM $z$-score', fontsize=8)
for t in ax.get_xticklabels() + ax.get_yticklabels():
t.set_fontsize(8)
# Plot colormap for passage number.
ax = fig.add_subplot(gs[0, 1])
cb = plt.colorbar(mappable=passage_im, cax=ax)
cb.solids.set_edgecolor("face")
cb.outline.set_linewidth(0)
for l in ax.get_yticklines():
l.set_markersize(0)
cb.set_label('Passage number', fontsize=8)
cb.set_ticks(np.arange(12, 32, 4))
for t in ax.get_xticklabels() + ax.get_yticklabels():
t.set_fontsize(8)
# Plot colormap for ASE.
ax = fig.add_subplot(gs[1, 1])
cb = plt.colorbar(mappable=x_ase_im, cax=ax)
cb.solids.set_edgecolor("face")
cb.outline.set_linewidth(0)
for l in ax.get_yticklines():
l.set_markersize(0)
cb.set_label('Fraction of genes', fontsize=8)
cb.set_ticks(np.arange(0, 1.2, 0.2))
for t in ax.get_xticklabels() + ax.get_yticklabels():
t.set_fontsize(8)
gs.tight_layout(fig, rect=[0.8, 0.45, 1, 0.8])
t = fig.text(0.005, 0.93, 'A', weight='bold',
size=12)
t = fig.text(0.315, 0.93, 'B', weight='bold',
size=12)
t = fig.text(0.645, 0.93, 'C', weight='bold',
size=12)
t = fig.text(0.005, 0.79, 'D', weight='bold',
size=12)
t = fig.text(0.005, 0.44, 'E', weight='bold',
size=12)
t = fig.text(0.005, 0.22, 'F', weight='bold',
size=12)
plt.savefig(os.path.join(outdir, 'x_inactivation_skeleton.pdf'))
%%R
suppressPackageStartupMessages(library(Gviz))
t = x_all.ix[:, :, 'major_allele_freq']
r = gene_info.ix[t.index, ['start', 'end']]
%%R -i t,r
ideoTrack <- IdeogramTrack(genome = "hg19", chromosome = "chrX", fontsize=8, fontsize.legend=8,
fontcolor='black', cex=1, cex.id=1, cex.axis=1, cex.title=1)
mafTrack <- DataTrack(range=r, data=t, genome="hg19", type=c("p"), alpha=0.5, lwd=8,
span=0.05, chromosome="chrX", name="Allelic imbalance fraction", fontsize=8,
fontcolor.legend='black', col.axis='black', col.title='black',
background.title='transparent', cex=1, cex.id=1, cex.axis=1, cex.title=1,
fontface=1, fontface.title=1, alpha.title=1)
fn = os.path.join(outdir, 'p_maf.pdf')
%%R -i fn
pdf(fn, 6.85, 2)
plotTracks(c(ideoTrack, mafTrack), from=0, to=58100000, col.title='black')
dev.off()
fn = os.path.join(outdir, 'q_maf.pdf')
%%R -i fn
pdf(fn, 6.85, 2)
plotTracks(c(ideoTrack, mafTrack), from=63000000, to=155270560)
dev.off()
%%R -i fn
plotTracks(c(ideoTrack, mafTrack), from=63000000, to=155270560)
```
## Presentation
```
# Set fontsize
fs = 10
fig = plt.figure(figsize=(6.85, 5), dpi=300)
gs = gridspec.GridSpec(1, 1)
ax = fig.add_subplot(gs[0, 0])
ax.scatter(x_exp.mean_sig_exp, x_exp.mean_not_sig_exp, alpha=0.4, color='grey', s=10)
ax.set_ylabel('Mean expression,\nno ASE', fontsize=fs)
ax.set_xlabel('Mean expression, ASE', fontsize=fs)
xmin,xmax = ax.get_xlim()
ymin,ymax = ax.get_ylim()
plt.plot([min(xmin, ymin), max(xmax, ymax)], [min(xmin, ymin), max(xmax, ymax)], color='black', ls='--')
ax.set_xlim(-1, 1.75)
ax.set_ylim(-1, 1.75)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(0)
for t in ax.get_xticklabels() + ax.get_yticklabels():
t.set_fontsize(fs)
gs.tight_layout(fig, rect=[0.02, 0.62, 0.32, 0.95])
gs = gridspec.GridSpec(1, 1)
ax = fig.add_subplot(gs[0, 0])
ax.hist(x_single.major_allele_freq, bins=np.arange(0.5, 1.05, 0.05), color='grey')
ax.set_xlim(0.5, 1)
ax.set_ylabel('Number of genes', fontsize=fs)
ax.set_xlabel('Allelic imbalance fraction', fontsize=fs)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_yticks(np.arange(0, 20, 4))
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(0)
for t in ax.get_xticklabels() + ax.get_yticklabels():
t.set_fontsize(fs)
gs.tight_layout(fig, rect=[0.33, 0.62, 0.66, 0.95])
gs = gridspec.GridSpec(1, 1)
ax = fig.add_subplot(gs[0, 0])
ax.hist(notx_single.major_allele_freq, bins=np.arange(0.5, 1.05, 0.05), color='grey')
ax.set_xlim(0.5, 1)
ax.set_ylabel('Number of genes', fontsize=fs)
ax.set_xlabel('Allelic imbalance fraction', fontsize=fs)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(0)
ax.yaxis.set_major_formatter(ciepy.comma_format)
for t in ax.get_xticklabels() + ax.get_yticklabels():
t.set_fontsize(fs)
gs.tight_layout(fig, rect=[0.66, 0.62, 1, 0.95])
#gs.tight_layout(fig, rect=[0, 0.62, 1, 1.0])
# t = fig.text(0.005, 0.88, 'A', weight='bold',
# size=12)
# t = fig.text(0.315, 0.88, 'B', weight='bold',
# size=12)
# t = fig.text(0.675, 0.88, 'C', weight='bold',
# size=12)
gs = gridspec.GridSpec(1, 4, width_ratios=[0.5, 1.2, 3, 3])
ax = fig.add_subplot(gs[0, 0])
passage_im = ax.imshow(np.array([meta.ix[exp.index, 'passage'].values]).T,
aspect='auto', interpolation='nearest',
cmap=sns.palettes.cubehelix_palette(light=.95, as_cmap=True))
ciepy.clean_axis(ax)
ax.set_xlabel('Passage')
ax = fig.add_subplot(gs[0, 1])
# Make norm.
vmin = np.floor(exp.min().min())
vmax = np.ceil(exp.max().max())
vmax = max([vmax, abs(vmin)])
vmin = vmax * -1
exp_norm = mpl.colors.Normalize(vmin, vmax)
exp_im = ax.imshow(exp, aspect='auto', interpolation='nearest',
norm=exp_norm, cmap=plt.get_cmap('RdBu_r'))
ciepy.clean_axis(ax)
ax.set_xticks([0, 1])
ax.set_xticklabels(exp.columns, fontsize=fs)
for t in ax.get_xticklabels():
t.set_fontstyle('italic')
t.set_rotation(30)
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(0)
percent_norm = mpl.colors.Normalize(0, 1)
ax = fig.add_subplot(gs[0, 2])
r = x_all.ix[:, :, 'major_allele_freq'].apply(lambda z: pd.cut(z[z.isnull() == False],
bins=np.arange(0.5, 1.05, 0.05)))
r = r.apply(lambda z: z.value_counts())
r = (r.T / r.max(axis=1)).T
x_ase_im = ax.imshow(r.ix[exp.index], aspect='auto', interpolation='nearest',
norm=percent_norm, cmap=sns.palettes.cubehelix_palette(start=0, rot=-0.5, as_cmap=True))
ciepy.clean_axis(ax)
xmin,xmax = ax.get_xlim()
ax.set_xticks(np.arange(xmin, xmax + 1, 2))
ax.set_xticklabels(np.arange(0.5, 1.05, 0.1), fontsize=fs)#, rotation=30)
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(0)
ax.set_xlabel('Allelic imbalance fraction', fontsize=fs)
ax.set_title('X Chromosome')
ax = fig.add_subplot(gs[0, 3])
r = notx_all.ix[:, :, 'major_allele_freq'].apply(lambda z: pd.cut(z[z.isnull() == False],
bins=np.arange(0.5, 1.05, 0.05)))
r = r.apply(lambda z: z.value_counts())
r = (r.T / r.max(axis=1)).T
not_x_ase_im = ax.imshow(r.ix[exp.index], aspect='auto', interpolation='nearest',
norm=percent_norm, cmap=sns.palettes.cubehelix_palette(start=0, rot=-0.5, as_cmap=True))
ciepy.clean_axis(ax)
xmin,xmax = ax.get_xlim()
ax.set_xticks(np.arange(xmin, xmax + 1, 2))
ax.set_xticklabels(np.arange(0.5, 1.05, 0.1), fontsize=fs)#, rotation=30)
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(0)
ax.set_xlabel('Allelic imbalance fraction', fontsize=fs)
ax.set_title('Autosomes')
# t = fig.text(0.005, 0.615, 'D', weight='bold',
# size=12)
gs.tight_layout(fig, rect=[0, 0, 0.75, 0.62])
gs = gridspec.GridSpec(2, 2)
# Plot colormap for gene expression.
ax = fig.add_subplot(gs[0:2, 0])
cb = plt.colorbar(mappable=exp_im, cax=ax)
cb.solids.set_edgecolor("face")
cb.outline.set_linewidth(0)
for l in ax.get_yticklines():
l.set_markersize(0)
cb.set_label('$\log$ TPM $z$-score', fontsize=fs)
for t in ax.get_xticklabels() + ax.get_yticklabels():
t.set_fontsize(fs)
# Plot colormap for passage number.
ax = fig.add_subplot(gs[0, 1])
cb = plt.colorbar(mappable=passage_im, cax=ax)
cb.solids.set_edgecolor("face")
cb.outline.set_linewidth(0)
for l in ax.get_yticklines():
l.set_markersize(0)
cb.set_label('Passage number', fontsize=fs)
cb.set_ticks(np.arange(12, 32, 4))
for t in ax.get_xticklabels() + ax.get_yticklabels():
t.set_fontsize(fs)
# Plot colormap for ASE.
ax = fig.add_subplot(gs[1, 1])
cb = plt.colorbar(mappable=x_ase_im, cax=ax)
cb.solids.set_edgecolor("face")
cb.outline.set_linewidth(0)
for l in ax.get_yticklines():
l.set_markersize(0)
cb.set_label('Fraction of genes', fontsize=fs)
cb.set_ticks(np.arange(0, 1.2, 0.2))
for t in ax.get_xticklabels() + ax.get_yticklabels():
t.set_fontsize(fs)
gs.tight_layout(fig, rect=[0.75, 0, 1, 0.62])
plt.savefig(os.path.join(outdir, 'x_inactivation_hists_heatmaps_presentation.pdf'))
fig, axs = plt.subplots(1, 2, figsize=(6, 2.4), dpi=300)
ax = axs[1]
ax.hist(x_single.major_allele_freq, bins=np.arange(0.5, 1.05, 0.05), color='grey')
ax.set_xlim(0.5, 1)
ax.set_ylabel('Number of genes', fontsize=fs)
ax.set_xlabel('Allelic imbalance fraction', fontsize=fs)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_yticks(np.arange(0, 20, 4))
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(0)
for t in ax.get_xticklabels() + ax.get_yticklabels():
t.set_fontsize(fs)
ax.set_title('X Chromosome', fontsize=fs)
ax = axs[0]
ax.hist(notx_single.major_allele_freq, bins=np.arange(0.5, 1.05, 0.05), color='grey')
ax.set_xlim(0.5, 1)
ax.set_ylabel('Number of genes', fontsize=fs)
ax.set_xlabel('Allelic imbalance fraction', fontsize=fs)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(0)
ax.yaxis.set_major_formatter(ciepy.comma_format)
for t in ax.get_xticklabels() + ax.get_yticklabels():
t.set_fontsize(fs)
ax.set_title('Autosomes', fontsize=fs)
fig.tight_layout()
plt.savefig(os.path.join(outdir, 'mhf_hists_presentation.pdf'))
t = x_all.ix[:, :, 'major_allele_freq']
r = gene_info.ix[t.index, ['start', 'end']]
%%R -i t,r
ideoTrack <- IdeogramTrack(genome = "hg19", chromosome = "chrX", fontsize=16, fontsize.legend=16,
fontcolor='black', cex=1, cex.id=1, cex.axis=1, cex.title=1)
mafTrack <- DataTrack(range=r, data=t, genome="hg19", type=c("smooth", "p"), alpha=0.75, lwd=8,
span=0.05, chromosome="chrX", name="Allelic imbalance fraction", fontsize=12,
fontcolor.legend='black', col.axis='black', col.title='black',
background.title='transparent', cex=1, cex.id=1, cex.axis=1, cex.title=1,
fontface=1, fontface.title=1, alpha.title=1)
fn = os.path.join(outdir, 'p_maf_presentation.pdf')
%%R -i fn
pdf(fn, 10, 3)
plotTracks(c(ideoTrack, mafTrack), from=0, to=58100000, col.title='black')
dev.off()
fn = os.path.join(outdir, 'q_maf_presentation.pdf')
%%R -i fn
pdf(fn, 10, 3)
plotTracks(c(ideoTrack, mafTrack), from=63000000, to=155270560)
dev.off()
```
|
github_jupyter
|