```
import spynnaker8 as p
import time
from matplotlib import pyplot as plt
import numpy as np
import spynnaker8.spynnaker_plotting as pl
import spynnaker8.utilities.neo_convertor as convert
from pyNN.utility.plotting import Figure, Panel
start_time = time.time()
#time of simulation
TotalDuration = 100.0
#parameters
a = 0.03
b = -2
c = -50
d = 100
#Constant current
current_pulse = 250
current_pulse = current_pulse+100
#Number of output neurons
NumYCells = 10
#Model used
model_Izh = p.Izhikevich
'''Starting the Spinnaker Simulation'''
p.setup(timestep=0.1,min_delay=1.0,max_delay=14.0)
#number of neurons per core
p.set_number_of_neurons_per_core(p.SpikeSourceArray,50)
#setting up the parameters for Izh
cell_params = {'a':a, 'b':b, 'c':c, 'd':d,'i_offset':current_pulse}
y_Izh_population = p.Population(NumYCells, model_Izh(**cell_params),label='Izh_neuron_input')
#recording the spikes and voltage
y_Izh_population.record(["spikes","v"])
#running simulation for total duration
p.run(TotalDuration)
#extracting the membrane potential (mV) and spike data
y_izh_data = y_Izh_population.get_data(["v","spikes"])
Figure(
#raster plot of the Izhikevich population spike times
Panel(y_izh_data.segments[0].spiketrains,yticks=True,markersize=0.4, xlim=(0,TotalDuration)),
title="Izh").save("Izh_output.png")
plt.show()
#release spinnaker machine
p.end()
import spynnaker8 as p
import time
from matplotlib import pyplot as plt
import numpy as np
import spynnaker8.spynnaker_plotting as pl
import spynnaker8.utilities.neo_convertor as convert
from pyNN.utility.plotting import Figure, Panel
start_time = time.time()
#time of simulation
TotalDuration = 100.0
#parameters
a = 0.03
b = -2
c = -50
d = 100
#Constant current
current_pulse = 250
current_pulse = current_pulse+100
#Number of output neurons
NumYCells = 10
#Model used
model_Izh = p.Izhikevich
'''Starting the Spinnaker Simulation'''
p.setup(timestep=0.1,min_delay=1.0,max_delay=14.0)
#number of neurons per core
p.set_number_of_neurons_per_core(p.SpikeSourceArray,50)
#setting up the parameters for Izh
cell_params = {'a':a, 'b':b, 'c':c, 'd':d,'i_offset':current_pulse}
y_Izh_population = p.Population(NumYCells, model_Izh(**cell_params),label='Izh_neuron_input')
#recording the spikes and voltage
y_Izh_population.record(["spikes","v"])
#running simulation for total duration
p.run(TotalDuration)
#extracting the membrane potential (mV) and spike data
# y_izh_data = y_Izh_population.get_data(["v","spikes"])
data = y_Izh_population.get_data().segments[0]
vm = data.filter(name="v")[0]
Figure(
Panel(vm, ylabel="Membrane potential (mV)"),
Panel(data.spiketrains, xlabel="Time (ms)", xticks=True)
).save("simulation_results.png")
# Figure(
# #raster plot of the presynaptic neuron spike times
# Panel(y_izh_data.segments[0].spiketrains,yticks=True,markersize=0.4, xlim=(0,TotalDuration)),
# title="Izh").save("Izh_output.png")
# plt.show()
#release spinnaker machine
p.end()
```
# Hardy's Paradox
Hardy's Paradox nicely illustrates the fundamental difference between Quantum Mechanics and classical physics. In particular, it can be used to discuss the claim made by Einstein, Podolsky and Rosen ("EPR") back in 1935. They objected to the uncertainty seen in quantum mechanics, and thought it meant that the theory was incomplete. They thought that a qubit should always know what output it would give for both kinds of measurement, and that it only seems random because some information is hidden from us. As Einstein said: God does not play dice with the universe.
The idea and part of the source code for this tutorial was published in a previous version of the [Qiskit Textbook](https://qiskit.org/textbook/), in the (now removed) chapter [The Unique Properties of Qubits](https://github.com/Qiskit/qiskit-textbook/blob/master/content/ch-states/old-unique-properties-qubits.ipynb).
This variant of Hardy's Paradox is a relatively simple example for an entangled qubit state that couldn't be reproduced by a few classical bits and a random number generator. It shows that quantum variables aren't just classical variables with some randomness bundled in.
(hit space or right arrow to move to next slide)
## Usage instructions for the user interface
1. "Ctrl -" and "Ctrl +" (or "command -", "command +") adjust the zoom level to fit the text to the browser window
2. Use "space" and "shift space" to navigate through the slides
3. "Shift Enter" executes the interactive cells (might need to click the cell first)
4. Execute the interactive cells on each slide ("In [1]:", etc.)
5. In case a cell is not formatted correctly, try to double-click and then "Shift Enter" to re-execute
6. Interactive cells can be modified, if needed
7. "X" at the top left exits the slideshow and enters the Jupyter notebook interface
## Manufacturing Cars
Let's assume we build cars.
The cars have a color (red or blue) and an engine type (gasoline or diesel).
The director of the production plant assures us that the following is always true for the first two cars that leave the plant each morning:
1. If we look at the colors of both cars, it never happens that both are red.
2. If the engine type of one car is diesel, then the other car is red.
Let's encode the two cars with two qubits, and the colors by a measurement in the (standard) Z Basis, where 0 relates to red and 1 relates to blue. The engine type is encoded by a measurement in the X Basis, where 0 relates to gasoline and 1 relates to diesel.
Or in short: <br>
Z (color): 0 = red, 1 = blue <br>
X (engine type): 0 = gasoline, 1 = diesel
We now initialize the quantum circuit and create a specific state of the two qubits.
We will show that this state satisfies the two conditions mentioned before.
We will then analyze the question of whether both cars can be diesel.
```
from qiskit import *
from qiskit.tools.visualization import plot_histogram
```
The following circuit creates a specific entangled state of the two qubits.
```
# hit "shift + Enter" to execute this cell
q = QuantumRegister(2) # create a quantum register with two qubits
# create a classical register that will hold the results of the measurement
c = ClassicalRegister(2)
qc_hardy = QuantumCircuit(q, c)
qc_hardy.ry(1.911,q[1])
qc_hardy.cx(q[1],q[0])
qc_hardy.ry(0.785,q[0])
qc_hardy.cx(q[1],q[0])
qc_hardy.ry(2.356,q[0])
qc_hardy.draw(output='mpl')
```
Let's see what happens if we look at the color of both cars, i.e. if we make a Z measurement on each of the qubits. <br>
A result of 00 would indicate that both cars are red, which is not allowed by rule #1.
```
measurements = QuantumCircuit(q,c)
# z measurement on both qubits
measurements.measure(q[0],c[0])
measurements.measure(q[1],c[1])
qc = qc_hardy + measurements
print('Results for two z (=color) measurements:')
plot_histogram(execute(qc,Aer.get_backend('qasm_simulator')).result().get_counts())
```
The count of 00 is zero, and so these qubits do indeed satisfy property 1.
Next, let's see the results of an x (engine type) measurement of one and a z (color) measurement of the other.<br>
A result of 11 would indicate that car 1 is a diesel and car 2 is blue, which is not allowed by rule #2.
```
measurements = QuantumCircuit(q,c)
# x measurement on qubit 0
measurements.h(q[0])
measurements.measure(q[0],c[0])
# z measurement on qubit 1
measurements.measure(q[1],c[1])
qc = qc_hardy + measurements
print('Results for an x (engine type) measurement on qubit 0 and a z (color) measurement on qubit 1:')
plot_histogram(execute(qc,Aer.get_backend('qasm_simulator')).result().get_counts())
```
The count of 11 is zero.
If we also show that the same is true if we measure the other way round (a z measurement on qubit 0 and an x measurement on qubit 1), we have shown that the cars (qubits) satisfy property #2.
```
measurements = QuantumCircuit(q,c)
# z measurement on qubit 0
measurements.measure(q[0],c[0])
# x measurement on qubit 1
measurements.h(q[1])
measurements.measure(q[1],c[1])
qc = qc_hardy + measurements
print('Results for a z (color) measurement on qubit 0 and an x (engine type) measurement on qubit 1:')
plot_histogram(execute(qc,Aer.get_backend('qasm_simulator')).result().get_counts())
```
As the result 11 never occurs, property #2 also holds true.
What can we now infer (classically) about the engine types of both cars?
Let's first recall the properties we have confirmed:
1. If we look at the colors of the cars, it never happens that both are red.
2. If the engine type of one car is diesel, then the other car is red.
Let's assume we measure the engine type for both cars and both would be diesel. Then by applying property #2, we can deduce what the result would have been if we had made color measurements instead: We would have gotten an output of red for both.
However, this result is impossible according to property #1. We can therefore conclude that it must be impossible that both cars are diesel.
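This classical argument can also be checked mechanically. The following is a small pure-Python sketch (an addition, not part of the original notebook) that enumerates every classical assignment of color and engine type to the two cars, keeps those satisfying properties #1 and #2, and counts how many of them have two diesels (the answer is zero).
```
from itertools import product

# Enumerate all classical assignments of color and engine type to the two cars
valid = []
for c1, c2, e1, e2 in product(["red", "blue"], ["red", "blue"],
                              ["gasoline", "diesel"], ["gasoline", "diesel"]):
    rule1 = not (c1 == "red" and c2 == "red")            # never both red
    rule2 = ((e1 != "diesel" or c2 == "red") and         # a diesel implies that
             (e2 != "diesel" or c1 == "red"))            # the other car is red
    if rule1 and rule2:
        valid.append((c1, c2, e1, e2))

print("Classical assignments satisfying both rules:", len(valid))
print("Of these, assignments where both cars are diesel:",
      sum(e1 == e2 == "diesel" for _, _, e1, e2 in valid))
```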
But now let's do a measurement of the engine type for both cars, i.e. a measurement in the x basis for both qubits.
```
measurements = QuantumCircuit(q,c)
measurements.h(q[0])
measurements.measure(q[0],c[0])
measurements.h(q[1])
measurements.measure(q[1],c[1])
qc = qc_hardy + measurements
print('Results for two x (engine type) measurements on both qubits:')
plot_histogram(execute(qc,Aer.get_backend('qasm_simulator')).result().get_counts())
```
The result is surprising, because in a few cases we actually measured 11, which encodes the "impossible case" that both cars are diesel.
We reasoned that, given properties 1 and 2, it would be impossible to get the output 11 if we measure engine type for both cars. From the results above, we see that our reasoning was not correct: one in every dozen results will have this 'impossible' result.
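The "one in every dozen" figure is not a simulator artefact; it can be reproduced analytically. The sketch below (an addition using plain NumPy rather than Qiskit) rebuilds the state prepared by `qc_hardy`, assuming Qiskit's little-endian qubit ordering, and computes the exact probability of the 11 outcome when both qubits are measured in the x basis.
```
import numpy as np

def ry(theta):
    # single-qubit y-rotation matrix
    return np.array([[np.cos(theta / 2), -np.sin(theta / 2)],
                     [np.sin(theta / 2),  np.cos(theta / 2)]])

I2 = np.eye(2)
H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
# CNOT with control qubit 1 and target qubit 0, basis ordered as |q1 q0>
cx_10 = np.array([[1, 0, 0, 0],
                  [0, 1, 0, 0],
                  [0, 0, 0, 1],
                  [0, 0, 1, 0]])

state = np.array([1.0, 0.0, 0.0, 0.0])       # start in |00>
state = np.kron(ry(1.911), I2) @ state        # ry on qubit 1
state = cx_10 @ state
state = np.kron(I2, ry(0.785)) @ state        # ry on qubit 0
state = cx_10 @ state
state = np.kron(I2, ry(2.356)) @ state        # ry on qubit 0

# An x measurement on both qubits = apply H to both, then measure in the z basis
probs = np.abs(np.kron(H, H) @ state) ** 2
print("P(11) for two x measurements:", probs[3])   # approximately 1/12
```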
How can we explain this?
## Background on Hardy's Paradox
In their famous paper in 1935, EPR essentially claimed that qubits can indeed be described by some form of classical variable. They didn’t know how to do it, but they were sure it could be done. Then quantum mechanics could be replaced by a much nicer and more sensible theory.
It took until 1964 to show that they were wrong. J. S. Bell proved that quantum variables behaved in a way that was fundamentally unique. Since then, many new ways have been found to prove this, and extensive experiments have been done to show that this is exactly the way the universe works. We'll now consider a simple demonstration, using a variant of Hardy’s paradox.
## What went wrong?
Our mistake was in the following piece of reasoning.
* By applying property 2 we can deduce what the result would have been if we had made z measurements instead
We used our knowledge of the x (engine type) outputs to work out what the z (color) outputs would have been. Once we'd done that, we assumed that we were certain about the value of both.
Our logic would be completely valid if we weren’t reasoning about quantum objects.
But as D. Mermin concludes at the end of his excellent book "...", for quantum objects you have to accept that "what didn't happen, didn't happen", i.e. we cannot make assumptions about a measurement that wasn't done.
This is (part of) what makes quantum computers able to outperform classical computers. It leads to effects that allow programs made with quantum variables to solve problems in ways that those with normal variables cannot. But just because qubits don’t follow the same logic as normal computers, it doesn’t mean they defy logic entirely. They obey the definite rules laid out by quantum mechanics.
```
import qiskit
qiskit.__qiskit_version__
```
## BACKUP / OLD
Hadamard-Gate maps $\;|0\rangle\;$ to $\;\frac{|0\rangle + |1\rangle}{\sqrt{2}}\;\;$ and $\;\;|1\rangle\;$ to $\;\frac{|0\rangle - |1\rangle}{\sqrt{2}}$.
If we can show that
$$ H(\; id( H(|0\rangle) ) \;) = |0\rangle\, $$
and
$$ H(\;\, X( H(|0\rangle) ) \;) = |0\rangle, $$
it becomes clear that if A applies an H-Gate in both of her moves, she wins the game - independent of the move of B (X or id).
Remember: Heads is encoded by $|0\rangle$, Tails encoded by $|1\rangle$.
The first equation holds because:
\begin{align*}
H(\; id(\; H(|0\rangle) \;)\; )
= &\;\; H(\; H(|0\rangle)\; ) \\
= &\;\; H(\; \frac{|0\rangle + |1\rangle}{\sqrt{2}}\;) \\
= &\;\; \frac{1}{\sqrt{2}}\;(\; H(|0\rangle) + H(|1\rangle) \;) \\
= &\;\; \frac{1}{\sqrt{2}}\;(\;\frac{|0\rangle + |1\rangle}{\sqrt{2}} + \frac{|0\rangle - |1\rangle}{\sqrt{2}}\;) \\
= &\;\; \frac{1}{{2}}\; (\;|0\rangle + |1\rangle + |0\rangle - |1\rangle\; )\\
= &\;\; |0\rangle
\end{align*}
In case B chooses to use an X-Gate instead of id, the following identity
$$ X(\; H(|0\rangle) \;) = X\; (\;\frac{|0\rangle + |1\rangle}{\sqrt{2}}\; ) = \frac{|1\rangle + |0\rangle}{\sqrt{2}} = H(|0\rangle) $$
can be used to show that the final state is $ |0\rangle$:
$$ H(\; X( H(|0\rangle) ) \;) = H(\; H(|0\rangle)\; ) = |0\rangle $$
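Both identities can also be verified numerically; here is a short NumPy check (an addition, not part of the original slides).
```
import numpy as np

ket0 = np.array([1.0, 0.0])
H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
X = np.array([[0, 1], [1, 0]])
Id = np.eye(2)

print(H @ Id @ H @ ket0)   # ~[1, 0], i.e. |0>
print(H @ X @ H @ ket0)    # ~[1, 0], i.e. |0>
```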
[These charts](https://github.com/JanLahmann/Fun-with-Quantum/raw/master/QuantumTheory-for-QuantumCoinGame.pdf) explain a bit more of the quantum theory and formalism required to prove the above identities, in case you are interested.
# Convolutional Neural Networks
In this notebook, I'll try converting radio images into useful features using a simple convolutional neural network in Keras. The best kinds of CNN to use are apparently fast region-based CNNs, but because computer vision is hard and somewhat off-topic I'll instead be doing this pretty naïvely. The [Keras MNIST example](https://github.com/fchollet/keras/blob/master/examples/mnist_cnn.py) will be a good starting point.
I'll also use SWIRE to find potential hosts (notebook 13) and pull out radio images surrounding them. I'll be using the frozen ATLAS classifications that I prepared earlier (notebook 12).
```
import collections
import io
from pprint import pprint
import sqlite3
import sys
import warnings
import astropy.io.votable
import astropy.wcs
import matplotlib.pyplot
import numpy
import requests
import requests_cache
import sklearn.cross_validation
%matplotlib inline
sys.path.insert(1, '..')
import crowdastro.data
import crowdastro.labels
import crowdastro.rgz_analysis.consensus
import crowdastro.show
warnings.simplefilter('ignore', UserWarning) # astropy always raises warnings on Windows.
requests_cache.install_cache(cache_name='gator_cache', backend='sqlite', expire_after=None)
def get_potential_hosts(subject):
if subject['metadata']['source'].startswith('C'):
# CDFS
catalog = 'chandra_cat_f05'
else:
# ELAIS-S1
catalog = 'elaiss1_cat_f05'
query = {
'catalog': catalog,
'spatial': 'box',
'objstr': '{} {}'.format(*subject['coords']),
'size': '120',
'outfmt': '3',
}
url = 'http://irsa.ipac.caltech.edu/cgi-bin/Gator/nph-query'
r = requests.get(url, params=query)
votable = astropy.io.votable.parse_single_table(io.BytesIO(r.content), pedantic=False)
ras = votable.array['ra']
decs = votable.array['dec']
# Convert to px.
fits = crowdastro.data.get_ir_fits(subject)
wcs = astropy.wcs.WCS(fits.header)
xs, ys = wcs.all_world2pix(ras, decs, 0)
return numpy.array((xs, ys)).T
def get_true_hosts(subject, potential_hosts, conn):
consensus_xs = []
consensus_ys = []
consensus = crowdastro.labels.get_subject_consensus(subject, conn, 'atlas_classifications')
true_hosts = {} # Maps radio signature to (x, y) tuples.
for radio, (x, y) in consensus.items():
if x is not None and y is not None:
closest = None
min_distance = float('inf')
for host in potential_hosts:
dist = numpy.hypot(x - host[0], y - host[1])
if dist < min_distance:
closest = host
min_distance = dist
true_hosts[radio] = closest
return true_hosts
```
## Training data
The first step is to separate out all the training data. I'm well aware that having too much training data at once will cause Python to run out of memory, so I'll need to figure out how to deal with that when I get to it.
For each potential host, I'll pull out a $20 \times 20$, $40 \times 40$, and $80 \times 80$ patch of radio image. These numbers are totally arbitrary but they seem like nice sizes. Note that this will miss really spread out black hole jets. I'm probably fine with that.
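To make the patch idea concrete, a hypothetical helper along the following lines could cut out the differently sized windows around a host position (the function name and the row/column centring convention here are illustrative assumptions; the extraction actually used later in this notebook lives in `get_training_data`).
```
def extract_patches(radio_image, x, y, sizes=(20, 40, 80)):
    # Cut square patches of several side lengths centred on pixel (x, y).
    patches = []
    for size in sizes:
        half = size // 2
        patches.append(radio_image[int(y) - half:int(y) + half,
                                   int(x) - half:int(x) + half])
    return patches
```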
```
subject = crowdastro.data.db.radio_subjects.find_one({'zooniverse_id': 'ARG0003rga'})
crowdastro.show.subject(subject)
matplotlib.pyplot.show()
crowdastro.show.radio(subject)
matplotlib.pyplot.show()
potential_hosts = get_potential_hosts(subject)
conn = sqlite3.connect('../crowdastro-data/processed.db')
true_hosts = {tuple(i) for i in get_true_hosts(subject, potential_hosts, conn).values()}
conn.close()
xs = []
ys = []
for x, y in true_hosts:
xs.append(x)
ys.append(y)
crowdastro.show.subject(subject)
matplotlib.pyplot.scatter(xs, ys, c='r', s=100)
matplotlib.pyplot.show()
def get_training_data(subject, potential_hosts, true_hosts):
radio_image = crowdastro.data.get_radio(subject, size='5x5')
training_data = []
radius = 40
padding = 150
for host_x, host_y in potential_hosts:
patch_80 = radio_image[int(host_x - radius + padding) : int(host_x + radius + padding),
int(host_y - radius + padding) : int(host_y + radius + padding)]
classification = (host_x, host_y) in true_hosts
training_data.append((patch_80, classification))
return training_data
patches, classifications = zip(*get_training_data(subject, potential_hosts, true_hosts))
```
Now, I'll run this over the ATLAS data.
```
conn = sqlite3.connect('../crowdastro-data/processed.db')
training_inputs = []
training_outputs = []
for index, subject in enumerate(crowdastro.data.get_all_subjects(atlas=True)):
print('Extracting training data from ATLAS subject #{}'.format(index))
potential_hosts = get_potential_hosts(subject)
true_hosts = {tuple(i) for i in get_true_hosts(subject, potential_hosts, conn).values()}
patches, classifications = zip(*get_training_data(subject, potential_hosts, true_hosts))
training_inputs.extend(patches)
training_outputs.extend(classifications)
conn.close()
```
Keras doesn't support class weights, so I need to downsample the non-host galaxies.
```
n_hosts = sum(training_outputs)
n_not_hosts = len(training_outputs) - n_hosts
n_to_discard = n_not_hosts - n_hosts
new_training_inputs = []
new_training_outputs = []
for inp, out in zip(training_inputs, training_outputs):
if not out and n_to_discard > 0:
n_to_discard -= 1
else:
new_training_inputs.append(inp)
new_training_outputs.append(out)
print(sum(new_training_outputs))
print(len(new_training_outputs))
training_inputs = numpy.array(new_training_inputs)
training_outputs = numpy.array(new_training_outputs, dtype=float)
```
## Convolutional neural network
The basic structure will be as follows:
- An input layer.
- A 2D convolution layer with 32 filters and a $10 \times 10$ kernel. (This is the same size kernel that Radio Galaxy Zoo uses for their peak detection.)
- A relu activation layer.
- A max pooling layer with pool size 5.
- A 25% dropout layer.
- A flatten layer.
- A dense layer with 64 nodes.
- A relu activation layer.
- A dense layer with 1 node.
- A sigmoid activation layer.
I may try to split the input into three images of different sizes in future.
```
import keras.layers.convolutional
import keras.layers.core
import keras.models
model = keras.models.Sequential()
n_filters = 32
conv_size = 10
pool_size = 5
dropout = 0.25
hidden_layer_size = 64
model.add(keras.layers.convolutional.Convolution2D(n_filters, conv_size, conv_size,
border_mode='valid',
input_shape=(1, 80, 80)))
model.add(keras.layers.core.Activation('relu'))
model.add(keras.layers.convolutional.MaxPooling2D(pool_size=(pool_size, pool_size)))
model.add(keras.layers.convolutional.Convolution2D(n_filters, conv_size, conv_size,
border_mode='valid',))
model.add(keras.layers.core.Activation('relu'))
model.add(keras.layers.convolutional.MaxPooling2D(pool_size=(pool_size, pool_size)))
model.add(keras.layers.core.Dropout(dropout))
model.add(keras.layers.core.Flatten())
model.add(keras.layers.core.Dense(hidden_layer_size))
model.add(keras.layers.core.Activation('sigmoid'))
model.add(keras.layers.core.Dense(1))
model.add(keras.layers.core.Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adadelta')
```
Now we can train it!
```
xs_train, xs_test, ts_train, ts_test = sklearn.cross_validation.train_test_split(
training_inputs, training_outputs, test_size=0.1, random_state=0, stratify=training_outputs)
image_size = xs_train.shape[1:]
xs_train = xs_train.reshape(xs_train.shape[0], 1, image_size[0], image_size[1])
xs_test = xs_test.reshape(xs_test.shape[0], 1, image_size[0], image_size[1])
xs_train.shape
model.fit(xs_train, ts_train)
```
Let's see some filters.
```
get_convolutional_output = keras.backend.function([model.layers[0].input],
[model.layers[2].get_output()])
model.get_weights()[2].shape
figure = matplotlib.pyplot.figure(figsize=(15, 15))
for i in range(32):
ax = figure.add_subplot(8, 4, i+1)
ax.axis('off')
ax.pcolor(model.get_weights()[0][i, 0], cmap='gray')
matplotlib.pyplot.show()
```
Good enough. Now, let's save the model.
```
model_json = model.to_json()
with open('../crowdastro-data/cnn_model_2.json', 'w') as f:
f.write(model_json)
model.save_weights('../crowdastro-data/cnn_weights_2.h5')
```
...Now, let's *test* that it saved.
```
with open('../crowdastro-data/cnn_model_2.json', 'r') as f:
model2 = keras.models.model_from_json(f.read())
model2.load_weights('../crowdastro-data/cnn_weights_2.h5')
figure = matplotlib.pyplot.figure(figsize=(15, 15))
for i in range(32):
ax = figure.add_subplot(8, 4, i+1)
ax.axis('off')
ax.pcolor(model2.get_weights()[0][i, 0], cmap='gray')
matplotlib.pyplot.show()
```
Looks good. Ideally, we train this longer, but I don't have enough time right now. Let's save the training data and move on.
```
import tables
with tables.open_file('../crowdastro-data/atlas_training_data.h5', mode='w', title='ATLAS training data') as f:
root = f.root
f.create_array(root, 'training_inputs', training_inputs)
f.create_array(root, 'training_outputs', training_outputs)
```
<h1 align='center'>8.1 Hierarchical Indexing</h1>
Hierarchical indexing is an important feature of pandas that enables you to have multiple (two or more) index levels on an axis.
Somewhat abstractly, it provides a way for you to work with higher dimensional data in a lower dimensional form.
```
import pandas as pd
import numpy as np
data = pd.Series(np.random.randn(9),
index=[['a', 'a', 'a', 'b', 'b', 'c', 'c', 'd', 'd'],
[1, 2, 3, 1, 3, 1, 2, 2, 3]])
data
data.index
```
With a hierarchically indexed object, so-called partial indexing is possible, enabling you to concisely select subsets of the data.
```
data.loc[['b', 'd']]
```
Selection is even possible from an “inner” level:
```
data.loc[:, 2]
```
Hierarchical indexing plays an important role in reshaping data and group-based operations like forming a pivot table.
For example, you could rearrange the data into a DataFrame using its unstack method:
```
data.unstack()
data.unstack().stack()
```
With a DataFrame, either axis can have a hierarchical index
```
frame = pd.DataFrame(np.arange(12).reshape((4, 3)),
index=[['a', 'a', 'b', 'b'],
[1, 2, 1, 2]],
columns=[['Ohio', 'Ohio', 'Colorado'],
['Green', 'Red', 'Green']])
frame
frame.stack()
```
The hierarchical levels can have names (as strings or any Python objects). If so, these will show up in the console output:
```
frame.index.names = ['key1', 'key2']
frame.columns.names = ['state', 'color']
frame
```
With partial column indexing you can similarly select groups of columns:
```
frame1=frame.copy()
frame1.index= ['key1', 'key2','key3','key4']
frame1.columns= ['state', 'color', 'misc']
frame1
```
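Note that the cell above replaces the hierarchical labels with flat ones; partial column indexing itself, i.e. selecting a whole group of columns by the outer level of `frame`, looks like this (a minimal sketch):
```
# Select every column under the outer level 'Ohio'
frame['Ohio']
```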
A MultiIndex can be created by itself and then reused; the columns in the preceding DataFrame with level names could be created like this:
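A minimal sketch of that construction, reusing the labels from the columns of `frame` above:
```
columns = pd.MultiIndex.from_arrays([['Ohio', 'Ohio', 'Colorado'],
                                     ['Green', 'Red', 'Green']],
                                    names=['state', 'color'])
columns
```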
<b>Reordering and Sorting Levels</b>
At times you will need to rearrange the order of the levels on an axis or sort the data by the values in one specific level.
The swaplevel method takes two level numbers or names and returns a new object with the levels interchanged (but the data is otherwise unaltered):
```
frame.swaplevel('key1', 'key2')
```
sort_index, on the other hand, sorts the data using only the values in a single level.
When swapping levels, it's not uncommon to also use sort_index so that the result is lexicographically sorted by the indicated level:
```
frame.sort_index(level=1)
frame.swaplevel(0, 1).sort_index(level=0)
```
Data selection performance is much better on hierarchically indexed objects if the index is lexicographically sorted starting with the outermost level—that is, the result of calling sort_index(level=0) or sort_index().
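A quick way to check whether an index satisfies this condition is the index's `is_monotonic_increasing` attribute, shown here as a small sketch:
```
frame.sort_index().index.is_monotonic_increasing
```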
<b>Summary Statistics by Level</b>
Many descriptive and summary statistics on DataFrame and Series have a level option in which you can specify the level you want to aggregate by on a particular axis.
Consider the above DataFrame; we can aggregate by level on either the rows or columns like so:
```
frame
frame.sum(level='key2')
frame.sum(level='color', axis=1)
```
<b>Indexing with a DataFrame's columns</b>
It's not unusual to want to use one or more columns from a DataFrame as the row index; alternatively, you may wish to move the row index into the DataFrame's columns. Here's an example DataFrame:
```
frame = pd.DataFrame({'a': range(7), 'b': range(7, 0, -1),
'c': ['one', 'one', 'one', 'two', 'two','two', 'two'],
'd': [0, 1, 2, 0, 1, 2, 3]})
frame
```
DataFrame's set_index function will create a new DataFrame using one or more of its columns as the index:
```
frame.set_index(['c','d'])
frame.set_index(['c', 'd'], drop=False)
```
reset_index, on the other hand, does the opposite of set_index; the hierarchical index levels are moved into the columns:
```
framex=frame.set_index(['c','d'])
framex
framex.reset_index()
```
<a href="https://colab.research.google.com/github/plaupla/awsProject1BikeSharing/blob/main/project_template.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Predict Bike Sharing Demand with AutoGluon Template
## Project: Predict Bike Sharing Demand with AutoGluon
This notebook is a template with each step that you need to complete for the project.
Please fill in your code where there are explicit `?` markers in the notebook. You are welcome to add more cells and code as you see fit.
Once you have completed all the code implementations, please export your notebook as an HTML file so the reviewers can view your code. Make sure all outputs are correctly shown.
`File-> Export Notebook As... -> Export Notebook as HTML`
There is a writeup to complete as well after all code implementation is done. Please answer all questions and attach the necessary tables and charts. You can complete the writeup in either markdown or PDF.
Completing the code template and writeup template will cover all of the rubric points for this project.
The rubric contains "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. The stand out suggestions are optional. If you decide to pursue the "stand out suggestions", you can include the code in this notebook and also discuss the results in the writeup file.
## Step 1: Create an account with Kaggle
### Create Kaggle Account and download API key
Below is example of steps to get the API username and key. Each student will have their own username and key.
1. Open account settings.


2. Scroll down to API and click Create New API Token.


3. Open up `kaggle.json` and use the username and key.

## Step 2: Download the Kaggle dataset using the kaggle python library
### Open up Sagemaker Studio and use starter template
1. Notebook should be using a `ml.t3.medium` instance (2 vCPU + 4 GiB)
2. Notebook should be using kernal: `Python 3 (MXNet 1.8 Python 3.7 CPU Optimized)`
### Install packages
```
!pip install -U pip
!pip install -U setuptools wheel
!pip install -U "mxnet<2.0.0" bokeh==2.0.1
!pip install autogluon --no-cache-dir
# Without --no-cache-dir, smaller aws instances may have trouble installing
```
### Setup Kaggle API Key
```
# create the .kaggle directory and an empty kaggle.json file
!mkdir -p /root/.kaggle
!touch /root/.kaggle/kaggle.json
!chmod 600 /root/.kaggle/kaggle.json
# Fill in your user name and key from creating the kaggle account and API token file
import json
kaggle_username = "asamiyuu"
kaggle_key = "76374006b2b499f499899d9dae40d605"
# Save API token the kaggle.json file
with open("/root/.kaggle/kaggle.json", "w") as f:
f.write(json.dumps({"username": kaggle_username, "key": kaggle_key}))
```
### Download and explore dataset
### Go to the bike sharing demand competition and agree to the terms

```
# Download the dataset, it will be in a .zip file so you'll need to unzip it as well.
!kaggle competitions download -c bike-sharing-demand
# If you already downloaded it you can use the -o command to overwrite the file
!unzip -o bike-sharing-demand.zip
import pandas as pd
from autogluon.tabular import TabularPredictor
# Create the train dataset in pandas by reading the csv
# Set the parsing of the datetime column so you can use some of the `dt` features in pandas later
train = pd.read_csv("train.csv", parse_dates = ["datetime"])
train.head()
# Simple output of the train dataset to view some of the min/max/variation of the dataset features.
train.describe()
# Create the test pandas dataframe in pandas by reading the csv, remember to parse the datetime!
test = pd.read_csv("test.csv", parse_dates = ["datetime"])
test.head()
# Load the sample submission in the same way as the train and test datasets
submission = pd.read_csv("sampleSubmission.csv", parse_dates = ["datetime"])
submission.head()
```
## Step 3: Train a model using AutoGluon’s Tabular Prediction
Requirements:
* We are prediting `count`, so it is the label we are setting.
* Ignore `casual` and `registered` columns as they are also not present in the test dataset.
* Use the `root_mean_squared_error` as the metric to use for evaluation.
* Set a time limit of 10 minutes (600 seconds).
* Use the preset `best_quality` to focus on creating the best model.
```
predictor = TabularPredictor(label="count", learner_kwargs={'ignored_columns': ["casual", "registered"]}).fit(
train_data=train, time_limit=600, presets="best_quality"
)
```
### Review AutoGluon's training run with ranking of models that did the best.
```
predictor.fit_summary()
```
### Create predictions from test dataset
```
predictions = predictor.predict(test)
predictions.head()
```
#### NOTE: Kaggle will reject the submission if we don't set everything to be > 0.
```
# Describe the `predictions` series to see if there are any negative values
predictions.lt(0).value_counts()
# Set the negative values to zero
predictions[predictions < 0] = 0
# Confirm that no negative values remain
predictions.lt(0).value_counts()
# Write the predictions into the submission dataframe and save it
submission["count"] = predictions
submission.to_csv("submission.csv", index=False)
```
### Set predictions to submission dataframe, save, and submit
```
submission["count"] = predictions
submission.to_csv("submission.csv", index=False)
!kaggle competitions submit -c bike-sharing-demand -f submission.csv -m "first raw submission"
```
#### View submission via the command line or in the web browser under the competition's page - `My Submissions`
```
!kaggle competitions submissions -c bike-sharing-demand | tail -n +1 | head -n 6
```
#### Initial score of `?`
## Step 4: Exploratory Data Analysis and Creating an additional feature
* Any additional feature will do, but a great suggestion would be to separate out the datetime into hour, day, or month parts.
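For instance, the hour part suggested above could be added with a one-liner like the following (a sketch; the cells below use the year, month, and day parts instead):
```
# Hypothetical example: extract the hour from the parsed datetime column
train["hour"] = train.datetime.dt.hour
test["hour"] = test.datetime.dt.hour
```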
```
# Create a histogram of all features to show the distribution of each one relative to the data. This is part of the exploratory data analysis
train.hist()
# create a new feature
train["year"] = train.datetime.dt.year
train["month"] = train.datetime.dt.month
train["day"] = train.datetime.dt.day
train.drop(["datetime"], axis=1, inplace=True)
test["year"] = test.datetime.dt.year
test["month"] = test.datetime.dt.month
test["day"] = test.datetime.dt.day
test.drop(["datetime"], axis=1, inplace=True)
train.head()
test.head()
```
## Make category types for these so models know they are not just numbers
* AutoGluon originally sees these as ints, but in reality they are int representations of a category.
* Setting the dtype to category will classify these as categories in AutoGluon.
```
train["season"] = train["season"].astype("category")
train["weather"] = train["weather"].astype("category")
test["season"] = test["season"].astype("category")
test["weather"] = test["weather"].astype("category")
# View our new features
train.head()
# View histogram of all features again, now with the new date features
train.hist()
```
## Step 5: Rerun the model with the same settings as before, just with more features
```
predictor_new_features = TabularPredictor(label="count", learner_kwargs={'ignored_columns': ["casual", "registered"]}).fit(
train_data=train, time_limit=600, presets="best_quality"
)
predictor_new_features.fit_summary()
# Remember to set all negative values to zero
predictions_new_features = predictor_new_features.predict(test)
predictions_new_features.head()
# Same submitting predictions
submission_new_features = pd.read_csv('./sampleSubmission.csv', parse_dates=["datetime"])
submission_new_features["count"] = predictions_new_features
submission_new_features.to_csv("submission_new_features.csv", index=False)
!kaggle competitions submit -c bike-sharing-demand -f submission_new_features.csv -m "new features"
!kaggle competitions submissions -c bike-sharing-demand | tail -n +1 | head -n 6
```
#### New Score of `?`
## Step 6: Hyper parameter optimization
* There are many options for hyper parameter optimization.
* Options are to change the AutoGluon higher level parameters or the individual model hyperparameters.
* Tuning the hyperparameters of the individual models within AutoGluon requires the `hyperparameters` and `hyperparameter_tune_kwargs` arguments.
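As an illustration only (this is not what the cell below does, and the exact option names may vary between AutoGluon versions), passing model-level hyperparameters and a tuning budget could look roughly like this:
```
# Hypothetical sketch of model-level hyperparameter tuning with AutoGluon
hyperparameters = {
    "GBM": {"num_boost_round": 100},  # assumed LightGBM option
    "XGB": {},                        # XGBoost with default settings
}
predictor_sketch = TabularPredictor(label="count").fit(
    train_data=train,
    time_limit=600,
    hyperparameters=hyperparameters,
    hyperparameter_tune_kwargs={"num_trials": 5, "searcher": "auto", "scheduler": "local"},
)
```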
```
import autogluon.core as ag
predictor_new_hpo = TabularPredictor(label="count", eval_metric="root_mean_squared_error", learner_kwargs={'ignored_columns': ["casual", "registered"]}).fit(
train_data=train, time_limit=600, num_bag_folds=5, num_bag_sets=1, num_stack_levels=1, presets="best_quality"
)
predictor_new_hpo.fit_summary()
# Remember to set all negative values to zero
predictions_new_hpo = predictor_new_hpo.predict(test)
predictions_new_hpo[predictions_new_hpo < 0] = 0
# Same submitting predictions
submission_new_hpo = pd.read_csv('./sampleSubmission.csv', parse_dates=["datetime"])
submission_new_hpo["count"] = predictions_new_hpo
submission_new_hpo.to_csv("submission_new_hpo.csv", index=False)
!kaggle competitions submit -c bike-sharing-demand -f submission_new_hpo.csv -m "new features with hyperparameters"
!kaggle competitions submissions -c bike-sharing-demand | tail -n +1 | head -n 6
```
#### New Score of `?`
## Step 7: Write a Report
### Refer to the markdown file for the full report
### Creating plots and table for report
```
import pandas as pd
import matplotlib.pyplot as plt
# Taking the top model score from each training run and creating a line plot to show improvement
# You can create these in the notebook and save them to PNG or use some other tool (e.g. google sheets, excel)
fig = pd.DataFrame(
{
"model": ["initial", "add_features", "hpo"],
"score": [ 1.39377 , 1.33121 , 1.32740]
}
).plot(x="model", y="score", figsize=(8, 6)).get_figure()
fig.savefig('model_train_score.png')
# Take the kaggle scores and create a line plot to show improvement
fig = pd.DataFrame(
{
"test_eval": ["initial", "add_features", "WeightedEnsemble", "WeightedEnsemble"],
"score": [ 1.39377 , 1.33121, 1.31933 , 1.32740 ]
}
).plot(x="test_eval", y="score", figsize=(8, 6)).get_figure()
fig.savefig('model_test_score.png')
```
### Hyperparameter table
```
# The 3 hyperparameters we tuned with the kaggle score as the result
pd.DataFrame({
"model": ["initial", "add_features", "WeightedEnsemble"],
"WeightedEnsemble": [0, 0, 5],
"WeightedEnsemble": [20, 20, 1],
"WeightedEnsemble": [0, 0, 1],
"score": [1.33883, 1.39281, 1.32740 ]
})
fig = pd.DataFrame(
{
"model": ["initial", "add_features", "hpo"],
"WeightedEnsemble": [0, 0, 5],
"WeightedEnsemble": [20, 20, 1],
"WeightedEnsemble": [0, 0, 1],
"score": [1.33883 , 1.39281 , 1.32740 ]
}
).plot(x="model", y="score", figsize=(8, 6)).get_figure()
fig.savefig('model_test2_score.png')
```
[Home Page](../Start_Here.ipynb)
[Next Notebook](CNN's.ipynb)
# CNN Primer and Keras 101
In this notebook, participants will be introduced to CNNs and implement one using Keras. For an absolute beginner, this notebook serves as a good starting point.
**Contents of this notebook:**
- [How a Deep Learning project is planned ?](#Machine-Learning-Pipeline)
- [Wrapping things up with an example ( Classification )](#Image-Classification-on-types-of-clothes)
**By the end of this notebook, participants will:**
- Understand the Machine Learning Pipeline
- Write a Deep Learning Classifier and train it.
**We will be building a _Multi-class Classifier_ to classify images of clothing to their respective classes**
## Machine Learning Pipeline
During the bootcamp we will be making use of the following buckets to help us understand how a Machine Learning project should be planned and executed:
1. **Data**: To start any ML project, we need data that is pre-processed and can be fed into the network.
2. **Task**: There are many tasks in ML, so we need to make sure we understand and define the problem statement accurately.
3. **Model**: We need to build a model that is neither so deep and complex that it takes a lot of computational power, nor so small that it cannot learn the important features.
4. **Loss**: Out of the many _loss functions_ available, we need to carefully choose one that is suitable for the task we are about to carry out.
5. **Learning**: As we mentioned in our last notebook, there are a variety of _optimisers_, each with its advantages and disadvantages. Here we choose an _optimiser_ suitable for our task and train our model using the set hyperparameters.
6. **Evaluation**: This is a crucial step in the process, determining whether our model has learnt the features properly by analysing how it performs on unseen data.
**Here we will be building a _Multi-class Classifier_ to classify images of clothing to their respective classes.**
We will follow the above discussed pipeline to complete the example.
## Image Classification on types of clothes
#### Step -1 : Data
We will be using the **F-MNIST ( Fashion MNIST )** dataset, which is a very popular dataset. This dataset contains 70,000 grayscale images in 10 categories. The images show individual articles of clothing at low resolution (28 by 28 pixels).
<img src="images/fashion-mnist.png" alt="Fashion MNIST sprite" width="600">
*Source: https://www.tensorflow.org/tutorials/keras/classification*
```
# Import Necessary Libraries
from __future__ import absolute_import, division, print_function, unicode_literals
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
# Let's Import the Dataset
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
```
Loading the dataset returns four NumPy arrays:
* The `train_images` and `train_labels` arrays are the *training set*—the data the model uses to learn.
* The model is tested against the *test set*, the `test_images`, and `test_labels` arrays.
The images are 28x28 NumPy arrays, with pixel values ranging from 0 to 255. The *labels* are an array of integers, ranging from 0 to 9. These correspond to the *class* of clothing the image represents:
<table>
<tr>
<th>Label</th>
<th>Class</th>
</tr>
<tr>
<td>0</td>
<td>T-shirt/top</td>
</tr>
<tr>
<td>1</td>
<td>Trouser</td>
</tr>
<tr>
<td>2</td>
<td>Pullover</td>
</tr>
<tr>
<td>3</td>
<td>Dress</td>
</tr>
<tr>
<td>4</td>
<td>Coat</td>
</tr>
<tr>
<td>5</td>
<td>Sandal</td>
</tr>
<tr>
<td>6</td>
<td>Shirt</td>
</tr>
<tr>
<td>7</td>
<td>Sneaker</td>
</tr>
<tr>
<td>8</td>
<td>Bag</td>
</tr>
<tr>
<td>9</td>
<td>Ankle boot</td>
</tr>
</table>
Each image is mapped to a single label. Since the *class names* are not included with the dataset, let us store them in an array so that we can use them later when plotting the images:
```
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
```
## Understanding the Data
```
#Print Array Size of Training Set
print("Size of Training Images :"+str(train_images.shape))
#Print Array Size of Label
print("Size of Training Labels :"+str(train_labels.shape))
#Print Array Size of Test Set
print("Size of Test Images :"+str(test_images.shape))
#Print Array Size of Label
print("Size of Test Labels :"+str(test_labels.shape))
#Let's See how our Outputs Look like
print("Training Set Labels :"+str(train_labels))
#Data in the Test Set
print("Test Set Labels :"+str(test_labels))
```
## Data Pre-processing
```
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
```
The image pixel values range from 0 to 255. Let us now rescale the pixel values from the range 0 - 255 to the range 0 - 1 in both the *train* and *test* sets. This normalisation helps the gradient-based optimisation during training.
```
train_images = train_images / 255.0
test_images = test_images / 255.0
# Let's plot a few images to verify that the data is in the correct format.
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
```
## Defining our Model
Our model has three layers:
- 784 input features (28 * 28)
- 128 nodes in the hidden layer (feel free to experiment with this value)
- 10 output nodes to denote the class
Implementing this in Keras (a machine learning framework that can run on top of TensorFlow, Theano, etc.):
```
from tensorflow.keras import backend as K
K.clear_session()
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
```
The first layer in this network, `tf.keras.layers.Flatten`, transforms the format of the images from a two-dimensional array (of 28 by 28 pixels) to a one-dimensional array (of 28 * 28 = 784 pixels). Think of this layer as unstacking rows of pixels in the image and lining them up. This layer has no parameters to learn; it only reformats the data.
After the pixels are flattened, the network consists of a sequence of two `tf.keras.layers.Dense` layers. These are densely connected, or fully connected, neural layers. The first `Dense` layer has 128 nodes (or neurons). The second (and last) layer is a 10-node *softmax* layer that returns an array of 10 probability scores that sum to 1. Each node contains a score that indicates the probability that the current image belongs to one of the 10 classes.
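To see these shapes and parameter counts concretely, a quick check is to print the model summary (this assumes the `model` defined in the previous cell is still in scope; the hand computation simply mirrors the 784 → 128 → 10 structure):
```python
# Quick sanity check of the layer structure described above.
# Assumes `model` from the previous cell is in scope.
model.summary()
# The trainable parameter counts can also be verified by hand:
# Dense(128): 784 inputs * 128 units + 128 biases = 100,480 parameters
# Dense(10):  128 inputs * 10 units  + 10 biases  = 1,290 parameters
print("Expected Dense(128) params:", 784 * 128 + 128)
print("Expected Dense(10) params:", 128 * 10 + 10)
```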
### Compile the model
Before the model is ready for training, it needs a few more settings. These are added during the model's *compile* step:
* *Loss function* —This measures how accurate the model is during training. You want to minimize this function to "steer" the model in the right direction.
* *Optimizer* —This is how the model is updated based on the data it sees and its loss function.
* *Metrics* —Used to monitor the training and testing steps. The following example uses *accuracy*, the fraction of the images that are correctly classified.
```
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
```
## Train the model
Training the neural network model requires the following steps:
1. Feed the training data to the model. In this example, the training data is in the `train_images` and `train_labels` arrays.
2. The model learns to associate images and labels.
3. You ask the model to make predictions about a test set—in this example, the `test_images` array. Verify that the predictions match the labels from the `test_labels` array.
To start training, call the `model.fit` method—so called because it "fits" the model to the training data:
```
model.fit(train_images, train_labels ,epochs=5)
```
## Evaluate accuracy
Next, compare how the model performs on the test dataset:
```
#Evaluating the Model using the Test Set
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
```
## Exercise
Try adding more dense layers to the network above and observe change in accuracy.
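One possible way to approach this exercise is sketched below. The extra hidden layer size (64) is an arbitrary choice, and `validation_split` is only used here to make the gap between training and validation accuracy easier to watch while experimenting:
```python
# One possible sketch for the exercise: a deeper classifier with an extra Dense layer.
# The layer sizes (128, 64) are arbitrary choices for experimentation.
deeper_model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(64, activation='relu'),   # extra hidden layer
    keras.layers.Dense(10, activation='softmax')
])
deeper_model.compile(optimizer='adam',
                     loss='sparse_categorical_crossentropy',
                     metrics=['accuracy'])
# validation_split holds out 10% of the training data so the gap between
# training and validation accuracy is visible while you experiment.
deeper_model.fit(train_images, train_labels, epochs=5, validation_split=0.1)
deeper_model.evaluate(test_images, test_labels, verbose=2)
```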
With the original three-layer model we get an accuracy of 87% on the test dataset, which is lower than the 89% we got during the training phase. This gap between training and test performance is known in ML as overfitting.
## Important:
<mark>Shutdown the kernel before clicking on “Next Notebook” to free up the GPU memory.</mark>
## Licensing
This material is released by OpenACC-Standard.org, in collaboration with NVIDIA Corporation, under the Creative Commons Attribution 4.0 International (CC BY 4.0).
[Home Page](../Start_Here.ipynb)
     
     
     
     
   
[Next Notebook](CNN's.ipynb)
|
github_jupyter
|
# Import Necessary Libraries
from __future__ import absolute_import, division, print_function, unicode_literals
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
# Let's Import the Dataset
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
#Print Array Size of Training Set
print("Size of Training Images :"+str(train_images.shape))
#Print Array Size of Label
print("Size of Training Labels :"+str(train_labels.shape))
#Print Array Size of Test Set
print("Size of Test Images :"+str(test_images.shape))
#Print Array Size of Label
print("Size of Test Labels :"+str(test_labels.shape))
#Let's See how our Outputs Look like
print("Training Set Labels :"+str(train_labels))
#Data in the Test Set
print("Test Set Labels :"+str(test_labels))
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
train_images = train_images / 255.0
test_images = test_images / 255.0
# Let's Print to Veryify if the Data is of the correct format.
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
from tensorflow.keras import backend as K
K.clear_session()
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_images, train_labels ,epochs=5)
#Evaluating the Model using the Test Set
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
| 0.735926 | 0.992415 |
# Assignment 1.1

# Assignment 1.2

$
R=
\begin{bmatrix}
-2 & 0 & 3 & -10\\
\end{bmatrix}
$
# Assignment 1.3
In the two lines below I have worked out two examples of a route that an agent can take. All rewards of the states the agent passed through are added together, which gives the following outcomes. The short snippet after the formulas simply re-checks these sums.
$
G_{t} = R(Rain) + R(Cloudy) + R(Sunny) + R(Meteor) = -2 + 0 + 3 + -10 = -9\\
G_{t} = R(Cloudy) + R(Sunny) + R(Cloudy) + R(Sunny) + R(Cloudy) + R(Sunny) + R(Meteor) = 0 + 3 + 0 + 3 + 0 + 3 + -10 = -1
$
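These sums can be double-checked with a couple of lines of Python, using the rewards from the reward vector above:
```python
# Quick check of the two returns computed above, using the rewards
# R(Rain) = -2, R(Cloudy) = 0, R(Sunny) = 3, R(Meteor) = -10.
rewards = {"Rain": -2, "Cloudy": 0, "Sunny": 3, "Meteor": -10}
route_1 = ["Rain", "Cloudy", "Sunny", "Meteor"]
route_2 = ["Cloudy", "Sunny", "Cloudy", "Sunny", "Cloudy", "Sunny", "Meteor"]
print(sum(rewards[s] for s in route_1))  # -9
print(sum(rewards[s] for s in route_2))  # -1
```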
# Assignment 1.4
| | init | iter 1 | iter 2 |
|----|------|--------|--------|
| s1 | 0 | 0.1 | 6.56 |
| s2 | 0 | 17.8 | 17.96 |
| s3 | 0 | 1.6 | 2.15 |
| s4 | 0 | 1.6 | 5.17 |
| s5 | 0 | 0 | 0 |
# Assignment 1.5
# Assignment 2
Below I performed value iteration using the method that was first explained in assignment 1.4. I noticed that the difference between the new values and the values I had computed before kept getting smaller, so I decided to stop the value iteration once that difference was smaller than 0.01. I could keep iterating longer, but that is of little use since the values barely change anymore. For visualisation I also made a plot of the values, which likewise shows that the values hardly change any further. A generic sketch of this stopping rule is shown right after the table.
| | init | iter 1 | iter 2 | iter 3 | iter 4 | iter 5 | iter 6 | iter 7 | iter 8 | iter 9 | iter 10 | iter 11 | iter 12 | iter 13 |
|----|------|--------|--------|--------|--------|---------|---------|----------|----------|-----------|-----------|------------|------------|-------------|
| s0 | 0 | -0.1 | -0.62 | -0.7 | -0.975 | -1 | -1.1375 | -1.15 | -1.21875 | -1.225 | -1.259375 | -1.2625 | -1.2796875 | -1.28125 |
| s1 | 0 | -0.55 | -0.6 | -0.875 | -0.9 | -1.0375 | -1.05 | -1.11875 | -1.125 | -1.159375 | -1.1625 | -1.1796875 | -1.18125 | -1.18984375 |
| s2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
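The transition probabilities of this MDP are not written out in the table, so the sketch below uses placeholder transition and reward arrays purely to illustrate the stopping rule described above (iterate until the largest value change drops below 0.01); it is not the actual model of this assignment.
```python
import numpy as np
# Illustrative value-iteration loop with the 0.01 stopping threshold described above.
# P and R below are placeholders, NOT the actual MDP of this assignment:
# P[a, s, s'] is the probability of moving from s to s' under action a.
P = np.array([
    [[0.8, 0.2, 0.0],      # action 0
     [0.1, 0.8, 0.1],
     [0.0, 0.0, 1.0]],
    [[0.2, 0.8, 0.0],      # action 1
     [0.0, 0.2, 0.8],
     [0.0, 0.0, 1.0]],
])
R = np.array([-1.0, -1.0, 0.0])   # placeholder reward per state; s2 is terminal
gamma = 1.0
V = np.zeros(3)
for iteration in range(1, 1000):
    # Bellman optimality backup: V(s) = R(s) + gamma * max_a sum_s' P(s'|s,a) V(s')
    V_new = R + gamma * np.max(P @ V, axis=0)
    V_new[2] = 0.0                          # terminal state keeps value 0
    diff = np.max(np.abs(V_new - V))
    V = V_new
    if diff < 0.01:                         # stop once the update is small enough
        print(f"Converged after {iteration} iterations: V = {np.round(V, 3)}")
        break
```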
```
import matplotlib.pyplot as plt
x = [i for i in range(14)]
s0 = [0, -0.1, -0.62, -0.7, -0.975, -1, -1.1375, -1.15, -1.21875, -1.225, -1.259375, -1.2625, -1.2796875, -1.28125]
s1 = [0, -0.55, -0.6, -0.875, -0.9, -1.0375, -1.05, -1.11875, -1.125, -1.159375, -1.1625, -1.1796875, -1.18125, -1.18984375]
s2 = [0 for i in range(14)]
plt.plot(x, s0)
plt.plot(x, s1)
plt.plot(x, s2)
plt.legend(["s0", "s1", "s2"])
plt.title("Value Iteration (Opdracht 2)")
plt.xlabel("Aantal iteraties")
plt.ylabel("Value")
```
# Assignment 3
Below is an example run of the code with the output of the values and the greedy policy. It shows that the cells toward the 40+ finish become progressively darker, which indicates that the agent moves in that direction.
```
from grid import Grid
import numpy as np
rewards = np.array([
[-1, -1, -1, 40],
[-1, -1, -10, -10],
[-1, -1, -1, -1],
[10, -2, -1, -1]
])
terminal_states = [(0, 3), (3, 0)]
gamma = 0.9
grid = Grid(rewards, terminal_states, gamma)
grid.run(10, verbose=True)
```
|
github_jupyter
|
import matplotlib.pyplot as plt
x = [i for i in range(14)]
s0 = [0, -0.1, -0.62, -0.7, -0.975, -1, -1.1375, -1.15, -1.21875, -1.225, -1.259375, -1.2625, -1.2796875, -1.28125]
s1 = [0, -0.55, -0.6, -0.875, -0.9, -1.0375, -1.05, -1.11875, -1.125, -1.159375, -1.1625, -1.1796875, -1.18125, -1.18984375]
s2 = [0 for i in range(14)]
plt.plot(x, s0)
plt.plot(x, s1)
plt.plot(x, s2)
plt.legend(["s0", "s1", "s2"])
plt.title("Value Iteration (Opdracht 2)")
plt.xlabel("Aantal iteraties")
plt.ylabel("Value")
from grid import Grid
import numpy as np
rewards = np.array([
[-1, -1, -1, 40],
[-1, -1, -10, -10],
[-1, -1, -1, -1],
[10, -2, -1, -1]
])
terminal_states = [(0, 3), (3, 0)]
gamma = 0.9
grid = Grid(rewards, terminal_states, gamma)
grid.run(10, verbose=True)
| 0.439507 | 0.907681 |
TSG036 - Controller logs
========================
Get the last ‘n’ hours of controller logs.
Steps
-----
### Parameters
```
since_hours = 2
since_seconds = since_hours * 3600 # seconds in hour
coalesce_duplicates = True
```
### Instantiate Kubernetes client
```
# Instantiate the Python Kubernetes client into 'api' variable
import os
from IPython.display import Markdown
try:
from kubernetes import client, config
from kubernetes.stream import stream
if "KUBERNETES_SERVICE_PORT" in os.environ and "KUBERNETES_SERVICE_HOST" in os.environ:
config.load_incluster_config()
else:
try:
config.load_kube_config()
except:
display(Markdown(f'HINT: Use [TSG112 - App-Deploy Proxy Nginx Logs](../log-analyzers/tsg112-get-approxy-nginx-logs.ipynb) to resolve this issue.'))
raise
api = client.CoreV1Api()
print('Kubernetes client instantiated')
except ImportError:
from IPython.display import Markdown
display(Markdown(f'HINT: Use [SOP059 - Install Kubernetes Python module](../install/sop059-install-kubernetes-module.ipynb) to resolve this issue.'))
raise
```
### Get the namespace for the big data cluster
Get the namespace of the Big Data Cluster from the Kubernetes API.
**NOTE:**
If there is more than one Big Data Cluster in the target Kubernetes
cluster, then either:
- set \[0\] to the correct value for the big data cluster.
- set the environment variable AZDATA\_NAMESPACE, before starting
Azure Data Studio.
```
# Place Kubernetes namespace name for BDC into 'namespace' variable
if "AZDATA_NAMESPACE" in os.environ:
namespace = os.environ["AZDATA_NAMESPACE"]
else:
try:
namespace = api.list_namespace(label_selector='MSSQL_CLUSTER').items[0].metadata.name
except IndexError:
from IPython.display import Markdown
display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
raise
print('The kubernetes namespace for your big data cluster is: ' + namespace)
```
### Get controller logs
```
container = "controller"
pod_list = api.list_namespaced_pod(namespace, label_selector="app=controller")
entries_for_analysis = []
for pod in pod_list.items:
print (f"Logs for controller pod: {pod.metadata.name}")
try:
logs = api.read_namespaced_pod_log(pod.metadata.name, namespace, container=container, since_seconds=since_seconds)
except Exception as err:
print(f"ERROR: {err}")
pass
else:
if coalesce_duplicates:
previous_line = ""
duplicates = 1
for line in logs.split('\n'):
if line[27:] != previous_line[27:]:
if duplicates != 1:
print(f"\t{previous_line} (x{duplicates})")
print(f"\t{line}")
duplicates = 1
else:
duplicates = duplicates + 1
continue
if line[25:34] == "| ERROR |" or line[25:33] == "| WARN |":
entries_for_analysis.append(line)
previous_line = line
else:
print(logs)
print (f"There were {len(entries_for_analysis)} warnings and errors found.")
```
### Analyze log entries and suggest relevant Troubleshooting Guides
```
# Analyze log entries and suggest further relevant troubleshooting guides
from IPython.display import Markdown
import os
import json
import requests
import ipykernel
import datetime
from urllib.parse import urljoin
from notebook import notebookapp
def get_notebook_name():
"""Return the full path of the jupyter notebook. Some runtimes (e.g. ADS)
have the kernel_id in the filename of the connection file. If so, the
notebook name at runtime can be determined using `list_running_servers`.
Other runtimes (e.g. azdata) do not have the kernel_id in the filename of
the connection file, therefore we are unable to establish the filename
"""
connection_file = os.path.basename(ipykernel.get_connection_file())
# If the runtime has the kernel_id in the connection filename, use it to
# get the real notebook name at runtime, otherwise, use the notebook
# filename from build time.
try:
kernel_id = connection_file.split('-', 1)[1].split('.')[0]
except:
pass
else:
for servers in list(notebookapp.list_running_servers()):
try:
response = requests.get(urljoin(servers['url'], 'api/sessions'), params={'token': servers.get('token', '')}, timeout=.01)
except:
pass
else:
for nn in json.loads(response.text):
if nn['kernel']['id'] == kernel_id:
return nn['path']
def load_json(filename):
with open(filename, encoding="utf8") as json_file:
return json.load(json_file)
def get_notebook_rules():
"""Load the notebook rules from the metadata of this notebook (in the .ipynb file)"""
file_name = get_notebook_name()
if file_name == None:
return None
else:
j = load_json(file_name)
if "azdata" not in j["metadata"] or \
"expert" not in j["metadata"]["azdata"] or \
"log_analyzer_rules" not in j["metadata"]["azdata"]["expert"]:
return []
else:
return j["metadata"]["azdata"]["expert"]["log_analyzer_rules"]
rules = get_notebook_rules()
if rules == None:
print("")
print(f"Log Analysis only available when run in Azure Data Studio. Not available when run in azdata.")
else:
hints = 0
if len(rules) > 0:
for entry in entries_for_analysis:
for rule in rules:
if entry.find(rule[0]) != -1:
print (entry)
display(Markdown(f'HINT: Use [{rule[2]}]({rule[3]}) to resolve this issue.'))
hints = hints + 1
print("")
print(f"{len(entries_for_analysis)} log entries analyzed (using {len(rules)} rules). {hints} further troubleshooting hints made inline.")
print('Notebook execution complete.')
```
Related
-------
- [TSG027 - Observe cluster
deployment](../diagnose/tsg027-observe-bdc-create.ipynb)
|
github_jupyter
|
since_hours = 2
since_seconds = since_hours * 3600 # seconds in hour
coalesce_duplicates = True
# Instantiate the Python Kubernetes client into 'api' variable
import os
try:
from kubernetes import client, config
from kubernetes.stream import stream
if "KUBERNETES_SERVICE_PORT" in os.environ and "KUBERNETES_SERVICE_HOST" in os.environ:
config.load_incluster_config()
else:
try:
config.load_kube_config()
except:
display(Markdown(f'HINT: Use [TSG112 - App-Deploy Proxy Nginx Logs](../log-analyzers/tsg112-get-approxy-nginx-logs.ipynb) to resolve this issue.'))
raise
api = client.CoreV1Api()
print('Kubernetes client instantiated')
except ImportError:
from IPython.display import Markdown
display(Markdown(f'HINT: Use [SOP059 - Install Kubernetes Python module](../install/sop059-install-kubernetes-module.ipynb) to resolve this issue.'))
raise
# Place Kubernetes namespace name for BDC into 'namespace' variable
if "AZDATA_NAMESPACE" in os.environ:
namespace = os.environ["AZDATA_NAMESPACE"]
else:
try:
namespace = api.list_namespace(label_selector='MSSQL_CLUSTER').items[0].metadata.name
except IndexError:
from IPython.display import Markdown
display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
raise
print('The kubernetes namespace for your big data cluster is: ' + namespace)
container = "controller"
pod_list = api.list_namespaced_pod(namespace, label_selector="app=controller")
entries_for_analysis = []
for pod in pod_list.items:
print (f"Logs for controller pod: {pod.metadata.name}")
try:
logs = api.read_namespaced_pod_log(pod.metadata.name, namespace, container=container, since_seconds=since_seconds)
except Exception as err:
print(f"ERROR: {err}")
pass
else:
if coalesce_duplicates:
previous_line = ""
duplicates = 1
for line in logs.split('\n'):
if line[27:] != previous_line[27:]:
if duplicates != 1:
print(f"\t{previous_line} (x{duplicates})")
print(f"\t{line}")
duplicates = 1
else:
duplicates = duplicates + 1
continue
if line[25:34] == "| ERROR |" or line[25:33] == "| WARN |":
entries_for_analysis.append(line)
previous_line = line
else:
print(logs)
print (f"There were {len(entries_for_analysis)} warnings and errors found.")
# Analyze log entries and suggest further relevant troubleshooting guides
from IPython.display import Markdown
import os
import json
import requests
import ipykernel
import datetime
from urllib.parse import urljoin
from notebook import notebookapp
def get_notebook_name():
"""Return the full path of the jupyter notebook. Some runtimes (e.g. ADS)
have the kernel_id in the filename of the connection file. If so, the
notebook name at runtime can be determined using `list_running_servers`.
Other runtimes (e.g. azdata) do not have the kernel_id in the filename of
the connection file, therefore we are unable to establish the filename
"""
connection_file = os.path.basename(ipykernel.get_connection_file())
# If the runtime has the kernel_id in the connection filename, use it to
# get the real notebook name at runtime, otherwise, use the notebook
# filename from build time.
try:
kernel_id = connection_file.split('-', 1)[1].split('.')[0]
except:
pass
else:
for servers in list(notebookapp.list_running_servers()):
try:
response = requests.get(urljoin(servers['url'], 'api/sessions'), params={'token': servers.get('token', '')}, timeout=.01)
except:
pass
else:
for nn in json.loads(response.text):
if nn['kernel']['id'] == kernel_id:
return nn['path']
def load_json(filename):
with open(filename, encoding="utf8") as json_file:
return json.load(json_file)
def get_notebook_rules():
"""Load the notebook rules from the metadata of this notebook (in the .ipynb file)"""
file_name = get_notebook_name()
if file_name == None:
return None
else:
j = load_json(file_name)
if "azdata" not in j["metadata"] or \
"expert" not in j["metadata"]["azdata"] or \
"log_analyzer_rules" not in j["metadata"]["azdata"]["expert"]:
return []
else:
return j["metadata"]["azdata"]["expert"]["log_analyzer_rules"]
rules = get_notebook_rules()
if rules == None:
print("")
print(f"Log Analysis only available when run in Azure Data Studio. Not available when run in azdata.")
else:
hints = 0
if len(rules) > 0:
for entry in entries_for_analysis:
for rule in rules:
if entry.find(rule[0]) != -1:
print (entry)
display(Markdown(f'HINT: Use [{rule[2]}]({rule[3]}) to resolve this issue.'))
hints = hints + 1
print("")
print(f"{len(entries_for_analysis)} log entries analyzed (using {len(rules)} rules). {hints} further troubleshooting hints made inline.")
print('Notebook execution complete.')
| 0.504883 | 0.665961 |
<a href="https://colab.research.google.com/github/abbaasalif/trasnformers_for_translation/blob/main/Transformer_for_translation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Importing dependencies
```
import numpy as np
import math
import re
import time
from google.colab import drive
import tensorflow as tf
from tensorflow.keras import layers
import tensorflow_datasets as tfds
```
# Data preprocessing
## Load the files
- we import the files from our personal drive folder
```
drive.mount('/content/drive')
with open('/content/drive/MyDrive/transformers/P85-Non-Breaking-Prefix.en',
mode='r',
encoding='utf-8') as f:
non_breaking_prefix_en = f.read()
with open('/content/drive/MyDrive/transformers/P85-Non-Breaking-Prefix.fr',
mode='r',
encoding='utf-8') as f:
non_breaking_prefix_fr = f.read()
with open('/content/drive/MyDrive/transformers/europarl-v7.fr-en.en',
mode='r',
encoding='utf-8') as f:
europarl_en = f.read()
with open('/content/drive/MyDrive/transformers/europarl-v7.fr-en.fr',
mode='r',
encoding='utf-8') as f:
europarl_fr = f.read()
```
# Cleaning Data
- Getting the non-breaking prefixes as a clean list of words, each with a period at the end, so they are easier to use. A tiny worked example of the cleaning follows the code below.
```
non_breaking_prefix_en = non_breaking_prefix_en.split("\n")
non_breaking_prefix_en = [" "+ pref +"." for pref in non_breaking_prefix_en]
non_breaking_prefix_fr = non_breaking_prefix_fr.split("\n")
non_breaking_prefix_fr = [" "+ pref +"." for pref in non_breaking_prefix_fr]
corpus_en = europarl_en
# Add a $$$ marker after periods that do not end a sentence
for prefix in non_breaking_prefix_en:
corpus_en = corpus_en.replace(prefix, prefix+"$$$")
corpus_en = re.sub(r"\.(?=[0=9]|[a-z]|[A-Z])",".$$$", corpus_en)
#Remove ### markers
corpus_en = re.sub(r".\$\$\$", '', corpus_en)
# Clear multiple spaces
corpus_en = re.sub(r" +", " ", corpus_en)
corpus_en = corpus_en.split("\n")
corpus_fr = europarl_fr
for prefix in non_breaking_prefix_fr:
corpus_fr = corpus_fr.replace(prefix, prefix+"$$$")
corpus_fr = re.sub(r"\.(?=[0=9]|[a-z]|[A-Z])",".$$$", corpus_fr)
corpus_fr = re.sub(r".\$\$\$",'', corpus_fr)
corpus_fr = re.sub(r" +", " ", corpus_fr)
corpus_fr = corpus_fr.split("\n")
```
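To make the effect of these regex steps visible, here is a tiny, made-up example (the sentence and the ` Mr.` prefix are invented for illustration; the real run uses the Europarl corpus loaded above). Note that periods inside numbers are removed as well:
```python
import re
# Tiny illustration of the cleaning steps above on a made-up sentence.
# " Mr." stands in for an entry of the non-breaking prefix list.
text = "I told Mr. Smith that 3.5 kg is enough."
text = text.replace(" Mr.", " Mr.$$$")                       # mark prefix periods
text = re.sub(r"\.(?=[0-9]|[a-z]|[A-Z])", ".$$$", text)      # mark other mid-sentence periods
text = re.sub(r".\$\$\$", '', text)                          # drop those periods with their markers
text = re.sub(r" +", " ", text)
print(text)   # I told Mr Smith that 35 kg is enough.
```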
# Tokenization
```
tokenizer_en = tfds.deprecated.text.SubwordTextEncoder.build_from_corpus(
corpus_en, target_vocab_size=2**13)
tokenizer_fr = tfds.deprecated.text.SubwordTextEncoder.build_from_corpus(
corpus_fr, target_vocab_size=2**13)
VOCAB_SIZE_EN = tokenizer_en.vocab_size+2
VOCAB_SIZE_FR = tokenizer_fr.vocab_size+2
VOCAB_SIZE_EN
inputs = [[VOCAB_SIZE_EN-2] + tokenizer_en.encode(sentence) + [VOCAB_SIZE_EN-1]
for sentence in corpus_en]
outputs = [[VOCAB_SIZE_FR-2] + tokenizer_fr.encode(sentence) + [VOCAB_SIZE_FR-1]
for sentence in corpus_fr]
```
# Remove too long sentences
```
MAX_LENGTH = 20
idx_to_remove = [count for count, sent in enumerate(inputs)
if len(sent) > MAX_LENGTH]
for idx in reversed(idx_to_remove):
del inputs[idx]
del outputs[idx]
idx_to_remove = [count for count, sent in enumerate(outputs)
if len(sent) > MAX_LENGTH]
for idx in reversed(idx_to_remove):
del inputs[idx]
del outputs[idx]
```
# Padding the Inputs & Outputs
```
inputs = tf.keras.preprocessing.sequence.pad_sequences(inputs, value=0,
padding="post",
maxlen=MAX_LENGTH)
outputs = tf.keras.preprocessing.sequence.pad_sequences(outputs, value=0,
padding="post",
maxlen=MAX_LENGTH)
BATCH_SIZE= 64
BUFFER_SIZE = 20000 # shuffling of dataset
dataset = tf.data.Dataset.from_tensor_slices((inputs, outputs))
dataset = dataset.cache()
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
```
# Model Building
## Embedding
Positional encoding formulas:
$PE_{(pos,2i)} = \sin\left(pos/10000^{2i/d_{model}}\right)$
$PE_{(pos,2i+1)} = \cos\left(pos/10000^{2i/d_{model}}\right)$
```
class PositionalEncoding(layers.Layer):
def __init__(self):
super(PositionalEncoding, self).__init__()
def get_angles(self, pos, i, d_model):
angles = 1 / np.power(10000., (2*(i//2)) / np.float32(d_model))
return pos * angles
def call(self, inputs):
seq_length = inputs.shape.as_list()[-2]
d_model = inputs.shape.as_list()[-1]
angles = self.get_angles(np.arange(seq_length)[:, np.newaxis],
np.arange(d_model)[np.newaxis, :],
d_model)
angles[:, 0::2] = np.sin(angles[:, 0::2])
angles[:, 1::2] = np.cos(angles[:, 1::2])
pos_encoding = angles[np.newaxis, ...]
return inputs + tf.cast(pos_encoding, tf.float32)
```
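As a quick sanity check of the class above (assuming the TensorFlow/NumPy imports from the top of the notebook are in scope), feeding a zero tensor returns the raw encoding, so position 0 should give 0 on the even channels and 1 on the odd channels:
```python
# Quick check of the positional encoding on a dummy batch,
# using the PositionalEncoding class defined above.
dummy = tf.zeros((1, 20, 128))            # (batch, seq_length, d_model)
encoded = PositionalEncoding()(dummy)
print(encoded.shape)                      # (1, 20, 128)
# Input is all zeros, so the output is the raw encoding itself:
# position 0 gives sin(0)=0 on even indices and cos(0)=1 on odd indices.
print(encoded[0, 0, 0].numpy(), encoded[0, 0, 1].numpy())   # 0.0 1.0
```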
## Attention
### Attention computation
$Attention(Q, K, V ) = \text{softmax}\left(\dfrac{QK^T}{\sqrt{d_k}}\right)V $
```
def scaled_dot_product_attention(queries, keys, values, mask):
product = tf.matmul(queries, keys, transpose_b=True)
keys_dim = tf.cast(tf.shape(keys)[-1], tf.float32)
scaled_product = product / tf.math.sqrt(keys_dim)
if mask is not None:
scaled_product += (mask * -1e9)
attention = tf.matmul(tf.nn.softmax(scaled_product, axis=-1), values)
return attention
```
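A small sanity check with hand-picked tensors makes the behaviour concrete: the query below is close to the first key, so nearly all of the attention weight should land on the first value row.
```python
# Sanity check: the query matches the first key, so almost all attention
# weight goes to the first value row.
q = tf.constant([[[10.0, 0.0]]])                    # (batch=1, seq_q=1, depth=2)
k = tf.constant([[[10.0, 0.0], [0.0, 10.0]]])       # (batch=1, seq_k=2, depth=2)
v = tf.constant([[[1.0, 0.0], [0.0, 1.0]]])         # (batch=1, seq_k=2, depth_v=2)
print(scaled_dot_product_attention(q, k, v, mask=None).numpy())
# ~[[[1., 0.]]] : the output is (almost) exactly the first value vector
```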
## Multi-headed attention sublayer
```
class MultiHeadAttention(layers.Layer):
def __init__(self, nb_proj):
super(MultiHeadAttention, self).__init__()
self.nb_proj = nb_proj
def build(self, input_shape):
self.d_model = input_shape[-1]
assert self.d_model % self.nb_proj == 0
self.d_proj = self.d_model // self.nb_proj
self.query_lin = layers.Dense(units=self.d_model)
self.key_lin = layers.Dense(units=self.d_model)
self.value_lin = layers.Dense(units=self.d_model)
self.final_lin = layers.Dense(units=self.d_model)
def split_proj(self, inputs, batch_size): # inputs: (batch_size, seq_length, d_model)
shape = (batch_size,
-1,
self.nb_proj,
self.d_proj)
splited_inputs = tf.reshape(inputs, shape=shape) # (batch_size, seq_length, nb_proj, d_proj)
return tf.transpose(splited_inputs, perm=[0, 2, 1, 3]) # (batch_size, nb_proj, seq_length, d_proj)
def call(self, queries, keys, values, mask):
batch_size = tf.shape(queries)[0]
queries = self.query_lin(queries)
keys = self.key_lin(keys)
values = self.value_lin(values)
queries = self.split_proj(queries, batch_size)
keys = self.split_proj(keys, batch_size)
values = self.split_proj(values, batch_size)
attention = scaled_dot_product_attention(queries, keys, values, mask)
attention = tf.transpose(attention, perm=[0, 2, 1, 3])
concat_attention = tf.reshape(attention,
shape=(batch_size, -1, self.d_model))
outputs = self.final_lin(concat_attention)
return outputs
```
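A similar shape check works for the multi-head layer; the choice of 8 heads and `d_model = 128` here is arbitrary, as long as `d_model` is divisible by `nb_proj`:
```python
# Shape check: multi-head attention preserves the (batch, seq_length, d_model) shape.
# 8 heads and d_model=128 are arbitrary, but d_model must be divisible by nb_proj.
mha = MultiHeadAttention(nb_proj=8)
x = tf.random.uniform((2, 20, 128))       # (batch, seq_length, d_model)
out = mha(x, x, x, None)                  # self-attention: queries = keys = values, no mask
print(out.shape)                          # (2, 20, 128)
```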
# Encoder
```
class EncoderLayer(layers.Layer):
def __init__(self, FFN_units, nb_proj, dropout_rate):
super(EncoderLayer, self).__init__()
self.FFN_units = FFN_units
self.nb_proj = nb_proj
self.dropout_rate = dropout_rate
def build(self, input_shape):
self.d_model = input_shape[-1]
self.multi_head_attention = MultiHeadAttention(self.nb_proj)
self.dropout_1 = layers.Dropout(rate=self.dropout_rate)
self.norm_1 = layers.LayerNormalization(epsilon=1e-6)
self.dense_1 = layers.Dense(units=self.FFN_units, activation="relu")
self.dense_2 = layers.Dense(units=self.d_model)
self.dropout_2 = layers.Dropout(rate=self.dropout_rate)
self.norm_2 = layers.LayerNormalization(epsilon=1e-6)
def call(self, inputs, mask, training):
attention = self.multi_head_attention(inputs,
inputs,
inputs,
mask)
attention = self.dropout_1(attention, training=training)
attention = self.norm_1(attention + inputs)
outputs = self.dense_1(attention)
outputs = self.dense_2(outputs)
outputs = self.dropout_2(outputs, training=training)
outputs = self.norm_2(outputs + attention)
return outputs
class Encoder(layers.Layer):
def __init__(self,
nb_layers,
FFN_units,
nb_proj,
dropout_rate,
vocab_size,
d_model,
name="encoder"):
super(Encoder, self).__init__(name=name)
self.nb_layers = nb_layers
self.d_model = d_model
self.embedding = layers.Embedding(vocab_size, d_model)
self.pos_encoding = PositionalEncoding()
self.dropout = layers.Dropout(rate=dropout_rate)
self.enc_layers = [EncoderLayer(FFN_units,
nb_proj,
dropout_rate)
for _ in range(nb_layers)]
def call(self, inputs, mask, training):
outputs = self.embedding(inputs)
outputs *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
outputs = self.pos_encoding(outputs)
outputs = self.dropout(outputs, training)
for i in range(self.nb_layers):
outputs = self.enc_layers[i](outputs, mask, training)
return outputs
```
# Decoder
```
class DecoderLayer(layers.Layer):
def __init__(self, FFN_units, nb_proj, dropout_rate):
super(DecoderLayer, self).__init__()
self.FFN_units = FFN_units
self.nb_proj = nb_proj
self.dropout_rate = dropout_rate
def build(self, input_shape):
self.d_model = input_shape[-1]
# Self multi head attention
self.multi_head_attention_1 = MultiHeadAttention(self.nb_proj)
self.dropout_1 = layers.Dropout(rate=self.dropout_rate)
self.norm_1 = layers.LayerNormalization(epsilon=1e-6)
# Multi head attention combined with encoder output
self.multi_head_attention_2 = MultiHeadAttention(self.nb_proj)
self.dropout_2 = layers.Dropout(rate=self.dropout_rate)
self.norm_2 = layers.LayerNormalization(epsilon=1e-6)
# Feed forward
self.dense_1 = layers.Dense(units=self.FFN_units,
activation="relu")
self.dense_2 = layers.Dense(units=self.d_model)
self.dropout_3 = layers.Dropout(rate=self.dropout_rate)
self.norm_3 = layers.LayerNormalization(epsilon=1e-6)
def call(self, inputs, enc_outputs, mask_1, mask_2, training):
attention = self.multi_head_attention_1(inputs,
inputs,
inputs,
mask_1)
attention = self.dropout_1(attention, training)
attention = self.norm_1(attention + inputs)
attention_2 = self.multi_head_attention_2(attention,
enc_outputs,
enc_outputs,
mask_2)
attention_2 = self.dropout_2(attention_2, training)
attention_2 = self.norm_2(attention_2 + attention)
outputs = self.dense_1(attention_2)
outputs = self.dense_2(outputs)
outputs = self.dropout_3(outputs, training)
outputs = self.norm_3(outputs + attention_2)
return outputs
class Decoder(layers.Layer):
def __init__(self,
nb_layers,
FFN_units,
nb_proj,
dropout_rate,
vocab_size,
d_model,
name="decoder"):
super(Decoder, self).__init__(name=name)
self.d_model = d_model
self.nb_layers = nb_layers
self.embedding = layers.Embedding(vocab_size, d_model)
self.pos_encoding = PositionalEncoding()
self.dropout = layers.Dropout(rate=dropout_rate)
self.dec_layers = [DecoderLayer(FFN_units,
nb_proj,
dropout_rate)
for i in range(nb_layers)]
def call(self, inputs, enc_outputs, mask_1, mask_2, training):
outputs = self.embedding(inputs)
outputs *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
outputs = self.pos_encoding(outputs)
outputs = self.dropout(outputs, training)
for i in range(self.nb_layers):
outputs = self.dec_layers[i](outputs,
enc_outputs,
mask_1,
mask_2,
training)
return outputs
```
# Transformer
```
class Transformer(tf.keras.Model):
def __init__(self,
vocab_size_enc,
vocab_size_dec,
d_model,
nb_layers,
FFN_units,
nb_proj,
dropout_rate,
name="transformer"):
super(Transformer, self).__init__(name=name)
self.encoder = Encoder(nb_layers,
FFN_units,
nb_proj,
dropout_rate,
vocab_size_enc,
d_model)
self.decoder = Decoder(nb_layers,
FFN_units,
nb_proj,
dropout_rate,
vocab_size_dec,
d_model)
self.last_linear = layers.Dense(units=vocab_size_dec, name="lin_ouput")
def create_padding_mask(self, seq):
mask = tf.cast(tf.math.equal(seq, 0), tf.float32)
return mask[:, tf.newaxis, tf.newaxis, :]
def create_look_ahead_mask(self, seq):
seq_len = tf.shape(seq)[1]
look_ahead_mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)
return look_ahead_mask
def call(self, enc_inputs, dec_inputs, training):
enc_mask = self.create_padding_mask(enc_inputs)
dec_mask_1 = tf.maximum(
self.create_padding_mask(dec_inputs),
self.create_look_ahead_mask(dec_inputs)
)
dec_mask_2 = self.create_padding_mask(enc_inputs)
enc_outputs = self.encoder(enc_inputs, enc_mask, training)
dec_outputs = self.decoder(dec_inputs,
enc_outputs,
dec_mask_1,
dec_mask_2,
training)
outputs = self.last_linear(dec_outputs)
return outputs
```
# Training
```
tf.keras.backend.clear_session()
# Hyper-parameters
D_MODEL = 128 # 512
NB_LAYERS = 4 # 6
FFN_UNITS = 512 # 2048
NB_PROJ = 8 # 8
DROPOUT_RATE = 0.1 # 0.1
transformer = Transformer(vocab_size_enc=VOCAB_SIZE_EN,
vocab_size_dec=VOCAB_SIZE_FR,
d_model=D_MODEL,
nb_layers=NB_LAYERS,
FFN_units=FFN_UNITS,
nb_proj=NB_PROJ,
dropout_rate=DROPOUT_RATE)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits = True,
reduction = "none"
)
def loss_function(target, pred):
mask = tf.math.logical_not(tf.equal(target,0))
loss_ = loss_object(target, pred)
mask = tf.cast(mask, dtype = loss_.dtype)
loss_ *= mask
return tf.reduce_mean(loss_)
train_loss = tf.keras.metrics.Mean(name="train_loss")
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name="train_accuracy")
class CustomSchedule(tf.optimizers.schedules.LearningRateSchedule):
def __init__(self, d_model, warmup_steps=4000):
super(CustomSchedule, self).__init__()
self.d_model = tf.cast(d_model, tf.float32)
self.warmup_steps = warmup_steps
def __call__(self, step):
arg1 = tf.math.rsqrt(step)
arg2 = step * (self.warmup_steps**-1.5)
return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)
learning_rate = CustomSchedule(D_MODEL)
optimizer = tf.keras.optimizers.Adam(
learning_rate,
beta_1 = 0.9,
beta_2 = 0.98,
epsilon = 1e-9
)
checkpoint_path = "/content/drive/MyDrive/transformers/ckpt"
ckpt = tf.train.Checkpoint(transformer = transformer,
optimizer = optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)
if ckpt_manager.latest_checkpoint:
ckpt.restore(ckpt_manager.latest_checkpoint)
print("Latest Checkpoint restored!!!")
EPOCHS = 10
for epoch in range(EPOCHS):
print("Start of epoch {}".format(epoch+1))
start = time.time()
train_loss.reset_states()
train_accuracy.reset_states()
for (batch, (enc_inputs, targets)) in enumerate(dataset):
dec_inputs = targets[:, :-1]
dec_outputs_real = targets[:, 1:]
with tf.GradientTape() as tape:
predictions = transformer(enc_inputs, dec_inputs, True)
loss = loss_function(dec_outputs_real, predictions)
gradients = tape.gradient(loss, transformer.trainable_variables)
optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))
train_loss(loss)
train_accuracy(dec_outputs_real, predictions)
if batch % 50 == 0:
print("Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}".format(
epoch+1, batch, train_loss.result(), train_accuracy.result()))
ckpt_save_path = ckpt_manager.save()
print("Saving checkpoint for epoch {} at {}".format(epoch+1,
ckpt_save_path))
print("Time taken for 1 epoch: {} secs\n".format(time.time() - start))
```
# Evaluating
```
def evaluate(inp_sentence):
inp_sentence = \
[VOCAB_SIZE_EN-2] + tokenizer_en.encode(inp_sentence) + [VOCAB_SIZE_EN-1]
enc_input = tf.expand_dims(inp_sentence, axis=0)
output = tf.expand_dims([VOCAB_SIZE_FR-2], axis=0)
for _ in range(MAX_LENGTH):
predictions = transformer(enc_input, output, False)
prediction = predictions[:, -1:, :]
predicted_id = tf.cast(tf.argmax(prediction, axis=-1), tf.int32)
if predicted_id == VOCAB_SIZE_FR-1:
return tf.squeeze(output, axis=0)
output = tf.concat([output, predicted_id], axis=-1)
return tf.squeeze(output, axis=0)
def translate(sentence):
output = evaluate(sentence).numpy()
predicted_sentence = tokenizer_fr.decode(
[i for i in output if i < VOCAB_SIZE_FR-2]
)
print("Input: {}".format(sentence))
print("Predicted translation: {}".format(predicted_sentence))
translate("This is my first model.")
```
|
github_jupyter
|
import numpy as np
import math
import re
import time
from google.colab import drive
import tensorflow as tf
from tensorflow.keras import layers
import tensorflow_datasets as tfds
drive.mount('/content/drive')
with open('/content/drive/MyDrive/transformers/P85-Non-Breaking-Prefix.en',
mode='r',
encoding='utf-8') as f:
non_breaking_prefix_en = f.read()
with open('/content/drive/MyDrive/transformers/P85-Non-Breaking-Prefix.fr',
mode='r',
encoding='utf-8') as f:
non_breaking_prefix_fr = f.read()
with open('/content/drive/MyDrive/transformers/europarl-v7.fr-en.en',
mode='r',
encoding='utf-8') as f:
europarl_en = f.read()
with open('/content/drive/MyDrive/transformers/europarl-v7.fr-en.fr',
mode='r',
encoding='utf-8') as f:
europarl_fr = f.read()
non_breaking_prefix_en = non_breaking_prefix_en.split("\n")
non_breaking_prefix_en = [" "+ pref +"." for pref in non_breaking_prefix_en]
non_breaking_prefix_fr = non_breaking_prefix_fr.split("\n")
non_breaking_prefix_fr = [" "+ pref +"." for pref in non_breaking_prefix_fr]
corpus_en = europarl_en
#Adding ### after non ending sentence points
for prefix in non_breaking_prefix_en:
corpus_en = corpus_en.replace(prefix, prefix+"$$$")
corpus_en = re.sub(r"\.(?=[0=9]|[a-z]|[A-Z])",".$$$", corpus_en)
#Remove ### markers
corpus_en = re.sub(r".\$\$\$", '', corpus_en)
# Clear muliple spaces
corpus_en = re.sub(r" +", " ", corpus_en)
corpus_en = corpus_en.split("\n")
corpus_fr = europarl_fr
for prefix in non_breaking_prefix_fr:
corpus_fr = corpus_fr.replace(prefix, prefix+"$$$")
corpus_fr = re.sub(r"\.(?=[0=9]|[a-z]|[A-Z])",".$$$", corpus_fr)
corpus_fr = re.sub(r".\$\$\$",'', corpus_fr)
corpus_fr = re.sub(r" +", " ", corpus_fr)
corpus_fr = corpus_fr.split("\n")
tokenizer_en = tfds.deprecated.text.SubwordTextEncoder.build_from_corpus(
corpus_en, target_vocab_size=2**13)
tokenizer_fr = tfds.deprecated.text.SubwordTextEncoder.build_from_corpus(
corpus_fr, target_vocab_size=2**13)
VOCAB_SIZE_EN = tokenizer_en.vocab_size+2
VOCAB_SIZE_FR = tokenizer_fr.vocab_size+2
VOCAB_SIZE_EN
inputs = [[VOCAB_SIZE_EN-2] + tokenizer_en.encode(sentence) + [VOCAB_SIZE_EN-1]
for sentence in corpus_en]
outputs = [[VOCAB_SIZE_FR-2] + tokenizer_fr.encode(sentence) + [VOCAB_SIZE_FR-1]
for sentence in corpus_fr]
MAX_LENGTH = 20
idx_to_remove = [count for count, sent in enumerate(inputs)
if len(sent) > MAX_LENGTH]
for idx in reversed(idx_to_remove):
del inputs[idx]
del outputs[idx]
idx_to_remove = [count for count, sent in enumerate(outputs)
if len(sent) > MAX_LENGTH]
for idx in reversed(idx_to_remove):
del inputs[idx]
del outputs[idx]
inputs = tf.keras.preprocessing.sequence.pad_sequences(inputs, value=0,
padding="post",
maxlen=MAX_LENGTH)
outputs = tf.keras.preprocessing.sequence.pad_sequences(outputs, value=0,
padding="post",
maxlen=MAX_LENGTH)
BATCH_SIZE= 64
BUFFER_SIZE = 20000 # shuffling of dataset
dataset = tf.data.Dataset.from_tensor_slices((inputs, outputs))
dataset = dataset.cache()
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
class PositionalEncoding(layers.Layer):
def __init__(self):
super(PositionalEncoding, self).__init__()
def get_angles(self, pos, i, d_model):
angles = 1 / np.power(10000., (2*(i//2)) / np.float32(d_model))
return pos * angles
def call(self, inputs):
seq_length = inputs.shape.as_list()[-2]
d_model = inputs.shape.as_list()[-1]
angles = self.get_angles(np.arange(seq_length)[:, np.newaxis],
np.arange(d_model)[np.newaxis, :],
d_model)
angles[:, 0::2] = np.sin(angles[:, 0::2])
angles[:, 1::2] = np.cos(angles[:, 1::2])
pos_encoding = angles[np.newaxis, ...]
return inputs + tf.cast(pos_encoding, tf.float32)
def scaled_dot_product_attention(queries, keys, values, mask):
product = tf.matmul(queries, keys, transpose_b=True)
keys_dim = tf.cast(tf.shape(keys)[-1], tf.float32)
scaled_product = product / tf.math.sqrt(keys_dim)
if mask is not None:
scaled_product += (mask * -1e9)
attention = tf.matmul(tf.nn.softmax(scaled_product, axis=-1), values)
return attention
class MultiHeadAttention(layers.Layer):
def __init__(self, nb_proj):
super(MultiHeadAttention, self).__init__()
self.nb_proj = nb_proj
def build(self, input_shape):
self.d_model = input_shape[-1]
assert self.d_model % self.nb_proj == 0
self.d_proj = self.d_model // self.nb_proj
self.query_lin = layers.Dense(units=self.d_model)
self.key_lin = layers.Dense(units=self.d_model)
self.value_lin = layers.Dense(units=self.d_model)
self.final_lin = layers.Dense(units=self.d_model)
def split_proj(self, inputs, batch_size): # inputs: (batch_size, seq_length, d_model)
shape = (batch_size,
-1,
self.nb_proj,
self.d_proj)
splited_inputs = tf.reshape(inputs, shape=shape) # (batch_size, seq_length, nb_proj, d_proj)
return tf.transpose(splited_inputs, perm=[0, 2, 1, 3]) # (batch_size, nb_proj, seq_length, d_proj)
def call(self, queries, keys, values, mask):
batch_size = tf.shape(queries)[0]
queries = self.query_lin(queries)
keys = self.key_lin(keys)
values = self.value_lin(values)
queries = self.split_proj(queries, batch_size)
keys = self.split_proj(keys, batch_size)
values = self.split_proj(values, batch_size)
attention = scaled_dot_product_attention(queries, keys, values, mask)
attention = tf.transpose(attention, perm=[0, 2, 1, 3])
concat_attention = tf.reshape(attention,
shape=(batch_size, -1, self.d_model))
outputs = self.final_lin(concat_attention)
return outputs
class EncoderLayer(layers.Layer):
def __init__(self, FFN_units, nb_proj, dropout_rate):
super(EncoderLayer, self).__init__()
self.FFN_units = FFN_units
self.nb_proj = nb_proj
self.dropout_rate = dropout_rate
def build(self, input_shape):
self.d_model = input_shape[-1]
self.multi_head_attention = MultiHeadAttention(self.nb_proj)
self.dropout_1 = layers.Dropout(rate=self.dropout_rate)
self.norm_1 = layers.LayerNormalization(epsilon=1e-6)
self.dense_1 = layers.Dense(units=self.FFN_units, activation="relu")
self.dense_2 = layers.Dense(units=self.d_model)
self.dropout_2 = layers.Dropout(rate=self.dropout_rate)
self.norm_2 = layers.LayerNormalization(epsilon=1e-6)
def call(self, inputs, mask, training):
attention = self.multi_head_attention(inputs,
inputs,
inputs,
mask)
attention = self.dropout_1(attention, training=training)
attention = self.norm_1(attention + inputs)
outputs = self.dense_1(attention)
outputs = self.dense_2(outputs)
outputs = self.dropout_2(outputs, training=training)
outputs = self.norm_2(outputs + attention)
return outputs
class Encoder(layers.Layer):
def __init__(self,
nb_layers,
FFN_units,
nb_proj,
dropout_rate,
vocab_size,
d_model,
name="encoder"):
super(Encoder, self).__init__(name=name)
self.nb_layers = nb_layers
self.d_model = d_model
self.embedding = layers.Embedding(vocab_size, d_model)
self.pos_encoding = PositionalEncoding()
self.dropout = layers.Dropout(rate=dropout_rate)
self.enc_layers = [EncoderLayer(FFN_units,
nb_proj,
dropout_rate)
for _ in range(nb_layers)]
def call(self, inputs, mask, training):
outputs = self.embedding(inputs)
outputs *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
outputs = self.pos_encoding(outputs)
outputs = self.dropout(outputs, training)
for i in range(self.nb_layers):
outputs = self.enc_layers[i](outputs, mask, training)
return outputs
class DecoderLayer(layers.Layer):
def __init__(self, FFN_units, nb_proj, dropout_rate):
super(DecoderLayer, self).__init__()
self.FFN_units = FFN_units
self.nb_proj = nb_proj
self.dropout_rate = dropout_rate
def build(self, input_shape):
self.d_model = input_shape[-1]
# Self multi head attention
self.multi_head_attention_1 = MultiHeadAttention(self.nb_proj)
self.dropout_1 = layers.Dropout(rate=self.dropout_rate)
self.norm_1 = layers.LayerNormalization(epsilon=1e-6)
# Multi head attention combined with encoder output
self.multi_head_attention_2 = MultiHeadAttention(self.nb_proj)
self.dropout_2 = layers.Dropout(rate=self.dropout_rate)
self.norm_2 = layers.LayerNormalization(epsilon=1e-6)
# Feed foward
self.dense_1 = layers.Dense(units=self.FFN_units,
activation="relu")
self.dense_2 = layers.Dense(units=self.d_model)
self.dropout_3 = layers.Dropout(rate=self.dropout_rate)
self.norm_3 = layers.LayerNormalization(epsilon=1e-6)
def call(self, inputs, enc_outputs, mask_1, mask_2, training):
attention = self.multi_head_attention_1(inputs,
inputs,
inputs,
mask_1)
attention = self.dropout_1(attention, training)
attention = self.norm_1(attention + inputs)
attention_2 = self.multi_head_attention_2(attention,
enc_outputs,
enc_outputs,
mask_2)
attention_2 = self.dropout_2(attention_2, training)
attention_2 = self.norm_2(attention_2 + attention)
outputs = self.dense_1(attention_2)
outputs = self.dense_2(outputs)
outputs = self.dropout_3(outputs, training)
outputs = self.norm_3(outputs + attention_2)
return outputs
class Decoder(layers.Layer):
def __init__(self,
nb_layers,
FFN_units,
nb_proj,
dropout_rate,
vocab_size,
d_model,
name="decoder"):
super(Decoder, self).__init__(name=name)
self.d_model = d_model
self.nb_layers = nb_layers
self.embedding = layers.Embedding(vocab_size, d_model)
self.pos_encoding = PositionalEncoding()
self.dropout = layers.Dropout(rate=dropout_rate)
self.dec_layers = [DecoderLayer(FFN_units,
nb_proj,
dropout_rate)
for i in range(nb_layers)]
def call(self, inputs, enc_outputs, mask_1, mask_2, training):
outputs = self.embedding(inputs)
outputs *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
outputs = self.pos_encoding(outputs)
outputs = self.dropout(outputs, training)
for i in range(self.nb_layers):
outputs = self.dec_layers[i](outputs,
enc_outputs,
mask_1,
mask_2,
training)
return outputs
class Transformer(tf.keras.Model):
def __init__(self,
vocab_size_enc,
vocab_size_dec,
d_model,
nb_layers,
FFN_units,
nb_proj,
dropout_rate,
name="transformer"):
super(Transformer, self).__init__(name=name)
self.encoder = Encoder(nb_layers,
FFN_units,
nb_proj,
dropout_rate,
vocab_size_enc,
d_model)
self.decoder = Decoder(nb_layers,
FFN_units,
nb_proj,
dropout_rate,
vocab_size_dec,
d_model)
self.last_linear = layers.Dense(units=vocab_size_dec, name="lin_ouput")
def create_padding_mask(self, seq):
mask = tf.cast(tf.math.equal(seq, 0), tf.float32)
return mask[:, tf.newaxis, tf.newaxis, :]
def create_look_ahead_mask(self, seq):
seq_len = tf.shape(seq)[1]
look_ahead_mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)
return look_ahead_mask
def call(self, enc_inputs, dec_inputs, training):
enc_mask = self.create_padding_mask(enc_inputs)
dec_mask_1 = tf.maximum(
self.create_padding_mask(dec_inputs),
self.create_look_ahead_mask(dec_inputs)
)
dec_mask_2 = self.create_padding_mask(enc_inputs)
enc_outputs = self.encoder(enc_inputs, enc_mask, training)
dec_outputs = self.decoder(dec_inputs,
enc_outputs,
dec_mask_1,
dec_mask_2,
training)
outputs = self.last_linear(dec_outputs)
return outputs
tf.keras.backend.clear_session()
# Hyper-parameters
D_MODEL = 128 # 512
NB_LAYERS = 4 # 6
FFN_UNITS = 512 # 2048
NB_PROJ = 8 # 8
DROPOUT_RATE = 0.1 # 0.1
transformer = Transformer(vocab_size_enc=VOCAB_SIZE_EN,
vocab_size_dec=VOCAB_SIZE_FR,
d_model=D_MODEL,
nb_layers=NB_LAYERS,
FFN_units=FFN_UNITS,
nb_proj=NB_PROJ,
dropout_rate=DROPOUT_RATE)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits = True,
reduction = "none"
)
def loss_function(target, pred):
mask = tf.math.logical_not(tf.equal(target,0))
loss_ = loss_object(target, pred)
mask = tf.cast(mask, dtype = loss_.dtype)
loss_ *= mask
return tf.reduce_mean(loss_)
train_loss = tf.keras.metrics.Mean(name="train_loss")
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name="train_accuracy")
class CustomSchedule(tf.optimizers.schedules.LearningRateSchedule):
def __init__(self, d_model, warmup_steps=4000):
super(CustomSchedule, self).__init__()
self.d_model = tf.cast(d_model, tf.float32)
self.warmup_steps = warmup_steps
def __call__(self, step):
arg1 = tf.math.rsqrt(step)
arg2 = step * (self.warmup_steps**-1.5)
return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)
learning_rate = CustomSchedule(D_MODEL)
optimizer = tf.keras.optimizers.Adam(
learning_rate,
beta_1 = 0.9,
beta_2 = 0.98,
epsilon = 1e-9
)
checkpoint_path = "/content/drive/MyDrive/transformers/ckpt"
ckpt = tf.train.Checkpoint(transformer = transformer,
optimizer = optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)
if ckpt_manager.latest_checkpoint:
ckpt.restore(ckpt_manager.latest_checkpoint)
print("Latest Checkpoint restored!!!")
EPOCHS = 10
for epoch in range(EPOCHS):
print("Start of epoch {}".format(epoch+1))
start = time.time()
train_loss.reset_states()
train_accuracy.reset_states()
for (batch, (enc_inputs, targets)) in enumerate(dataset):
dec_inputs = targets[:, :-1]
dec_outputs_real = targets[:, 1:]
with tf.GradientTape() as tape:
predictions = transformer(enc_inputs, dec_inputs, True)
loss = loss_function(dec_outputs_real, predictions)
gradients = tape.gradient(loss, transformer.trainable_variables)
optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))
train_loss(loss)
train_accuracy(dec_outputs_real, predictions)
if batch % 50 == 0:
print("Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}".format(
epoch+1, batch, train_loss.result(), train_accuracy.result()))
ckpt_save_path = ckpt_manager.save()
print("Saving checkpoint for epoch {} at {}".format(epoch+1,
ckpt_save_path))
print("Time taken for 1 epoch: {} secs\n".format(time.time() - start))
def evaluate(inp_sentence):
inp_sentence = \
[VOCAB_SIZE_EN-2] + tokenizer_en.encode(inp_sentence) + [VOCAB_SIZE_EN-1]
enc_input = tf.expand_dims(inp_sentence, axis=0)
output = tf.expand_dims([VOCAB_SIZE_FR-2], axis=0)
for _ in range(MAX_LENGTH):
predictions = transformer(enc_input, output, False)
prediction = predictions[:, -1:, :]
predicted_id = tf.cast(tf.argmax(prediction, axis=-1), tf.int32)
if predicted_id == VOCAB_SIZE_FR-1:
return tf.squeeze(output, axis=0)
output = tf.concat([output, predicted_id], axis=-1)
return tf.squeeze(output, axis=0)
def translate(sentence):
output = evaluate(sentence).numpy()
predicted_sentence = tokenizer_fr.decode(
[i for i in output if i < VOCAB_SIZE_FR-2]
)
print("Input: {}".format(sentence))
print("Predicted translation: {}".format(predicted_sentence))
translate("This is my first model.")
| 0.401219 | 0.711519 |
# Jupyter Based Content
This is an identical copy of the Markdown Based Notebook chapter, but the source file was a Jupyter Notebook (.ipynb) instead of a Markdown (.md) file
## Standard Markdown
These features are supported by all renderers as they are part of the basic set of features of Markdown. Some examples were extracted from the [official docs](https://www.markdownguide.org/basic-syntax).
### Text Formating
In a Markdown text one can use **bold**, *italics* or ***both***. It is also possible to combine it with `monospace`.
### Quotes
>It is also possible to quote
### Lists
To keep everything organized, one usually uses lists, either ordered
1. Step 1
1. Step 2
1. Step 3
Or without any order
- Requirement 1
- Requirement 2
- Requirement 3
### Horizontal Rule
Useful to separate sections
---
From other non-related topic
### Links
Did you know that there are many search engines online?
There are [some](https://duckduckgo.com/) which focus on privacy, [others](https://www.startpage.com/) that just work around Google and [there is also one](https://www.searchencrypt.com/) that uses encryption!
### Images
An image is worth a thousand words, some say

But it is even better if you can click them
[](https://jupyter.org/)
## Limited Support
These features are part of some flavours of Markdown but not all renderers can process them. It is recommended to visualize this document in the target platform to check exactly which features are supported
### Tables
Tables are really complicated in Markdown and should only be generated by [a specialized tool](http://www.tablesgenerator.com/markdown_tables). Don't modify markdown tables manually
| Syntax | Description |
| ----------- | ----------- |
| Header | Title |
| Paragraph | Text |
### Block of Code
For code blocks, Jupyter Book will add a ```Copy``` button in the top left corner to easily copy all the text in the block.
Sometimes one wants to share data
(data)=
```
{
"firstName": "John",
"lastName": "Smith",
"age": 25
}
```
Or simply say hello
```python
print("Hello World!")
```
### Footnotes
Here's a sentence with a footnote. [^1]
[^1]: This is the footnote.
### Math
Some renderers can process inline math, $x=2$, and others can also process whole-line math
$$
\int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
$$
If supported, alignment can be done with array
$$
\begin{array}{llll}
a_{11}& =b_{11}& a_{12}& =b_{12}\\
a_{21}& =b_{21}& a_{22}& =b_{22}+c_{22}
\end{array}
$$
### Not supported in Jupyter Book
These are features that some renderers support but are not supported by Jupyter Book
#### Strikethrough
Sometimes you ~~make a mistake~~ have a great idea
However, an HTML approach can produce the desired result
Sometimes you <s>make a mistake</s> have a great idea
#### Check List
- [x] Write the press release
- [ ] Update the website
- [ ] Contact the media
However, an HTML approach can produce the desired result
<label><input type="checkbox"> Write the press release</input></label><br>
<label><input type="checkbox"> Update the website</input></label><br>
<label><input type="checkbox"> Contact the media</input></label><br>
## MyST Specific
These are some features that at the moment are only compatible with Jupyter Book. That means that rendering the notebook in other services (Github, NBviewer, Nteract, Data Lore, etc.) might not work as shown below. If your only target platform is Jupyter Book, you can use any of the following.
### Colored Admonitions
```{note}
Here is a Note
```
```{important}
Here is an important
```
```{caution} text
Here is a caution
```
```{attention} text
Here is an attention
```
```{warning}
Here is a Warning
```
```{error}
Here is an error
```
```{admonition} Tip
:class: tip
Here is a Tip
```
```{admonition} admonition
Here is a custom admonition
```
### Hidden Toggles
```{toggle} Click the button to reveal!
Some hidden toggle content!
```
### Dropdowns
```{dropdown} Outside Content
Inside Content
```
(homework)=
### Dropdowns with Admonitions
```{note}
:class: dropdown
The note body will be hidden!
```
### Panels
```{panels}
Panel header 1
^^^
Panel body 1
+++
Panel footer 1
---
Panel header 2
^^^
Panel body 2
+++
Panel footer 2
```
### Tabs
```{tabbed} Tab 1 title
My first tab
```
```{tabbed} Tab 2 title
My second tab with `some code`!
```
### Sidebars
```{sidebar} My sidebar title
My sidebar content
```
Sidebars are shown in parallel to the main text, but they span inwards from the margins.
### Margin Note
```{margin} An optional title
My margin content
```
Margin notes are more discreet, useful for pointing out minor details or adding extra information.
They span from the margin outwards
### Labels
Would you like to write homework for your students? Check the [dropdowns](homework)!
It is always important to provide enough [data](data) to support claims!
### Equations
#### Without Label
```{math}
x^2 + y^2 = 1
```
#### With Label
```{math}
:label: algebra
x^2 + y^2 = 1
```
If the equation has a label, it can then be referenced [](algebra)
### Citation
There are two types of citations, by name (similar to APA style) and by number (similar to IEEE style).
Citations can be customized, for more detailed explanations please refer to one of the Jupyter Book docs ([here](https://jupyterbook.org/tutorials/references.html), [here](https://jupyterbook.org/content/references.html) or [here](https://jupyterbook.org/content/citations.html)) and the [associated extension docs](https://sphinxcontrib-bibtex.readthedocs.io/en/latest/usage.html).
#### By Name
This project uses the Python Language {cite}`perez2011python` to build Jupyter Book {cite}`executable_book`
This will not work unless there is a section called `bibliography`
#### Bibliography for Names
There are different styles for the bibliography in this case
##### Alphanumeric - Sorted by Author, year
```{bibliography}
:style: alpha
```
##### Alphanumeric - Sorted by order of appearance
```{bibliography}
:style: unsrtalpha
```
##### Numeric - Sorted by Author, year
```{bibliography}
:style: plain
```
##### Numeric - Sorted by order of appearance
```{bibliography}
:style: unsrt
```
#### By Number
This project uses the Python Language {footcite}`perez2011python` to build Jupyter Book {footcite}`executable_book`
This will not work unless there is a section called `footbibliography`
For different citation styles, check the [official docs](https://sphinxcontrib-bibtex.readthedocs.io/en/latest/usage.html#roles-and-directives)
When using `footcite` and `footbibliography`, the numbers will be arranged in combination with the footnotes. This could lead to confusion for the reader if this type of reference is used in combination with footnotes. To mitigate this issue, a similar result can be achieved using `cite` and `bibliography` with the `unsrt` style.
#### Bibliography for Numbers
In this case, there is a single style
```{footbibliography}
```
[Table of Contents](http://nbviewer.ipython.org/github/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/table_of_contents.ipynb)
# Nonlinear Filtering
```
#format the book
%matplotlib inline
from __future__ import division, print_function
from book_format import load_style
load_style()
```
## Introduction
The Kalman filter that we have developed uses linear equations, and so the filter can only handle linear problems. But the world is nonlinear, and so the classic filter that we have been studying to this point can have very limited utility.
There can be nonlinearity in the process model. Suppose we wanted to track the motion of a weight on a spring, such as an automobile's suspension. The equation for the motion with $m$ being the mass, $k$ the spring constant, and $c$ the damping coefficient is
$$m\frac{d^2x}{dt^2} + c\frac{dx}{dt} +kx = 0$$
There is no linear solution for $x(t)$ for this second order differential equation, and therefore we cannot design a Kalman filter using the theory that we have learned.
A second source of nonlinearity comes from the measurements. For example, radars measure the slant range to an object, and we are typically interested in the aircraft's position over the ground. We invoke Pythagoras and get the nonlinear equation:
$$x=\sqrt{\mathtt{slant}^2 - \mathtt{altitude}^2}$$
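As a quick numeric illustration of that relationship (the numbers below are made up purely for the example):
```
import math

# Illustrative values only: 10 km slant range, 3 km altitude.
slant, altitude = 10.0, 3.0
ground_range = math.sqrt(slant**2 - altitude**2)
print(ground_range)   # about 9.54 km
```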
These facts were not lost on the early adopters of the Kalman filter. Soon after Dr. Kalman published his paper people began working on how to extend the Kalman filter for nonlinear problems.
It is almost true to state that the only equation anyone knows how to solve is $\mathbf{Ax}=\mathbf{b}$. We only really know how to do linear algebra. I can give you any linear set of equations and you can either solve it or prove that it has no solution.
Anyone with formal education in math or physics has spent years learning various analytic ways to solve integrals, differential equations and so on. Yet even trivial physical systems produce equations that cannot be solved analytically. I can take an equation that you are able to integrate, insert a $\log$ term, and render it insolvable. This leads to jokes about physicists stating "assume a spherical cow on a frictionless surface in a vacuum...". Without making extreme simplifications most physical problems do not have analytic solutions.
How do we do things like model airflow over an aircraft in a computer, or predict weather, or track missiles with a Kalman filter? We retreat to what we know: $\mathbf{Ax}=\mathbf{b}$. We find some way to linearize the problem, turning it into a set of linear equations, and then use linear algebra software packages to compute an approximate solution.
Linearizing a nonlinear problem gives us inexact answers, and in a recursive algorithm like a Kalman filter or weather tracking system these small errors can sometimes reinforce each other at each step, quickly causing the algorithm to spit out nonsense.
What we are about to embark upon is a difficult problem. There is not one obvious, correct, mathematically optimal solution anymore. We will be using approximations, we will be introducing errors into our computations, and we will forever be battling filters that *diverge*, that is, filters whose numerical errors overwhelm the solution.
In the remainder of this short chapter I will illustrate the specific problems the nonlinear Kalman filter faces. You can only design a filter after understanding the particular problems the nonlinearity in your problem causes. Subsequent chapters will then teach you how to design and implement different kinds of nonlinear filters.
## The Problem with Nonlinearity
The mathematics of the Kalman filter is beautiful in part due to the Gaussian equation being so special. It is nonlinear, but when we add and multiply them we get another Gaussian as a result. That is very rare. $\sin{x}*\sin{y}$ does not yield a $\sin$ as an output.
What I mean by linearity may be obvious, but there are some subtleties. The mathematical requirements are twofold:
* additivity: $f(x+y) = f(x) + f(y)$
* homogeneity: $f(ax) = af(x)$
This leads us to say that a linear system is defined as a system whose output is linearly proportional to the sum of all its inputs. A consequence of this is that for a system to be linear, if the input is zero then the output must also be zero. Consider an audio amp - if I sing into a microphone, and you start talking, the output should be the sum of our voices (input) scaled by the amplifier gain. But if the amplifier outputs a nonzero signal such as a hum for a zero input, the additive relationship no longer holds. This is because linearity requires that $amp(voice) = amp(voice + 0)$. This clearly should give the same output, but if $amp(0)$ is nonzero, then
$$
\begin{aligned}
amp(voice) &= amp(voice + 0) \\
&= amp(voice) + amp(0) \\
&= amp(voice) + non\_zero\_value
\end{aligned}
$$
which is clearly nonsense. Hence, an apparently linear equation such as
$$L(f(t)) = f(t) + 1$$
is not linear because $L(0) = 1$. Be careful!
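If that seems abstract, here is a quick numerical check of the two conditions for the apparently linear function $f(x)=2x+1$ (the test values are arbitrary):
```
f = lambda x: 2*x + 1   # looks linear, but the +1 offset breaks linearity
x, y, a = 3.0, 4.0, 2.5

print(f(x + y), f(x) + f(y))   # 15.0 vs 16.0 -> additivity fails
print(f(a*x), a*f(x))          # 16.0 vs 17.5 -> homogeneity fails
```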
## An Intuitive Look at the Problem
I particularly like the following way of looking at the problem, which I am borrowing from Dan Simon's *Optimal State Estimation* [[1]](#[1]). Consider a tracking problem where we get the range and bearing to a target, and we want to track its position. The reported distance is 50 km, and the reported angle is 90$^\circ$. Assume that the errors in both range and angle are distributed in a Gaussian manner. Given an infinite number of measurements what is the expected value of the position?
I have been recommending using intuition to gain insight, so let's see how it fares for this problem. We might reason that since the mean of the range will be 50 km, and the mean of the angle will be 90$^\circ$, that the answer will be x=0 km, y=50 km.
Let's plot that and find out. Here are 3000 points plotted with a normal distribution of the distance of 0.4 km, and the angle having a normal distribution of 0.35 radians. We compute the average of all of the positions, and display it as a star. Our intuition is displayed with a large circle.
```
import numpy as np
from numpy.random import randn
import matplotlib.pyplot as plt
N = 3000
a = np.pi/2. + (randn(N) * 0.35)
r = 50.0 + (randn(N) * 0.4)
xs = r * np.cos(a)
ys = r * np.sin(a)
plt.scatter(xs, ys, label='Sensor', color='k', marker='.', s=2)
xs, ys = sum(xs)/N, sum(ys)/N
plt.scatter(xs, ys, c='r', marker='*', s=200, label='Mean')
plt.scatter(0, 50, c='k', marker='o', s=300, label='Intuition')
plt.axis('equal')
plt.legend();
```
We can see that our intuition failed us because the nonlinearity of the problem forced all of the errors to be biased in one direction. This bias, over many iterations, can cause the Kalman filter to diverge. Even if it doesn't diverge the solution will not be optimal. Linear approximations applied to nonlinear problems yield inaccurate results.
## The Effect of Nonlinear Functions on Gaussians
Gaussians are not closed under an arbitrary nonlinear function. Recall the equations of the Kalman filter - at each evolution we pass the Gaussian representing the state through the process function to get the Gaussian at time $k$. Our process function was always linear, so the output was always another Gaussian. Let's look at that on a graph. I will take an arbitrary Gaussian and pass it through the function $f(x) = 2x + 1$ and plot the result. We know how to do this analytically, but let's use sampling. I will generate 500,000 points with a normal distribution, pass them through $f(x)$, and plot the results. I do it this way because the next example will be nonlinear, and we will have no way to compute this analytically.
```
import numpy as np
from numpy.random import normal
gaussian = (0., 1.)
data = normal(loc=gaussian[0], scale=gaussian[1], size=500000)
plt.hist(2*data + 1, 1000);
```
This is an unsurprising result. The result of passing the Gaussian through $f(x)=2x+1$ is another Gaussian centered around 1. Let's look at the input, nonlinear function, and output at once.
```
from book_format import set_figsize, figsize
from nonlinear_plots import plot_nonlinear_func
def g1(x):
return 2*x+1
with figsize(y=5):
plot_nonlinear_func(data, g1, gaussian)
```
> I explain how to plot Gaussians, and much more, in the Notebook *Computing_and_Plotting_PDFs* in the
Supporting_Notebooks folder. You can also read it online [here](https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/Supporting_Notebooks/Computing_and_plotting_PDFs.ipynb)[1]
The plot labeled 'Input' is the histogram of the original data. This is passed through the function $f(x)=2x+1$, which is displayed in the chart on the bottom left. The red lines show how one value, $x=0$, is passed through the function. Each value from the input is passed through in the same way to the output function on the right. For the output I computed the mean by taking the average of all the points, and drew the results with the dotted blue line. A solid blue line shows the actual mean for the point $x=0$. The output looks like a Gaussian, and is in fact a Gaussian. We can see that the variance in the output is larger than the variance in the input, and the mean has been shifted from 0 to 1, which is what we would expect given the transfer function $f(x)=2x+1$: the $2x$ affects the variance, and the $+1$ shifts the mean. The computed mean, represented by the dotted blue line, is nearly equal to the actual mean. If we used more points in our computation we could get arbitrarily close to the actual value.
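As a sanity check, the linear case can also be computed analytically: for $f(x)=2x+1$ applied to a Gaussian with mean 0 and variance 1, the output mean is $2(0)+1=1$ and the output variance is $2^2(1)=4$, which matches the sampled result:
```
# data holds the 500,000 samples drawn from N(0, 1) above
print(np.mean(2*data + 1), np.var(2*data + 1))   # approximately 1.0 and 4.0
```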
Now let's look at a nonlinear function and see how it affects the probability distribution.
```
def g2(x):
return (np.cos(3*(x/2 + 0.7))) * np.sin(0.3*x) - 1.6*x
with figsize(y=5):
plot_nonlinear_func(data, g2, gaussian)
```
This result may be somewhat surprising to you. The function looks "fairly" linear, but the probability distribution of the output is completely different from a Gaussian. Recall the equations for multiplying two univariate Gaussians:
$$\begin{aligned}
\mu &=\frac{\sigma_1^2 \mu_2 + \sigma_2^2 \mu_1} {\sigma_1^2 + \sigma_2^2} \\
\sigma &= \frac{1}{\frac{1}{\sigma_1^2} + \frac{1}{\sigma_2^2}}
\end{aligned}$$
These equations do not hold for non-Gaussians, and certainly do not hold for the probability distribution shown in the 'Output' chart above.
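For reference, here is what those two equations look like in code, with each Gaussian represented as a `(mean, variance)` tuple. This is just a small helper written for this discussion, not part of the filter code:
```
def gaussian_multiply(g1, g2):
    """Product of two univariate Gaussians, each given as (mean, variance)."""
    mu1, var1 = g1
    mu2, var2 = g2
    mean = (var1*mu2 + var2*mu1) / (var1 + var2)
    variance = 1. / (1./var1 + 1./var2)
    return mean, variance

print(gaussian_multiply((10., 1.), (11., 2.)))   # (10.33..., 0.67...)
```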
Think of what this implies for the Kalman filter algorithm of the previous chapter. All of the equations assume that a Gaussian passed through the process function results in another Gaussian. If this is not true then all of the assumptions and guarantees of the Kalman filter do not hold. Let's look at what happens when we pass the output back through the function again, simulating the next time step of the Kalman filter.
```
y = g2(data)
gaussian2 = (np.mean(y), np.var(y))
with figsize(y=5):
plot_nonlinear_func(y, g2, gaussian2)
```
As you can see the probability function is further distorted from the original Gaussian. However, the graph is still somewhat symmetric around x=0. Let's see what the mean is.
```
print('input mean, variance: %.4f, %.4f' %
(np.mean(data), np.var(data)))
print('output mean, variance: %.4f, %.4f' %
(np.mean(y), np.var(y)))
```
Let's compare that to the linear function that passes through (-2,3) and (2,-3), which is very close to the nonlinear function we have plotted. Using the equation of a line we have
$$m=\frac{-3-3}{2-(-2)}=-1.5$$
```
def g3(x):
return -1.5 * x
with figsize(y=5):
plot_nonlinear_func(data, g3, gaussian)
out = g3(data)
print('output mean, variance: %.4f, %.4f' %
(np.mean(out), np.var(out)))
```
Although the shapes of the output are very different, the mean and variance of each are almost the same. This may lead us to reason that perhaps we can ignore this problem if the nonlinear equation is 'close to' linear. To test that, we can iterate several times and then compare the results.
```
out = g3(data)
out2 = g2(data)
for i in range(10):
out = g3(out)
out2 = g2(out2)
print('linear output mean, variance: %.4f, %.4f' %
(np.average(out), np.std(out)**2))
print('nonlinear output mean, variance: %.4f, %.4f' %
(np.average(out2), np.std(out2)**2))
```
Unfortunately the nonlinear version is not stable. It drifted significantly from the mean of 0, and the variance is half an order of magnitude larger.
I minimized the issue by using a function that is quite close to a straight line. What happens if the function is $y(x)=-x^2$?
```
def g3(x):
return -x*x
x0 = (1, 1)
data = normal(loc=x0[0], scale=x0[1], size=500000)
with figsize(y=5):
plot_nonlinear_func(data, g3, gaussian=x0)
```
Despite the curve being smooth and reasonably straight at $x=1$, the probability distribution of the output doesn't look anything like a Gaussian, and the computed mean of the output is quite different from the value computed directly. This is not an unusual function - a ballistic object moves in a parabola, and this is the sort of nonlinearity your filter will need to handle. If you recall, we tried to track a ball and failed miserably. This graph should give you insight into why the filter performed so poorly.
## A 2D Example
It is hard to look at probability distributions and reason about what will happen in a filter. So let's think about tracking an aircraft with radar. The estimate may have a covariance that looks like this:
```
import nonlinear_internal
nonlinear_internal.plot1()
```
What happens when we try to linearize this problem? The radar gives us a range to the aircraft. Suppose the radar is directly under the aircraft (x=10) and the next measurement states that the aircraft is 3 miles away (y=3). The positions that could match that measurement form a circle with radius 3 miles, like so.
```
nonlinear_internal.plot2()
```
We can see by inspection that the probable position of the aircraft is somewhere near x=11.4, y=2.7 because that is where the covariance ellipse and range measurement overlap. But the range measurement is nonlinear so we have to linearize it. We haven't covered this material yet, but the Extended Kalman filter will linearize at the last position of the aircraft - (10,2). At x=10 the range measurement has y=3, and so we linearize at that point.
```
nonlinear_internal.plot3()
```
Now we have a linear representation of the problem (literally a straight line) which we can solve. Unfortunately you can see that the intersection of the line and the covariance ellipse is a long way from the actual aircraft position.
```
nonlinear_internal.plot4()
```
That sort of error often leads to disastrous results. The error in this estimate is large. But in the next iteration of the filter that very bad estimate will be used to linearize the next radar measurement, so the next estimate is likely to be markedly worse than this one. After only a few iterations the Kalman filter will diverge, and start producing results that have no correspondence to reality.
This covariance ellipse spans miles. I exaggerated the size to illustrate the difficulties of highly nonlinear systems. In real radar tracking problems the nonlinearity is usually not that bad, but the errors will still accumulate. Other systems you may work with could have this amount of nonlinearity - this was not an exaggeration made only to make a point. You will always be battling divergence when working with nonlinear systems.
## The Algorithms
You may be impatient to solve a specific problem, and wondering which filter to use. I will quickly survey the options. The subsequent chapters are somewhat independent of each other, and you can fruitfully skip around, though I recommend reading linearly if you truly want to master all of the material.
The workhorses of nonlinear filters are the *linearized Kalman filter* and *extended Kalman filter* (EKF). These two techniques were invented shortly after Kalman published his paper and they have been the main techniques used since then. The flight software in airplanes, the GPS in your car or phone almost certainly use one of these techniques.
However, these techniques are extremely demanding. The EKF linearizes the differential equations at one point, which requires you to find a solution to a matrix of partial derivatives (a Jacobian). This can be difficult or impossible to do analytically. If impossible, you have to use numerical techniques to find the Jacobian, but this is expensive computationally and introduces more error into the system. Finally, if the problem is quite nonlinear the linearization leads to a lot of error being introduced in each step, and the filters frequently diverge. You cannot throw some equations into some arbitrary solver and expect to get good results. It's a difficult field for professionals. I note that most Kalman filtering textbooks merely gloss over the EKF despite it being the most frequently used technique in real world applications.
Recently the field has been changing in exciting ways. First, computing power has grown to the point that we can use techniques that were once beyond the ability of a supercomputer. These use *Monte Carlo* techniques - the computer generates thousands to tens of thousands of random points and tests all of them against the measurements. It then probabilistically kills or duplicates points based on how well they match the measurements. A point far away from the measurement is unlikely to be retained, whereas a point very close is quite likely to be retained. After a few iterations there is a clump of particles closely tracking your object, and a sparse cloud of points where there is no object.
This has two benefits. First, the algorithm is robust even for extremely nonlinear problems. Second, the algorithm can track arbitrarily many objects at once - some particles will match the behavior on one object, and other particles will match other objects. So this technique is often used to track automobile traffic, people in crowds, and so on.
The costs should be clear. It is computationally expensive to test tens of thousands of points for every step in the filter. But modern CPUs are very fast, and this is a good problem for GPUs because this part of the algorithm is easily parallelized. Another cost is that the answer is not mathematical. With a Kalman filter my covariance matrix gives me important information about the amount of error in the estimate. The particle filter does not give me a rigorous way to compute this. Finally, the output of the filter is a cloud of points; I then have to figure out how to interpret it. Usually you will be doing something like taking the mean and standard deviations of the points, but this is a difficult problem. There are still many points that do not 'belong' to a tracked object, so you first have to run some sort of clustering algorithm to find the points that seem to be tracking an object, and then you need another algorithm to produce a state estimate from those points. None of this is intractable, but it is all quite computationally expensive.
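To make the kill-or-duplicate idea concrete, here is a toy one-dimensional sketch of the weighting and resampling step. It is illustrative only - the full particle filter is developed in a later chapter:
```
import numpy as np

# Spread 1000 candidate positions uniformly, weight them by how well they
# match a noisy measurement, then resample in proportion to the weights.
particles = np.random.uniform(0, 10, size=1000)
measurement, sensor_std = 4.0, 0.5

weights = np.exp(-0.5 * ((particles - measurement) / sensor_std)**2)
weights /= weights.sum()

indexes = np.random.choice(len(particles), size=len(particles), p=weights)
particles = particles[indexes]
print(np.mean(particles), np.std(particles))   # clustered near the measurement
```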
Finally, we have a new algorithm called the *unscented Kalman filter* (UKF). It does not require you to find analytic solutions to nonlinear equations, and yet almost always performs better than the EKF. It does well with nonlinear problems - problems where the EKF has significant difficulties. Designing the filter is extremely easy. Some will say the jury is still out on the UKF, but to my mind the UKF is superior in almost every way to the EKF. I suggest that the UKF should be the starting point for any implementation, especially if you are not a Kalman filter professional with a graduate degree in control theory. The main downside is that the UKF can be a few times slower than the EKF, but this really depends on whether the EKF solves the Jacobian analytically or numerically. If numerically the UKF is almost certainly faster. It has not been proven (and probably it cannot be proven) that the UKF always yields more accurate results than the EKF. In practice it almost always does, often significantly so. It is very easy to understand and implement, and I strongly suggest this filter as your starting point.
## Summary
The world is nonlinear, but we only really know how to solve linear problems. This introduces significant difficulties for Kalman filters. We've looked at how nonlinearity affects filtering in 3 different but equivalent ways, and I've given you a brief summary of the major approaches: the linearized Kalman filter, the extended Kalman filter, the unscented Kalman filter, and the particle filter.
Until recently the linearized Kalman filter and EKF have been the standard way to solve these problems. They are very difficult to understand and use, and they are also potentially very unstable.
Recent developments have offered what are to my mind superior approaches. The UKF dispenses with the need to find solutions to partial differential equations, yet it is also usually more accurate than the EKF. It is easy to use and understand. I can get a basic UKF going in a few minutes by using FilterPy. The particle filter dispenses with mathematical modeling completely in favor of a Monte Carlo technique of generating a random cloud of thousands of points. It runs slowly, but it can solve otherwise intractable problems with relative ease.
I get more email about the EKF than anything else; I suspect that this is because most treatments in books, papers, and on the internet use the EKF. If your interest is in mastering the field of course you will want to learn about the EKF. But if you are just trying to get good results I point you to the UKF and particle filter first. They are much easier to implement, understand, and use, and they are typically far more stable than the EKF.
Some will quibble with that advice. A lot of recent publications are devoted to a comparison of the EKF, UKF, and perhaps a few other choices for a given problem. Do you not need to perform a similar comparison for your problem? If you are sending a rocket to Mars then of course you do. You will be balancing issues such as accuracy, round off errors, divergence, mathematical proof of correctness, and the computational effort required. I can't imagine not knowing the EKF intimately.
On the other hand the UKF works spectacularly! I use it at work for real world applications. I mostly haven't even tried to implement an EKF for these applications because I can verify that the UKF is working fine. Is it possible that I might eke out another 0.2% of performance from the EKF in certain situations? Sure! Do I care? No! I completely understand the UKF implementation, it is easy to test and verify, I can pass the code to others and be confident that they can understand and modify it, and I am not a masochist who wants to battle difficult equations when I already have a working solution. If the UKF or particle filters start to perform poorly for some problem then I will turn to other techniques, but not before then. And realistically, the UKF usually provides substantially better performance than the EKF over a wide range of problems and conditions. If "really good" is good enough I'm going to spend my time working on other problems.
I'm belaboring this point because in most textbooks the EKF is given center stage, and the UKF is either not mentioned at all or just given a 2 page gloss that leaves you completely unprepared to use the filter. The UKF is still relatively new, and it takes time to write new editions of books. At the time many books were written the UKF was either not discovered yet, or it was just an unproven but promising curiosity. But I am writing this now, the UKF has had enormous success, and it needs to be in your toolkit. That is what I will spend most of my effort trying to teach you.
## References
<A name="[1]">[1]</A> https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/Supporting_Notebooks/Computing_and_plotting_PDFs.ipynb
# Project - Email Sending
```
!pip install emails
import emails
html_text = '''<p><span style="font-family: Courier New, courier;"><span style="background-color: rgb(247, 218, 100);">HELLo</span> </span></p>
<p><span style="font-family: Courier New, courier;"><br></span></p>
<p><span style="font-family: Courier New, courier;">How are you this?, </span></p>
<p><span style="font-family: Courier New, courier;">Student!!</span></p>
<p><span style="font-family: Courier New, courier;">studying engineering, I like this project </span></p>
<p><span style="font-family: Courier New, courier;"><br></span></p>
<p><span style="font-family: Courier New, courier;">Regards, </span></p>
<p><strong><span style="font-family: Courier New, courier;">Student,</span></strong></p>
<p><span style="font-family: Courier New, courier;"><strong>Thejaswini N</strong></span></p>'''
message = emails.html(html=html_text,
subject="Your EMAIL FROM PYTHON SCRIPT",
mail_from=('himadri', '[email protected]'))
mail_via_python = message.send(to='[email protected]',
smtp={'host': 'smtp.gmail.com',
'timeout': 5,
'port':587,
'user':'[email protected]',
'password':'12345abcd@',
'tls':True})
mail_via_python
def sendMail(email, name):
html_text = '''<p><span style="font-family: Courier New, courier;"><span style="background-color: rgb(247, 218, 100);">HELLo</span> </span></p>
<p><span style="font-family: Courier New, courier;"><br></span></p>
<p><span style="font-family: Courier New, courier;">How are you this?, </span></p>
<p><span style="font-family: Courier New, courier;">Student!!</span></p>
<p><span style="font-family: Courier New, courier;">studying engineering, I like this project </span></p>
<p><span style="font-family: Courier New, courier;"><br></span></p>
<p><span style="font-family: Courier New, courier;">Regards, </span></p>
<p><strong><span style="font-family: Courier New, courier;">Student,</span></strong></p>
<p><span style="font-family: Courier New, courier;"><strong>Thejaswini N</strong></span></p>'''
subject = "Hey! wassup "+ name + ", you have EMAIL FROM Student"
    message = emails.html(html=html_text,
                          subject=subject,  # use the personalized subject built above
                          mail_from=('himadri', '[email protected]'))
    mail_via_python = message.send(to=email,  # send to the address passed into the function
smtp={'host': 'smtp.gmail.com',
'timeout': 5,
'port':587,
'user':'[email protected]',
'password':'12345abcd@',
'tls':True})
return mail_via_python.status_code
sendMail("[email protected]","himadri")
```
# Convolutional Autoencoder
Sticking with the MNIST dataset, let's improve our autoencoder's performance using convolutional layers. Again, loading modules and the data.
```
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
img = mnist.train.images[2]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
```
## Network Architecture
The encoder part of the network will be a typical convolutional pyramid. Each convolutional layer will be followed by a max-pooling layer to reduce the dimensions of the layers. The decoder though might be something new to you. The decoder needs to convert from a narrow representation to a wide reconstructed image. For example, the representation could be a 4x4x8 max-pool layer. This is the output of the encoder, but also the input to the decoder. We want to get a 28x28x1 image out from the decoder so we need to work our way back up from the narrow decoder input layer. A schematic of the network is shown below.
<img src='assets/convolutional_autoencoder.png' width=500px>
Here our final encoder layer has size 4x4x8 = 128. The original images have size 28x28 = 784, so the encoded vector is roughly 16% the size of the original image. These are just suggested sizes for each of the layers. Feel free to change the depths and sizes, but remember our goal here is to find a small representation of the input data.
### What's going on with the decoder
Okay, so the decoder has these "Upsample" layers that you might not have seen before. First off, I'll discuss a bit what these layers *aren't*. Usually, you'll see **transposed convolution** layers used to increase the width and height of the layers. They work almost exactly the same as convolutional layers, but in reverse. A stride in the input layer results in a larger stride in the transposed convolution layer. For example, if you have a 3x3 kernel, a 3x3 patch in the input layer will be reduced to one unit in a convolutional layer. Comparatively, one unit in the input layer will be expanded to a 3x3 patch in a transposed convolution layer. The TensorFlow API provides us with an easy way to create the layers, [`tf.nn.conv2d_transpose`](https://www.tensorflow.org/api_docs/python/tf/nn/conv2d_transpose).
However, transposed convolution layers can lead to artifacts in the final images, such as checkerboard patterns. This is due to overlap in the kernels which can be avoided by setting the stride and kernel size equal. In [this Distill article](http://distill.pub/2016/deconv-checkerboard/) from Augustus Odena, *et al*, the authors show that these checkerboard artifacts can be avoided by resizing the layers using nearest neighbor or bilinear interpolation (upsampling) followed by a convolutional layer. In TensorFlow, this is easily done with [`tf.image.resize_images`](https://www.tensorflow.org/versions/r1.1/api_docs/python/tf/image/resize_images), followed by a convolution. Be sure to read the Distill article to get a better understanding of deconvolutional layers and why we're using upsampling.
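For comparison, here is roughly what a transposed-convolution upsampling step looks like in the TF 1.x API used in this notebook. This is only a sketch, not part of the exercise; note that the kernel size equals the stride to avoid the overlap artifacts, and that a stride of 2 turns 4x4 into 8x8 rather than the 7x7 used in the resize-based decoder below:
```
# Sketch: upsample a 4x4x8 tensor with a transposed convolution instead of
# resize + conv. Kernel size == stride avoids checkerboard overlap.
encoded_example = tf.placeholder(tf.float32, (None, 4, 4, 8))
upsampled = tf.layers.conv2d_transpose(encoded_example, filters=8,
                                       kernel_size=(2, 2), strides=(2, 2),
                                       padding='same', activation=tf.nn.relu)
# upsampled now has shape (None, 8, 8, 8)
```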
> **Exercise:** Build the network shown above. Remember that a convolutional layer with strides of 1 and 'same' padding won't reduce the height and width. That is, if the input is 28x28 and the convolution layer has stride = 1 and 'same' padding, the convolutional layer will also be 28x28. The max-pool layers are used to reduce the width and height. A stride of 2 will reduce the size by a factor of 2. Odena *et al* claim that nearest neighbor interpolation works best for the upsampling, so make sure to include that as a parameter in `tf.image.resize_images` or use [`tf.image.resize_nearest_neighbor`](https://www.tensorflow.org/api_docs/python/tf/image/resize_nearest_neighbor). For convolutional layers, use [`tf.layers.conv2d`](https://www.tensorflow.org/api_docs/python/tf/layers/conv2d). For example, you would write `conv1 = tf.layers.conv2d(inputs, 32, (5,5), padding='same', activation=tf.nn.relu)` for a layer with a depth of 32, a 5x5 kernel, stride of (1,1), padding is 'same', and a ReLU activation. Similarly, for the max-pool layers, use [`tf.layers.max_pooling2d`](https://www.tensorflow.org/api_docs/python/tf/layers/max_pooling2d).
```
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')
### Encoder
conv1 = tf.layers.conv2d(inputs_, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x16
maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same')
# Now 14x14x16
conv2 = tf.layers.conv2d(maxpool1, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x8
maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same')
# Now 7x7x8
conv3 = tf.layers.conv2d(maxpool2, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x8
encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same')
# Now 4x4x8
### Decoder
upsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7))
# Now 7x7x8
conv4 = tf.layers.conv2d(upsample1, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x8
upsample2 = tf.image.resize_nearest_neighbor(conv4, (14,14))
# Now 14x14x8
conv5 = tf.layers.conv2d(upsample2, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x8
upsample3 = tf.image.resize_nearest_neighbor(conv5, (28,28))
# Now 28x28x8
conv6 = tf.layers.conv2d(upsample3, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x16
logits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None)
#Now 28x28x1
decoded = tf.nn.sigmoid(logits, name='decoded')
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(0.001).minimize(cost)
```
## Training
As before, here we'll train the network. Instead of flattening the images though, we can pass them in as 28x28x1 arrays.
```
sess = tf.Session()
epochs = 20
batch_size = 200
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
imgs = batch[0].reshape((-1, 28, 28, 1))
batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: imgs,
targets_: imgs})
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
reconstructed = sess.run(decoded, feed_dict={inputs_: in_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([in_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
sess.close()
```
## Denoising
As I've mentioned before, autoencoders like the ones you've built so far aren't too useful in practice. However, they can be used to denoise images quite successfully just by training the network on noisy images. We can create the noisy images ourselves by adding Gaussian noise to the training images, then clipping the values to be between 0 and 1. We'll use noisy images as input and the original, clean images as targets. Here's an example of the noisy images I generated and the denoised images.

Since this is a harder problem for the network, we'll want to use deeper convolutional layers here, more feature maps. I suggest something like 32-32-16 for the depths of the convolutional layers in the encoder, and the same depths going backward through the decoder. Otherwise the architecture is the same as before.
> **Exercise:** Build the network for the denoising autoencoder. It's the same as before, but with deeper layers. I suggest 32-32-16 for the depths, but you can play with these numbers, or add more layers.
```
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')
### Encoder
conv1 = tf.layers.conv2d(inputs_, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x32
maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same')
# Now 14x14x32
conv2 = tf.layers.conv2d(maxpool1, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x32
maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same')
# Now 7x7x32
conv3 = tf.layers.conv2d(maxpool2, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x16
encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same')
# Now 4x4x16
### Decoder
upsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7))
# Now 7x7x16
conv4 = tf.layers.conv2d(upsample1, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x16
upsample2 = tf.image.resize_nearest_neighbor(conv4, (14,14))
# Now 14x14x16
conv5 = tf.layers.conv2d(upsample2, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x32
upsample3 = tf.image.resize_nearest_neighbor(conv5, (28,28))
# Now 28x28x32
conv6 = tf.layers.conv2d(upsample3, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x32
logits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None)
#Now 28x28x1
decoded = tf.nn.sigmoid(logits, name='decoded')
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(0.001).minimize(cost)
sess = tf.Session()
epochs = 100
batch_size = 200
# Sets how much noise we're adding to the MNIST images
noise_factor = 0.5
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images from the batch
imgs = batch[0].reshape((-1, 28, 28, 1))
# Add random noise to the input images
noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape)
# Clip the images to be between 0 and 1
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
# Noisy images as inputs, original images as targets
batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs,
targets_: imgs})
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
```
## Checking out the performance
Here I'm adding noise to the test images and passing them through the autoencoder. It does a surprisingly good job of removing the noise, even though it's sometimes difficult to tell what the original number is.
```
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
noisy_imgs = in_imgs + noise_factor * np.random.randn(*in_imgs.shape)
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
reconstructed = sess.run(decoded, feed_dict={inputs_: noisy_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([noisy_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
```
## Computer Vision Learner
[`vision.learner`](/vision.learner.html#vision.learner) is the module that defines the [`cnn_learner`](/vision.learner.html#cnn_learner) method, to easily get a model suitable for transfer learning.
```
from fastai.gen_doc.nbdoc import *
from fastai.vision import *
```
## Transfer learning
Transfer learning is a technique where you use a model trained on a very large dataset (usually [ImageNet](http://image-net.org/) in computer vision) and then adapt it to your own dataset. The idea is that it has learned to recognize many features on all of this data, and that you will benefit from this knowledge, especially if your dataset is small, compared to starting from a randomly initialized model. It has been shown in [this article](https://arxiv.org/abs/1805.08974) on a wide range of tasks that transfer learning nearly always gives better results.
In practice, you need to change the last part of your model to be adapted to your own number of classes. Most convolutional models end with a few linear layers (a part we will call the head). The last convolutional layer will have analyzed features in the image that went through the model, and the job of the head is to convert those into predictions for each of our classes. In transfer learning we will keep all the convolutional layers (called the body or the backbone of the model) with their weights pretrained on ImageNet, but will define a new head initialized randomly.
Then we will train the model we obtain in two phases: first we freeze the body weights and only train the head (to convert those analyzed features into predictions for our own data), then we unfreeze the layers of the backbone (gradually if necessary) and fine-tune the whole model (possibly using differential learning rates).
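A minimal sketch of that two-phase schedule with the fastai v1 API is shown below; it assumes a `DataBunch` named `data`, and the epoch counts and learning rates are placeholders rather than recommendations:
```
learn = cnn_learner(data, models.resnet34, metrics=[accuracy])

# Phase 1: the pretrained backbone is frozen by default, so this trains only the head.
learn.fit_one_cycle(4)

# Phase 2: unfreeze the backbone and fine-tune the whole model with
# differential learning rates (smaller for early layers, larger for the head).
learn.unfreeze()
learn.fit_one_cycle(4, max_lr=slice(1e-5, 1e-3))
```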
The [`cnn_learner`](/vision.learner.html#cnn_learner) factory method helps you to automatically get a pretrained model from a given architecture with a custom head that is suitable for your data.
```
show_doc(cnn_learner)
```
This method creates a [`Learner`](/basic_train.html#Learner) object from the [`data`](/vision.data.html#vision.data) object and model inferred from it with the backbone given in `base_arch`. Specifically, it will cut the model defined by `arch` (randomly initialized if `pretrained` is False) at the last convolutional layer by default (or as defined in `cut`, see below) and add:
- an [`AdaptiveConcatPool2d`](/layers.html#AdaptiveConcatPool2d) layer,
- a [`Flatten`](/layers.html#Flatten) layer,
- blocks of \[[`nn.BatchNorm1d`](https://pytorch.org/docs/stable/nn.html#torch.nn.BatchNorm1d), [`nn.Dropout`](https://pytorch.org/docs/stable/nn.html#torch.nn.Dropout), [`nn.Linear`](https://pytorch.org/docs/stable/nn.html#torch.nn.Linear), [`nn.ReLU`](https://pytorch.org/docs/stable/nn.html#torch.nn.ReLU)\] layers.
The blocks are defined by the `lin_ftrs` and `ps` arguments. Specifically, the first block will have a number of inputs inferred from the backbone `base_arch` and the last one will have a number of outputs equal to `data.c` (which contains the number of classes of the data) and the intermediate blocks have a number of inputs/outputs determined by `lin_ftrs` (of course a block has a number of inputs equal to the number of outputs of the previous block). The default is to have an intermediate hidden size of 512 (which makes two blocks `model_activation` -> 512 -> `n_classes`). If you pass a float then the final dropout layer will have the value `ps`, and the remaining will be `ps/2`. If you pass a list then the values are used for dropout probabilities directly.
Note that the very last block doesn't have a [`nn.ReLU`](https://pytorch.org/docs/stable/nn.html#torch.nn.ReLU) activation, to allow you to use any final activation you want (generally included in the loss function in pytorch). Also, the backbone will be frozen if you choose `pretrained=True` (so only the head will train if you call [`fit`](/basic_train.html#fit)) so that you can immediately start phase one of training as described above.
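For example, a call like the following (the sizes are arbitrary choices for illustration) gives a head of the form `backbone features -> 1024 -> 512 -> data.c`, with the final dropout probability at 0.5 and the earlier ones at 0.25:
```
learn = cnn_learner(data, models.resnet34, lin_ftrs=[1024, 512], ps=0.5,
                    metrics=[accuracy])
```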
Alternatively, you can define your own `custom_head` to put on top of the backbone. If you want to specify where to split `base_arch` you should do so in the argument `cut`, which can either be the index of a specific layer (the result will not include that layer) or a function that, when passed the model, will return the backbone you want.
The final model obtained by stacking the backbone and the head (custom or defined as we saw) is then separated in groups for gradual unfreezing or differential learning rates. You can specify how to split the backbone in groups with the optional argument `split_on` (should be a function that returns those groups when given the backbone).
The `kwargs` will be passed on to [`Learner`](/basic_train.html#Learner), so you can put here anything that [`Learner`](/basic_train.html#Learner) will accept ([`metrics`](/metrics.html#metrics), `loss_func`, `opt_func`...)
```
path = untar_data(URLs.MNIST_SAMPLE)
data = ImageDataBunch.from_folder(path)
learner = cnn_learner(data, models.resnet18, metrics=[accuracy])
learner.fit_one_cycle(1,1e-3)
learner.save('one_epoch')
show_doc(unet_learner)
```
This time the model will be a [`DynamicUnet`](/vision.models.unet.html#DynamicUnet) with an encoder based on `arch` (maybe `pretrained`) that is cut depending on `split_on`. `blur_final`, `norm_type`, `blur`, `self_attention`, `y_range`, `last_cross` and `bottle` are passed to unet constructor, the `kwargs` are passed to the initialization of the [`Learner`](/basic_train.html#Learner).
```
jekyll_warn("The models created with this function won't work with pytorch `nn.DataParallel`, you have to use distributed training instead!")
```
### Get predictions
Once you've actually trained your model, you may want to use it on a single image. This is done by using the following method.
```
show_doc(Learner.predict)
img = learner.data.train_ds[0][0]
learner.predict(img)
```
Here the predicted class for our image is '3', which corresponds to a label of 0. The probabilities the model found for each class are 0.65 and 0.35 respectively, so its confidence is pretty high.
Note that if you want to load your trained model and use it on inference mode with the previous function, you should export your [`Learner`](/basic_train.html#Learner).
```
learner.export()
```
And then you can load it with an empty data object that has the same internal state like this:
```
learn = load_learner(path)
```
### Customize your model
You can customize [`cnn_learner`](/vision.learner.html#cnn_learner) for your own model's default `cut` and `split_on` functions by adding them to the dictionary `model_meta`. The key should be your model and the value should be a dictionary with the keys `cut` and `split_on` (see the source code for examples). The constructor will call [`create_body`](/vision.learner.html#create_body) and [`create_head`](/vision.learner.html#create_head) for you based on `cut`; you can also call them yourself, which is particularly useful for testing.
```
show_doc(create_body)
show_doc(create_head, doc_string=False)
```
Model head that takes `nf` features, runs through `lin_ftrs`, and ends with `nc` classes. `ps` is the probability of the dropouts, as documented above in [`cnn_learner`](/vision.learner.html#cnn_learner).
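For instance, under the v1 signatures shown by `show_doc` above, stitching the two pieces together by hand might look roughly like the sketch below; the feature doubling is assumed to account for fastai's concatenated average/max pooling, and you should double-check the exact argument names against your installed version.
```
body = create_body(models.resnet18, pretrained=False, cut=-2)   # backbone without its final layers
nf = num_features_model(body) * 2                               # *2 assumed for the concat pooling
head = create_head(nf, data.c, lin_ftrs=[512], ps=0.5)
model = nn.Sequential(body, head)
```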
```
show_doc(ClassificationInterpretation, title_level=3)
```
This provides a confusion matrix and visualization of the most incorrect images. Pass in your [`data`](/vision.data.html#vision.data), calculated `preds`, actual `y`, and your `losses`, and then use the methods below to view the model interpretation results. For instance:
```
learn = cnn_learner(data, models.resnet18)
learn.fit(1)
preds,y,losses = learn.get_preds(with_loss=True)
interp = ClassificationInterpretation(learn, preds, y, losses)
```
The following factory method gives a more convenient way to create an instance of this class:
```
show_doc(ClassificationInterpretation.from_learner, full_name='from_learner')
```
You can also use a shortcut `learn.interpret()` to do the same.
```
show_doc(Learner.interpret, full_name='interpret')
```
Note that this shortcut is a [`Learner`](/basic_train.html#Learner) object/class method that can be called as: `learn.interpret()`.
```
show_doc(ClassificationInterpretation.plot_top_losses, full_name='plot_top_losses')
```
The `k` items are arranged as a square, so it will look best if `k` is a square number (4, 9, 16, etc). The title of each image shows: prediction, actual, loss, probability of actual class. When `heatmap` is True (by default it's False), Grad-CAM heatmaps (http://openaccess.thecvf.com/content_ICCV_2017/papers/Selvaraju_Grad-CAM_Visual_Explanations_ICCV_2017_paper.pdf) are overlaid on each image. `plot_top_losses` should be used with single-labeled datasets. See `plot_multi_top_losses` below for a version capable of handling multi-labeled datasets.
```
interp.plot_top_losses(9, figsize=(7,7))
show_doc(ClassificationInterpretation.plot_multi_top_losses, full_name='plot_multi_top_losses')
```
Similar to `plot_top_losses()` but aimed at multi-labeled datasets. It plots misclassified samples sorted by their respective loss.
Since you can have multiple labels for a single sample, they can easily overlap in a grid plot. So it plots just one sample per row.
Note that you can pass `save_misclassified=True` (by default it's `False`). In such case, the method will return a list containing the misclassified images which you can use to debug your model and/or tune its hyperparameters.
## Undocumented Methods - Methods moved below this line will intentionally be hidden
## New Methods - Please document or move to the undocumented section
# Task 1: Introduction
Welcome to TensorFlow Beginner: Basic Image Classification! By the end of the project, you will have created and trained a Neural Network model that can predict digits from hand-written images with a high degree of accuracy. Along the way, you will also have developed a basic understanding of how Neural Networks work and of TensorFlow syntax with Keras as its front end.
This graph visually describes the problem that we are trying to solve. We want to create and train a model that takes an image of a hand-written digit as input and predicts the class of that digit, that is, the digit the input image represents.

```
import tensorflow as tf
print('Using TensorFlow version', tf.__version__)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
```
# Task 2: The Dataset
In order to understand our problem better, we will first import the data that we'd be working with and take a closer look at it. We are going to use the popular MNIST dataset which has lots of images of hand-written digits along with their labels.
```
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print('x_train shape: ', x_train.shape)
print('y_train shape: ', y_train.shape)
print('x_test shape: ', x_test.shape)
print('y_test shape: ', y_test.shape)
import matplotlib.pyplot as plt
%matplotlib inline
plt.imshow(x_train[0], cmap = 'binary')
plt.show()
y_train[0]
y_train[:10]
```
# Task 3: One Hot Encoding
We will change the way each label is represented: instead of a class name or number, the label becomes a list with one entry per possible class, where every entry is 0 except the one for the class this example belongs to, which is set to 1. For example:
| original label | one-hot encoded label |
|------|------|
| 5 | [0, 0, 0, 0, 0, 1, 0, 0, 0, 0] |
| 7 | [0, 0, 0, 0, 0, 0, 0, 1, 0, 0] |
| 1 | [0, 1, 0, 0, 0, 0, 0, 0, 0, 0] |
```
from tensorflow.keras.utils import to_categorical
y_train_encoded = to_categorical(y_train)
y_test_encoded = to_categorical(y_test)
```
To make sure the encoding worked, let's check the shape of the encoded labels.
```
print('y_train shape: ', y_train_encoded.shape)
print('y_test shape: ', y_test_encoded.shape)
```
And just like before, let's also take a look at the first label and make sure that encoding is correct:
```
y_train_encoded[0]
```
# Task 4: Neural Networks
Consider the following graph:

The above graph simply represents the equation:
\begin{equation}
y = w1 * x1 + w2 * x2 + w3 * x3 + b
\end{equation}
Where the `w1, w2, w3` are called the weights and `b` is an intercept term called bias. The graph above, therefore, is simply a graphical representation of a simple linear equation. The equation can also be *vectorised* like this:
\begin{equation}
y = W . X + b
\end{equation}
Where `X = [x1, x2, x3]` and `W = [w1, w2, w3].T`. The .T means *transpose*. This is because we want the dot product to give us the result we want i.e. `w1 * x1 + w2 * x2 + w3 * x3`. This gives us the vectorised version of our linear equation.
What we are trying to ask, essentially, is if given a large amount of data (pairs of X and corresponding y), can we write an algorithm to figure out the optimal values of W and b? We need to find a way for our model to find the *optimal* values for W and b and not the absolute values. Absolute values probably don't even exist given the limitations of our mathematical model since we are *assuming* a linear function for a problem where it might be a much more complex one in reality and we don't know what that function is.
By taking the observed data and a proposed model, we want to write an algorithm to learn the values for W and b which best fit the data and ultimately, by doing that, we learn an approximate function which maps the inputs to outputs of our data. This type of algorithm is called an _optimization_ algorithm and there are a few different optimization algorithms that are typically used in training neural networks.
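As a toy illustration of what such an optimization algorithm does (this is not part of the notebook's MNIST pipeline, and all names below are made up), plain gradient descent can recover `W` and `b` for the three-feature linear model from synthetic data:
```
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 3))                    # 200 examples, 3 features
true_W, true_b = np.array([2.0, -1.0, 0.5]), 0.3
y = X @ true_W + true_b                          # targets from the "unknown" function

W, b = np.zeros(3), 0.0                          # start from arbitrary values
lr = 0.1
for _ in range(500):
    y_hat = X @ W + b                            # model prediction
    err = y_hat - y
    W -= lr * 2 * (X.T @ err) / len(y)           # gradient of the mean squared error w.r.t. W
    b -= lr * 2 * err.mean()                     # gradient w.r.t. b

print(W, b)                                      # close to true_W and true_b
```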
In our problem, our examples are of shape `(60000, 28, 28)`. The first dimension is simply the number of examples we have, so each example is of the shape `(28, 28)`. If we unroll this array into a single dimension, it will become a `28 * 28 = 784` dimensional vector. Now, it can probably be modeled somewhat like a linear equation, right? Given features from `x1` to `x784`, we get an output `y`. Here, each pixel value is a feature in our examples.

This may actually work for really simple problems but in our case, this model will turn out to be insufficient.
Turns out, we can learn much more complex functions by simply *cascading* the linear functions one after the other. The only additional thing that a node in a neural network does (as opposed to a node in the linear equation shown above) is that an activation function is applied to each linear output. The purpose of an activation function is to help the neural network find non-linear patterns in the data: if we just cascaded the neurons or nodes described above, even with many layers of cascaded linear functions, the result would still be a linear function, which means that, after training, the model would learn a linear function that best fits the data. This is a problem because in many, if not most, cases the input-to-output map is going to be much more complex than a linear function. So, the activation gives the model more flexibility and allows it to learn non-linear patterns.
Now, instead of setting y to a weighted sum of our input features, we can get a few hidden outputs which are weighted sums of our input features passed through an activation function and then get the weighted sums of those hidden outputs and so on. We do this a few times, and then get to our output y. This type of model gives our algorithm a much greater chance of learning a complex function.

In the network above, we have two *hidden layers*. The first layer with all the X features is called the input layer and the output y is called the output layer. In this example, the output has only one __node__. A hidden layer can have many nodes or very few nodes depending on how complex the problem may be. Here, both hidden layers have 2 nodes each. Each node gives the output of a linear function after the linear output passes through an activation function, and takes inputs from each node of the preceding layer. All the W's and all the b's associated with all of these functions will have to be "learned" by our algorithm as it attempts to optimize those values in order to best fit the given data. Note that the total number of learnable parameters in any layer depends on the number of nodes in that layer as well as on the number of nodes in the preceding layer. For example, the learnable parameters for __hidden layer 1__ can be calculated as: (number of nodes of the layer) * (number of nodes of the preceding layer) + (number of nodes of the layer). Why? The first part is obvious: if every node of a layer is connected to every node of the preceding layer, we can simply multiply the number of nodes of these two layers to get the total number of weight parameters. Also, a __bias__ term is connected to each node in the layer as well - that gives us the second term. So, for __hidden layer 1__, we get: `2 * 2 + 2 = 6` learnable parameters.
In the hand-written digit classification problem, we will have 128 nodes for two hidden layers, we will have 10 nodes for the output layer with each node corresponding to one output class, and of course we already know that the input is a 784 dimensional vector.
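Using the counting rule above on the exact architecture we are about to build (784 inputs, two hidden layers of 128 nodes, 10 outputs), we can tally the learnable parameters in advance; this is just arithmetic, and the total should match what `model.summary()` prints later.
```
layer_sizes = [784, 128, 128, 10]

total = 0
for n_in, n_out in zip(layer_sizes[:-1], layer_sizes[1:]):
    params = n_in * n_out + n_out   # weights + biases for this dense layer
    print(f'{n_in} -> {n_out}: {params} parameters')
    total += params
print('total:', total)              # 118,282 learnable parameters
```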
# Task 5: Preprocessing the Examples
We will create a Neural Network which will take 784 dimensional vectors as inputs (28 rows * 28 columns) and will output a 10 dimensional vector (For the 10 classes). We have already converted the outputs to 10 dimensional, one-hot encoded vectors. Now, let's convert the input to the required format as well. We will use numpy to easily unroll the examples from `(28, 28)` arrays to `(784, 1)` vectors.
```
import numpy as np
x_train_reshaped = np.reshape(x_train, (60000, 784))
x_test_reshaped = np.reshape(x_test, (10000, 784))
print('x_train_reshaped shape: ', x_train_reshaped.shape)
print('x_test_reshaped shape: ', x_test_reshaped.shape)
```
Each element in each example is a pixel value. Let's take a look at a few values of just one example.
```
print(set(x_train_reshaped[0]))
```
Pixel values, in this dataset, range from 0 to 255. While that's fine if we want to display our images, for our neural network to learn the weights and biases for the different layers, computations will simply be much more effective and fast if we *normalize* these values. In order to normalize the data, we calculate the mean and standard deviation of the training examples.
```
x_mean = np.mean(x_train_reshaped)
x_std = np.std(x_train_reshaped)
print('mean: ', x_mean)
print('std: ', x_std)
```
Now we will normalise both the training and test set using the mean and standard deviation we just calculated. Notice that we will need to apply the same mean and standard deviation to the test set even though we did not use the test set to calculate these values.
```
epsilon = 1e-10
x_train_norm = (x_train_reshaped - x_mean)/(x_std + epsilon)
x_test_norm = (x_test_reshaped - x_mean)/(x_std + epsilon)
```
Note how we added a small value to our denominator. This is because, if our std were close to zero, we'd get very large values as a result. In this case that's obviously not a concern, but we added it anyway as good practice, since this is typically done to ensure numerical stability.
We looked at some of the values for the first training example before. Let's take a look at it again, after having normalised the values.
```
print(set(x_train_norm[0]))
```
# Task 6: Creating a Model
We use a Sequential class defined in Keras to create our model. All the layers are going to be Dense layers. This means, like our examples above, all the nodes of a layer would be connected to all the nodes of the preceding layer i.e. densely connected.
```
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential([
Dense(128, activation = 'relu', input_shape = (784,)),
Dense(128, activation = 'relu'),
Dense(10, activation = 'softmax')
])
```
Let's understand the code above. We are instantiating a Sequential model. We pass on a list of layers that we want in our model, in the order that we want them. So, we have two hidden layers with 128 nodes each and one output layer with 10 nodes. We set the input shape on the first hidden layer to correspond to the shape of a single example from our reshaped training and test sets - we know each example is a 784 dimensional vector for the 784 pixels of the images.
We have talked about each node having a weighted sum of the inputs of the preceding layer. And, before this sum is fed to the next layer's nodes, it goes through another function called an activation function. So, each node actually does two things. First step is the weighted sum, let's call it Z:
\begin{equation}
Z = W . X + b
\end{equation}
The second step in the node is the activation function output, let's call it A:
\begin{equation}
A = f(Z)
\end{equation}
There are various types of activation functions used in Neural Networks. One of the more common ones is the rectified linear unit or ReLU function. It's a pretty simple function: it's a linear function for all the positive values and is simply set to `0` for all the negative values. Something like this:

Another activation function we are using is called *softmax*. This function gives us probability scores for various nodes, in this case the 10 nodes of the output layer, which sum up to 1. This activation gives us the probabilities for the various classes given the input. The class with the highest probability gives us our prediction.
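For reference, both activations are easy to write out directly with NumPy; this little sketch is only for intuition, since Keras applies them for us inside the layers defined below.
```
import numpy as np

def relu(z):
    return np.maximum(z, 0)           # 0 for negative inputs, identity for positive ones

def softmax(z):
    e = np.exp(z - np.max(z))         # subtract the max for numerical stability
    return e / e.sum()                # probabilities that sum to 1

z = np.array([2.0, -1.0, 0.5])
print(relu(z))                         # [2.  0.  0.5]
print(softmax(z), softmax(z).sum())    # probabilities, 1.0
```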
In addition to setting up our model architecture, we also need to define which algorithm should the model use in order to optimize the weights and biases as per the given data. We will use stochastic gradient descent.
We also need to define a loss function. Think of this function as the difference between the predicted outputs and the actual outputs given in the dataset. This loss needs to be minimised in order to have a higher model accuracy. That's what the optimisation algorithm essentially does - it minimises the loss during model training. For our multi-class classification problem, *categorical cross entropy* is commonly used.
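To make the loss concrete, this is what categorical cross entropy evaluates for a single example, given a one-hot label and the model's predicted probabilities (the numbers here are made up purely for illustration; `model.compile` below handles this internally):
```
import numpy as np

y_true = np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0])   # one-hot label for class 3
y_pred = np.full(10, 0.05)                           # predicted probabilities (sum to 1)
y_pred[3] = 0.55

loss = -np.sum(y_true * np.log(y_pred))              # only the true-class term survives
print(loss)                                          # about 0.598; smaller when the model is more confident
```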
Finally, we will use the accuracy during training as a metric to keep track of as the model trains.
```
model.compile(
optimizer = 'sgd',
loss = 'categorical_crossentropy',
metrics = ['accuracy']
)
model.summary()
```
Notice how we have over 100 thousand parameters (the weights and biases) to learn even though we are using only 2 hidden layers. For deep neural networks, this value can be, and often is, in millions.
In order to get the approximation of our function, we just need to fit the model to our data. We will use only training set to do this learning and will reserve the test set for later when we want to check the accuracy of our model. This is because, if we used only one set for both training and testing, the results may be biased and our model may have simply memorized all the examples instead of learning the relationship between features and label.
# Task 7: Training the Model
We are going to train the model for 3 epochs. Think of an epoch as one pass of all the training examples through the model. So, by setting the epochs to 3, we will go through all the training examples 3 times.
```
h = model.fit(
x_train_norm,
y_train_encoded,
epochs = 3
)
```
In order to ensure that this is not a simple "memorization" by the machine, we should evaluate the performance on the test set. This is easy to do, we simply use the `evaluate` method on our model.
```
loss, accuracy = model.evaluate(x_test_norm, y_test_encoded)
print('test set accuracy: ', accuracy * 100)
```
# Task 8: Predictions
```
preds = model.predict(x_test_norm)
print('shape of preds: ', preds.shape)
```
We probably can't go through all the 10000 predictions for now, but we can take a look at the first few. Let's plot the first few test set images along with their predicted and actual labels and see how our trained model actually performed.
```
plt.figure(figsize = (12, 12))
start_index = 0
for i in range(25):
plt.subplot(5, 5, i + 1)
plt.grid(False)
plt.xticks([])
plt.yticks([])
pred = np.argmax(preds[start_index + i])
actual = np.argmax(y_test_encoded[start_index + i])
col = 'g'
if pred != actual:
col = 'r'
plt.xlabel('i={} | pred={} | true={}'.format(start_index + i, pred, actual), color = col)
plt.imshow(x_test[start_index + i], cmap='binary')
plt.show()
```
It gets most of the predictions right!
```
"""
Enter the index value in place of the value 8 below for the prediction
that you want to plot the probability scores for
"""
index = 8
plt.plot(preds[index])
plt.show()
```
Hopefully this gave you an insight into using Tensorflow and its Keras implementation to get started with training Neural Networks!
# 4.4.2 Classification
```
%matplotlib inline
from sklearn.datasets import load_iris

# Load the Iris dataset
iris = load_iris()
X, y = iris.data, iris.target

# Show the first 5 rows
print('X:')
print(X[:5, :])
print('y:')
print(y[:5])

from sklearn.model_selection import train_test_split

# Split into training and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=123)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)

import numpy as np
import matplotlib.pyplot as plt

np.random.seed(123)
# Sample 100 points from a uniform distribution between 0 and 1 on both the X and Y axes
X0 = np.random.uniform(size=(100, 2))
# Generate 100 labels for class 0
y0 = np.repeat(0, 100)
# Sample 100 points from a uniform distribution between -1 and 0 on both the X and Y axes
X1 = np.random.uniform(-1.0, 0.0, size=(100, 2))
# Generate 100 labels for class 1
y1 = np.repeat(1, 100)
# Draw a scatter plot
fig, ax = plt.subplots()
ax.scatter(X0[:, 0], X0[:, 1], marker='o', label='class 0')
ax.scatter(X1[:, 0], X1[:, 1], marker='x', label='class 1')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.legend()
plt.show()

from sklearn.svm import SVC

# Function that fits an SVM and visualises the decision boundary, margins and support vectors
def plot_boundary_margin_sv(X0, y0, X1, y1, kernel, C, xmin=-1, xmax=1, ymin=-1, ymax=1):
    # Instantiate the support vector machine
    svc = SVC(kernel=kernel, C=C)
    # Fit
    svc.fit(np.vstack((X0, X1)), np.hstack((y0, y1)))

    fig, ax = plt.subplots()
    ax.scatter(X0[:, 0], X0[:, 1], marker='o', label='class 0')
    ax.scatter(X1[:, 0], X1[:, 1], marker='x', label='class 1')

    # Plot the decision boundary and margins
    xx, yy = np.meshgrid(np.linspace(xmin, xmax, 100), np.linspace(ymin, ymax, 100))
    xy = np.vstack([xx.ravel(), yy.ravel()]).T
    p = svc.decision_function(xy).reshape((100, 100))
    ax.contour(xx, yy, p,
               colors='k', levels=[-1, 0, 1],
               alpha=0.5, linestyles=['--', '-', '--'])

    # Plot the support vectors
    ax.scatter(svc.support_vectors_[:, 0],
               svc.support_vectors_[:, 1],
               s=250, facecolors='none',
               edgecolors='black')

    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.legend(loc='best')
    plt.show()

plot_boundary_margin_sv(X0, y0, X1, y1, kernel='linear', C=1e6)
plot_boundary_margin_sv(X0, y0, X1, y1, kernel='linear', C=0.1)

np.random.seed(123)
X = np.random.random(size=(100, 2))
y = (X[:, 1] > 2*(X[:, 0] - 0.5)**2 + 0.5).astype(int)

fig, ax = plt.subplots()
ax.scatter(X[y == 0, 0], X[y == 0, 1], marker='x', label='class 0')
ax.scatter(X[y == 1, 0], X[y == 1, 1], marker='o', label='class 1')
ax.legend()
plt.show()

# Plot the decision boundary, margins and support vectors
X0, X1 = X[y == 0, :], X[y == 1, :]
y0, y1 = y[y == 0], y[y == 1]
plot_boundary_margin_sv(X0, y0, X1, y1, kernel='rbf', C=1e3, xmin=0, ymin=0)

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

# Load the Iris dataset
iris = load_iris()
X, y = iris.data, iris.target
# Split into training and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=123)
# Instantiate the decision tree (maximum depth = 3)
tree = DecisionTreeClassifier(max_depth=3)
# Fit
tree.fit(X_train, y_train)

from pydotplus import graph_from_dot_data
from sklearn.tree import export_graphviz

# Extract the tree as dot-format data
dot_data = export_graphviz(tree, filled=True,
                           rounded=True,
                           class_names=['Setosa',
                                        'Versicolor',
                                        'Virginica'],
                           feature_names=['Sepal Length',
                                          'Sepal Width',
                                          'Petal Length',
                                          'Petal Width'],
                           out_file=None)
# Write out the decision tree plot
graph = graph_from_dot_data(dot_data)
graph.write_png('tree.png')

# Predict
y_pred = tree.predict(X_test)
y_pred

from sklearn.ensemble import RandomForestClassifier

# Instantiate the random forest
forest = RandomForestClassifier(n_estimators=100, random_state=123)
# Fit
forest.fit(X_train, y_train)
# Predict
y_pred = forest.predict(X_test)
y_pred
```
# Inaugural Project
Imports and set magics:
```
import numpy as np
import itertools as it
import matplotlib.pylab as plt
from scipy import optimize
# autoreload modules when code is run
%load_ext autoreload
%autoreload 2
# local modules
import inauguralproject
```
# Question 1
```
#In this question, we want to construct a function which solves equation (1).
#The utility function is defined by:
def utility(w, l, m = 1, v = 10, e = 0.3, t0 = 0.4, t1 = 0.1, k = 0.4):
tax = t0*w*l+t1*np.fmax(w*l-k,0)
c = m+w*l-tax
utility = np.log(c)-v*l**(1+1/e)/(1+1/e)
return utility,c, tax
#Next a function is created in order to maximize utility:
def solveconsumerproblem(w, m = 1, v = 10, e = 0.3, t0 = 0.4, t1 = 0.1, k = 0.4,\
N=100, callableoutput=False, tax=False):
    #Now, we want to create a set of empty local lists in order to store the outputs.
c_star = [0]
l_star = [0]
t_star = [0]
utility_star = [-np.inf]
# Creating a range for labour in order to examine the utility for a given number of elements.
Range = np.linspace(0, 1, N)
for i in Range:
utility_temp = utility(l = i, w = w, e = e, t0 = t0, t1 = t1, k = k)
        if utility_temp[0] > utility_star[0]:
utility_star[0]=utility_temp[0]
l_star[0] = i
c_star[0] = utility_temp[1]
t_star[0] = utility_temp[2]
#Setting requirements for tax, whether to display or not
if tax == False:
if callableoutput == False:
print(f"It is found that (l*,c*) = ({l_star[0]:.3}, {c_star[0]:.3}) meaning that the optimal utility is given by u(c*,l*) = {utility_star[0]:.3}")
else:
return utility_star[0], l_star[0], c_star[0]
if tax == True:
return utility_star[0], l_star[0], c_star[0], t_star[0]
solveconsumerproblem(w=0.6, tax=False, callableoutput=False)
```
# Question 2
```
#Now we want to plot l* and c* as functions of w in the range 0.5 to 1.5
#In order to do so, we first define the range of w as:
w_range = np.linspace(0.5, 1.5, 200)
#As before, we create empty lists for later use:
l_list = []
c_list = []
#Lastly, we loop the w_range as follows:
for i in w_range:
l_list.append(solveconsumerproblem(w = i, callableoutput=True)[1])
c_list.append(solveconsumerproblem(w = i, callableoutput=True)[2])
#Now, the figures are ready to be created. This is done by:
fig = plt.figure(figsize=(30,7.5))
plt.style.use('seaborn-whitegrid')
# first plot including the labor supply depending on the wage
plot_left = fig.add_subplot(1,2,1)
plot_left.plot(w_range,l_list)
plot_left.set_title("Labour depending on wage",fontsize=20)
plot_left.set_xlabel("Wage",fontsize=15)
plot_left.set_ylabel("Labour supply",fontsize=15)
plot_left.axes.tick_params(labelsize=15)
# second plot including the consumptions depending on the wage
plot_right = fig.add_subplot(1,2,2)
plot_right.plot(w_range,c_list)
plot_right.set_title("Consumption depending on wage",fontsize=20)
plot_right.set_xlabel("Wage",fontsize=15)
plot_right.set_ylabel("Optimal consumption",fontsize=15)
plot_right.axes.tick_params(labelsize=15)
```
# Question 3
```
#In order to calculate the tax revenue, we need to find the wage level for different types of consumers. This is done as follows:
np.random.seed(seed = 1337)
wage_list = np.random.uniform(low = 0.5, high = 1.5, size = 10000)
#Next, we wish to find the tax-revenue for a set of different wage-levels and parameters as:
def taxrevenue(wages, t0, t1, k, e = 0.3):
#Again, an empty list is generated for output:
tax_payments = []
    #As seen before, we want to create a loop over wages, appending the individual tax element inside the function.
for i in wages:
tax_payments.append(solveconsumerproblem(w = i, t0=t0, t1=t1, e=e, k=k, callableoutput = True, tax = True)[-1])
    #The sum of all the calculated individual tax payments is used in order to find the aggregated tax revenue:
return np.sum(tax_payments)
#Last, we find the total tax revenue as:
print("The aggregated tax revenue is calculated as " + \
str(round(taxrevenue(wages = wage_list, t0=0.4, t1=0.1, k=0.4, e=0.3),2)))
```
# Question 4
```
#Now, we wish to examine the aggregated tax revenue for epsilon=0.1. This is solely done by running the above function, setting epsilon=0.1 as follows:
print("For epsilon=0.1, the aggregated tax revenue is " + \
str(round(taxrevenue(wages = wage_list, t0=0.4, t1=0.1, k=0.4, e=0.1),2)))
```
# Question 5
```
#In order to maximize the tax revenue, we wish to find the optimal t0, t1 and k using solvetax
#First, setting the elasticity parameter once again:
e = 0.3
#We then define the function that calculates the total tax revenue for a given tax system.
#It reuses solveconsumerproblem from Question 1 to find the optimal labour supply (and hence the tax paid) at each wage.
def totaltax(t0, t1, k, e=0.3):
    np.random.seed(1000)
    #Setting a uniform distribution of w (a small sample keeps the optimisation fast)
    w_rand = np.random.uniform(0.5, 1.5, size=10)
    #Total tax revenue is then calculated by summing the individual tax payments:
    TTR = 0
    for wrand in w_rand:
        tax_i = solveconsumerproblem(w=wrand, t0=t0, t1=t1, k=k, e=e,
                                     callableoutput=True, tax=True)[-1]
        TTR += tax_i
    return TTR
#Solving the above by minimising the negative of the total tax revenue
def solvetax(e=0.3):
    #Unpacking the tax parameters from the optimiser's vector
    def obj(x):
        t0, t1, k = x
        return -totaltax(t0, t1, k, e)
    #Setting the initial guess (the baseline tax system from Question 1)
    initial_guess = np.array([0.4, 0.1, 0.4])
    #Solver
    optimaltax = optimize.minimize(obj, initial_guess, method='Nelder-Mead')
    return optimaltax
solved = solvetax(e)
#The results can then be inspected by running:
#print(solved)
#print(solved.x[0])
#print(solved.x[1])
#print(solved.x[2])
```
# Conclusion

Sponsored by the BYU PCCL Lab.
> AI Dungeon 2 is a completely AI-generated text adventure built with OpenAI's largest GPT-2 model. It's a first-of-its-kind game that allows you to enter, and will react to, any action you can imagine.
# What is this?
Google Colab is a way to experience machine learning for free: Google provides GPUs that you can run code on. Because this game exploded in popularity, however, Google likely won't be able to allow free usage of those GPUs for AI Dungeon for very long. We are almost done making an app version of the game where you will be able to play AI Dungeon 2. Until that's released you can still play the game here.
# Main mirrors of AI Dungeon 2 are currently down due to high download costs.
We are using bittorrent as a temporary solution to host game files and keep this game alive. It's not fast, but it's the best we've got right now.
If you want to help, best thing you can do is to **[download this torrent file with game files](https://github.com/nickwalton/AIDungeon/files/3935881/model_v5.torrent.zip)** and **seed it** indefinitely to the best of your ability. This will help new players download this game faster, and discover the vast worlds of AIDungeon2!
- <a href="https://twitter.com/nickwalton00?ref_src=twsrc%5Etfw" class="twitter-follow-button" data-show-count="false">Follow @nickwalton00</a> on Twitter for updates on when it will be available again.
- **[Support AI Dungeon 2](https://www.patreon.com/AIDungeon) on Patreon to help me to continue improving the game with all the awesome ideas I have for its future!**
## How to play
1. Click "Tools"-> "Settings..." -> "Theme" -> "Dark" (optional but recommended)
2. Go to **Main Game** section below
3. Run Install block
3. Run Download Model block
4. It will then take a couple minutes to boot up as the model is downloaded loaded onto the GPU.
5. Run the game block
6. If you have questions about getting it to work then please [go to github repo](https://github.com/AIDungeon/AIDungeon) to get help.
## About
- While you wait you can [read adventures others have had](https://aidungeon.io/)
- [Read more](https://pcc.cs.byu.edu/2019/11/21/ai-dungeon-2-creating-infinitely-generated-text-adventures-with-deep-learning-language-models/) about how AI Dungeon 2 is made.
- **[Support AI Dungeon 2](https://www.patreon.com/bePatron?u=19115449) on Patreon to help me to continue improving the game with all the awesome ideas I have for its future!**
# Main Game
```
# Install
!git clone --depth 1 --branch master https://github.com/AIDungeon/AIDungeon/
%cd AIDungeon
!./install.sh
print("Installation Complete!")
# Download model from torrent:
!./download_model.sh
from IPython.display import clear_output
clear_output()
print("Download Complete!")
# Play
from IPython.display import Javascript
display(Javascript('''google.colab.output.setIframeHeight(0, true, {maxHeight: 5000})'''))
!python play.py
```
# Utilities (Persistent Save / Load, OOM Fix)
```
# RUN THIS FIRST before running any block below.
# This block mount Google Drive to our workspace
# so we can save to and load from it!
import pathlib
from distutils.dir_util import copy_tree
from google.colab import drive
drive.mount('/content/drive')
drive_stories_directory="/content/drive/My Drive/AIDungeon/saved_stories"
colab_stories_directory="/content/AIDungeon/saved_stories"
drive_model_directory="/content/drive/My Drive/Data/model_v5"
colab_model_directory="/content/AIDungeon/generator/gpt2/models/model_v5"
pathlib.Path(drive_stories_directory).mkdir(parents=True, exist_ok=True)
# Save stories to your Google Drive
copy_tree(
colab_stories_directory,
drive_stories_directory
)
# Load stories from your Google Drive
copy_tree(
drive_stories_directory,
colab_stories_directory
)
# Backup model from Colab to Google Drive. Requires 6.5GB of space!
copy_tree(
colab_model_directory,
drive_model_directory
)
# Copy model from Google Drive. Make sure the model is uploaded to your personal Drive.
# It should reside in a Data folder. The path is: /Data/model_v5/
copy_tree(
drive_model_directory,
colab_model_directory
)
# If you get an OOM (out of memory error, random crashes)
# you might want to increase the available RAM.
# To do so, run this block. Wait until it crashes
# and a little message will pop up asking if
# you'd like to increase the available memory. Say yes and run the game.
# Credit goes to bpseudopod for figuring this out.
# Source: https://www.reddit.com/r/AIDungeon/comments/e782oi/tips_for_crash_prevention/
d = []
while True:
d.append(1)
```
# Advanced: Extending lambeq
## Creating readers
### [Reader](../lambeq.rst#lambeq.reader.Reader) example: "Comb" reader
In this example we will create a reader that, given a sentence, generates the following tensor network:
<center>
<img src="attachment:linear-2.png" alt="drawing" width="300" style="margin: 20px 2px 2px 2px;"/>
</center>
Note that the particular compositional model is not appropriate for classical experiments, since the tensor that implements the layer can become very large for long sentences. However, the model can be implemented without problems on a quantum computer.
```
from lambeq.reader import Reader
from lambeq.core.types import AtomicType
from discopy import Box, Id, Word
N = AtomicType.NOUN
class CombReader(Reader):
def sentence2diagram(self, sentence):
words = Id().tensor(*[Word(w, N) for w in sentence.split()])
layer = Box('LAYER', words.cod, N)
return words >> layer
diagram = CombReader().sentence2diagram('John gave Mary a flower')
diagram.draw()
```
```
Id().tensor(*[Word(w, N) for w in ['John', 'gave', 'Mary', 'a', 'flower']]).draw()
```
## Creating rewrite rules
```
import warnings
warnings.filterwarnings('ignore') # Ignore warnings
from lambeq.ccg2discocat import DepCCGParser
parser = DepCCGParser()
d = parser.sentence2diagram('The food is fresh')
```
### [SimpleRewriteRule](../lambeq.rst#lambeq.rewrite.SimpleRewriteRule) example: Negation functor
```
from lambeq.rewrite import SimpleRewriteRule
from lambeq.core.types import AtomicType
from discopy.rigid import Box, Id
N = AtomicType.NOUN
S = AtomicType.SENTENCE
adj = N @ N.l
NOT = Box('NOT', S, S)
negation_rewrite = SimpleRewriteRule(
cod=N.r @ S @ S.l @ N,
template=SimpleRewriteRule.placeholder(N.r @ S @ S.l @ N) >> Id(N.r) @ NOT @ Id(S.l @ N),
words=['is', 'was', 'has', 'have'])
```
```
from lambeq.rewrite import Rewriter
from discopy import drawing
not_d = Rewriter([negation_rewrite])(d)
drawing.equation(d, not_d, symbol='->', figsize=(14, 4))
```
### [RewriteRule](../lambeq.rst#lambeq.rewrite.RewriteRule) example: "Past" functor
```
from lambeq.rewrite import RewriteRule
class PastRewriteRule(RewriteRule):
mapping = {
'is': 'was',
'are': 'were',
'has': 'had'
}
def matches(self, box):
return box.name in self.mapping
def rewrite(self, box):
new_name = self.mapping[box.name]
return type(box)(name=new_name, dom=box.dom, cod=box.cod)
past_d = Rewriter([PastRewriteRule()])(d)
drawing.equation(d, past_d, symbol='->', figsize=(14, 4))
```
## Creating ansätze
```
d = parser.sentence2diagram('We will go')
```
### [CircuitAnsatz](../lambeq.rst#lambeq.circuit.CircuitAnsatz) example: "Real-valued" ansatz
```
from lambeq.circuit import CircuitAnsatz
from discopy.quantum.circuit import Functor, Id
from discopy.quantum.gates import Bra, CX, Ket, Ry
from lambeq.ansatz import Symbol
class RealAnsatz(CircuitAnsatz):
def __init__(self, ob_map, n_layers):
super().__init__(ob_map=ob_map, n_layers=n_layers)
self.n_layers = n_layers
self.functor = Functor(ob=self.ob_map, ar=self._ar)
def _ar(self, box):
# step 1: obtain label
label = self._summarise_box(box)
# step 2: map domain and codomain
dom, cod = self._ob(box.dom), self._ob(box.cod)
n_qubits = max(dom, cod)
n_layers = self.n_layers
# step 3: construct and return ansatz
if n_qubits == 1:
circuit = Ry(Symbol(f'{label}_0'))
else:
# this also deals with the n_qubits == 0 case correctly
circuit = Id(n_qubits)
for i in range(n_layers):
offset = i * n_qubits
syms = [Symbol(f'{label}_{offset + j}') for j in range(n_qubits)]
# adds a ladder of CNOTs
for j in range(n_qubits - 1):
circuit >>= Id(j) @ CX @ Id(n_qubits - j - 2)
# adds a layer of Y rotations
circuit >>= Id().tensor(*[Ry(sym) for sym in syms])
if cod <= dom:
circuit >>= Id(cod) @ Bra(*[0]*(dom - cod))
else:
circuit <<= Id(dom) @ Ket(*[0]*(cod - dom))
return circuit
real_d = RealAnsatz({N: 1, S: 1}, n_layers=2)(d)
real_d.draw(figsize=(12, 10))
```
### [TensorAnsatz](../lambeq.rst#lambeq.tensor.TensorAnsatz) example: "Positive" ansatz
```
from lambeq.tensor import TensorAnsatz
from discopy import rigid, tensor
from functools import reduce
class PositiveAnsatz(TensorAnsatz):
def _ar(self, box):
# step 1: obtain label
label = self._summarise_box(box)
# step 2: map domain and codomain
dom, cod = self._ob(box.dom), self._ob(box.cod)
# step 3: construct and return ansatz
name = self._summarise_box(box)
n_params = reduce(lambda x, y: x * y, dom @ cod, 1)
syms = Symbol(name, size=n_params)
return tensor.Box(box.name, dom, cod, syms ** 2)
from discopy import Dim
ansatz = PositiveAnsatz({N: Dim(2), S: Dim(2)})
positive_d = ansatz(d)
positive_d.draw()
import numpy as np
from sympy import default_sort_key
syms = sorted(positive_d.free_symbols, key=default_sort_key)
sym_dict = {k: -np.ones(k.size) for k in syms}
subbed_diagram = positive_d.lambdify(*syms)(*sym_dict.values())
subbed_diagram.eval()
```
## Contributions
# Spike Calibration
The accuracy of the double spike technique depends crucially on how well the double spike is calibrated. This notebook illustrates one method of calibration which uses measurements of mixtures of the double spike with a known standard material. See Figure 6 of [Klaver and Coath 2019, Geostand. Geoanal. Res.](https://doi.org/10.1111/ggr.12248) and Appendix F of the manuscript for further details.
```
import doublespike as ds
import numpy as np
import matplotlib.pyplot as plt
```
As an example we will consider Fe isotopes, with a $^{57}$Fe-$^{58}$Fe double spike.
```
isodata_fe = ds.IsoData("Fe")
true_spike = [0.0025, 0.1060, 0.4413, 0.4502]
```
## Monte Carlo simulated data
We will use Monte Carlo simulation to produce some fictitious data to use for the calibration. We first measure the pure double spike, taking an average over 100 integrations.
```
n = 100
spike_measurements = ds.monterun(isodata_fe, 1.0, true_spike, alpha=0.0, beta=0.8, n=n)
spike_measurement = np.mean(spike_measurements, axis=0)
print(spike_measurement)
```
Now we will take a series of mixtures of spike and standard, again averaging over 100 integrations.
```
mixture_props = [0.3, 0.5, 0.7]
mixture_betas = [2.0, 1.5, 1.7]
mixture_measurements = [
ds.monterun(isodata_fe, prop, true_spike, alpha=0.0, beta=beta, n=n)
for prop, beta in zip(mixture_props, mixture_betas)
]
mixture_measurement = [np.mean(m, axis=0) for m in mixture_measurements]
mixture_measurement = np.vstack(mixture_measurement)
print(mixture_measurement)
```
## Performing the calibration
We now perform the calibration using both the pure spike measurement and the spike-standard mixture measurements as input. The calibration routine minimises the chi-squared misfit between data and model-predicted isotopic ratios, i.e. minimising
$$ \chi^2 = \sum (\mathbf{m}_\text{obs} - \mathbf{m}_\text{model})^T \; \mathbf{W}\; (\mathbf{m}_\text{obs} - \mathbf{m}_\text{model}) $$
where the weight matrix $\mathbf{W}$ is chosen as the inverse of the expected covariance matrix for `errormodel['measured']`. It may be possible to improve the calibration by adjusting the weighting to be more appropriate for the data of interest.
The calibration routine returns the composition of the calibrated spike, estimates of the mixing proportions of spike and standard, the instrumental fractionation factors for both the spike measurements and the spike-standard mixture measurements, the chi-squared misfit, and the degrees of freedom for the chi-squared statistic.
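To make the weighting concrete, here is a small illustrative sketch of a weighted chi-squared misfit for a single measurement vector. It is not the package's internal implementation; the weight matrix is simply assumed to be the inverse of the measurement covariance, and the numbers are made up.
```
import numpy as np

def weighted_chi_squared(m_obs, m_model, cov):
    """Illustrative weighted chi-squared misfit for one measurement vector."""
    residual = np.asarray(m_obs) - np.asarray(m_model)
    W = np.linalg.inv(cov)          # weight matrix assumed to be the inverse covariance
    return residual @ W @ residual  # (m_obs - m_model)^T W (m_obs - m_model)

# toy usage with made-up numbers
m_obs = np.array([0.10, 0.45, 0.44])
m_model = np.array([0.11, 0.44, 0.45])
cov = np.diag([1e-4, 1e-4, 1e-4])
print(weighted_chi_squared(m_obs, m_model, cov))
```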
```
cal = ds.spike_calibration(isodata_fe, spike_measurement, mixture_measurement)
print(cal)
```
Since in this example we know the true composition of the spike, we can see how well the calibration routine has recovered the true composition of the spike:
```
calibrated_spike = cal["calibrated_spike"]
print("True spike:", true_spike)
print("Calibrated spike:", calibrated_spike)
print("Difference:", calibrated_spike - true_spike)
```
## Validation of the calibration
One way of checking the calibration is to run the double spike inversion on data produced from a series of mixtures of spike and standard in different proportions. Here we use Monte Carlo simulation to produce more fictitious data for a series of such mixtures.
```
n = 1000
mixture_props = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
mixture_betas = 1.1 * np.ones_like(mixture_props)
measured = [
ds.monterun(isodata_fe, prop, true_spike, alpha=0.0, beta=beta, n=n)
for prop, beta in zip(mixture_props, mixture_betas)
]
```
Now we run this fictitious data through the double spike inversion to estimate the natural fractionation factor $\alpha$. The true value of $\alpha$ should be zero as we are measuring mixtures of the standard and the spike.
```
data = [ds.dsinversion(isodata_fe, m, calibrated_spike) for m in measured]
alpha = [np.mean(d["alpha"]) for d in data]
prop = [np.mean(d["prop"]) for d in data]
alpha_err = [np.std(d["alpha"]) for d in data]
print(alpha)
```
We can plot the recovered $\alpha$ values as a function of the proportion of spike in the mixture. Here there is no systematic trend in mean $\alpha$ with proportion of spike. One way of identifying a poorly-calibrated double spike is to see a systematic variation of $\alpha$ with proportion of spike in the mixture.
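As an informal extra check (a sketch, not part of the original workflow), one could fit a straight line to the recovered $\alpha$ values against the spike proportion; a slope far from zero would hint at a calibration problem. This assumes the `prop` and `alpha` lists computed above.
```
# least-squares line through (prop, alpha); a slope near zero suggests no systematic trend
slope, intercept = np.polyfit(prop, alpha, 1)
print(f"slope = {slope:.2e}, intercept = {intercept:.2e}")
```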
```
plt.errorbar(prop, alpha, yerr=alpha_err, fmt="o")
plt.xlabel("proportion of double spike in double spike-sample mix")
plt.ylabel(r"Recovered $\alpha$");
```
We can also compare the errors in $\alpha$ obtained from the inversion with those expected from the linear error propagation. In this case we have an excellent match as the Monte-Carlo simulations are based on exactly the same error model as the linear error propagation.
```
ds.errorcurve(isodata_fe, calibrated_spike)
plt.plot(prop, alpha_err, "o");
```
# <font color='red'>Maximize Flight Profit</font>
1. Here we use linear programming to maximize the profit
2. First we import the required libraries
3. Read all the text files required for the calculation
4. The data is available here: [flight data](http://mgmt.iisc.ac.in/~parthar/)
5. A presentation (ppt) is provided with more detail about the problem
```
import pandas as pd
import numpy as np
from scipy.optimize import linprog
from numpy.linalg import solve
demand = pd.read_csv('demand.txt', delimiter="\t", header=None,names=["itinerary_name", "demand"])
capacity = pd.read_csv('capacity.txt', delimiter="\t", header=None,names=["leg_name", "capacity"])
fare = pd.read_csv('fare.txt', delimiter="\t", header=None,names=["itinerary_name", "fare"])
it_leg = pd.read_csv('it_leg.txt', delimiter="\t", header=None,names=["itinerary_name", "leg_name"])
```
### <font color='red'>CREATING DICTIONARIES</font>
1. Itineraries and flight legs are numbered for easy calculation
```
itinerary_dict = {}
l=0
for ind in demand.index:
itinerary_dict[demand['itinerary_name'][ind]] = l
l=l+1
leg_dict = {}
k=0
for ind in capacity.index:
leg_dict[capacity['leg_name'][ind]] = k
k=k+1
```
### <font color='red'>CREATING MATRIX and ARRAY</font>
```
Num_inerary,a = demand.shape
Num_flight_legs,s = capacity.shape
A = np.zeros( (Num_inerary+Num_flight_legs, Num_inerary) )
for i in range (Num_inerary):
A[i,i] = 1
for ind in it_leg.index:
i=itinerary_dict[it_leg['itinerary_name'][ind]]
j=Num_inerary+leg_dict[it_leg['leg_name'][ind]]
A[j,i] = 1
d = demand.iloc[:,1].values
c = capacity.iloc[:,1].values
f = fare.iloc[:,1].values
x = np.zeros((Num_inerary,1))
b = np.zeros((Num_inerary+Num_flight_legs))
fa = np.zeros((Num_inerary))
for i in range(Num_inerary):
b[i] = d[i]
for i in range(Num_inerary,Num_inerary+Num_flight_legs):
b[i] = c[i-Num_inerary]
for i in range (Num_inerary):
fa[i] = -f[i]
```
### <font color='red'>LINPROG</font>
1. Here we minimize the negative of the objective function that needs to be maximized (a toy sketch follows below).
2. The negative of the minimum gives the maximum.
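For example, a toy sketch unrelated to the flight data: to maximize 3x + 2y subject to x + y <= 4 with x, y >= 0, we minimize -(3x + 2y) and negate the optimal value.
```
from scipy.optimize import linprog

# maximize 3x + 2y  <=>  minimize -3x - 2y
res_toy = linprog(c=[-3, -2], A_ub=[[1, 1]], b_ub=[4], bounds=(0, None))
print(-res_toy.fun)  # maximum value: 12, attained at x=4, y=0
```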
```
res = linprog(c=fa, A_ub=A, b_ub=b,bounds=(0, None))
print (res)
net_profit = -res.fun
print (-res.fun)
```
### A duplicate of every table, matrix and array is created for the second part of the assignment
```
demand_dup = demand
capacity_dup = capacity
fare_dup = fare
it_leg_dup = it_leg
d_dup = demand.iloc[:,1].values
c_dup = capacity.iloc[:,1].values
f_dup = fare.iloc[:,1].values
x_dup = np.zeros((Num_inerary,1))
b_dup = np.zeros((Num_inerary+Num_flight_legs))
fa_dup = np.zeros((Num_inerary))
```
### New demand and fares are calculated
```
sub = 'RHC'
start = 16
for ind in it_leg.index:
it_leg_dup["Indexes"] = it_leg_dup["leg_name"].str.find(sub, start)
for ind in it_leg_dup.index:
if (it_leg_dup['Indexes'][ind]==19):
i = itinerary_dict[it_leg_dup['itinerary_name'][ind]]
d_dup[i] = d[i]*115/100
f_dup[i] = f[i]*125/100
```
### <font color='red'>LINPROG</font>
```
for i in range(Num_inerary):
b_dup[i] = d_dup[i]
for i in range(Num_inerary,Num_inerary+Num_flight_legs):
b_dup[i] = c_dup[i-Num_inerary]
for i in range (Num_inerary):
fa_dup[i] = -f_dup[i]
res_new = linprog(c=fa_dup, A_ub=A, b_ub=b,bounds=(0, None))
print (res_new)
net_profit_new = -res_new.fun
print (-res_new.fun)
```
### Finding the top 10 most affected itineraries in terms of the optimal seat allocation
1. The difference between the allocations in the two cases is calculated and sorted in ascending order (an alternative using `nlargest` is sketched below)
2. So the last 10 itineraries are the most affected ones
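For reference, once the cell below has added the `res_diff_abs` column, the ten most affected itineraries could also be pulled out directly (an alternative sketch):
```
# largest absolute changes in the optimal seat allocation = most affected itineraries
top10_affected = fare_dup.nlargest(10, 'res_diff_abs')
print(top10_affected[['itinerary_name', 'res_diff_abs']])
```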
```
res_diff = res.x - res_new.x
res_diff_abs = np.absolute(res_diff)
fare_dup['res_diff_abs'] = res_diff_abs
fare_dup.sort_values(["res_diff_abs"], axis=0,
ascending=True, inplace=True)
fare_dup
```
# Overview
- Some features saturate at certain values, which is a concern, so we correct for it
# Const
```
NB = '008'
PATH_TRAIN = '../data_ignore/input/train_features.csv'
PATH_TRAIN_SCORED = '../data_ignore/input/train_targets_scored.csv'
PATH_TRAIN_NONSCORED = '../data_ignore/input/train_targets_nonscored.csv'
PATH_SUB = '../data_ignore/input/sample_submission.csv'
PATH_TEST = '../data_ignore/input/test_features.csv'
PATH_GROUP = '../data_ignore/output_nb/nb004/group.csv'
cp = ['#f8b195', '#f67280', '#c06c84', '#6c5b7b', '#355c7d']
```
# Import everything I need :)
```
import os
from pprint import pprint
import warnings
import numpy as np
import pandas as pd
from scipy import stats
import numpy.random as rd
import matplotlib.pyplot as plt
import seaborn as sns
from fastprogress import progress_bar
from sklearn.model_selection import GroupKFold, KFold
from sklearn.metrics import log_loss
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
```
# My function
```
def get_696_strategy_fold(group, n_splits, seed=0):
'''nb004'''
    # split the group == 0 rows (all targets = 0)
mask_0 = group['group'] == 0
group_0 = group[mask_0]
splitter = KFold(n_splits=n_splits, random_state=seed, shuffle=True)
df_fold_0 = pd.DataFrame()
df_fold_0['fold'] = np.zeros(len(group_0)).astype(int)
for i_fold, (idx_trn, idx_val) in enumerate(splitter.split(group_0)):
df_fold_0['fold'][idx_val] = int(i_fold + 1)
    # split the group != 0 rows
mask_not0 = group['group'] != 0
group_not0 = group[mask_not0]
splitter = GroupKFold(n_splits=n_splits)
df_fold_not0 = pd.DataFrame()
df_fold_not0['fold'] = np.zeros(len(group_not0)).astype(int)
for i_fold, (idx_trn, idx_val) in enumerate(splitter.split(group_not0, groups=group_not0['group'].values)):
df_fold_not0['fold'][idx_val] = int(i_fold + 1)
    # combine the fold information
df_fold = pd.DataFrame()
df_fold['sig_id'] = group['sig_id'].values
df_fold['fold'] = np.zeros(len(group)).astype(int)
df_fold['fold'][mask_0.values] = df_fold_0['fold'].values
df_fold['fold'][mask_not0.values] = df_fold_not0['fold'].values
return df_fold
def mean_log_loss(y_true, y_pred):
metrics = []
for i, target in enumerate(targets.columns):
metrics.append(log_loss(y_true[:, i], y_pred[:, i].astype(float), labels=[0,1]))
return np.mean(metrics)
```
# Preparation
set
```
sns.set()
np.random.seed(seed=0)
pd.set_option('display.max_columns', 1000)
warnings.filterwarnings('ignore')
```
<br>
load datasets
```
train_features = pd.read_csv(PATH_TRAIN, index_col='sig_id')
train_targets = pd.read_csv(PATH_TRAIN_SCORED, index_col='sig_id')
# train_nonscored = pd.read_csv(PATH_TRAIN_NONSCORED)
# sub = pd.read_csv(PATH_SUB)
test_features = pd.read_csv(PATH_TEST, index_col='sig_id')
targets = train_targets.copy()
train_features_new = train_features.copy()
train_features.head()
```
# EDA
The maximum and minimum of every feature are 10 and -10.
```
train_features.iloc[:, 3:].describe()
```
<br>
Check the distributions.
```
df_random = train_features.iloc[:, 3:].sample(n=9, axis=1, random_state=5).sort_index(axis=1)
fig, axs = plt.subplots(3, 3, figsize=(20, 6))
axs = axs.ravel()
for i, col in enumerate(df_random.columns):
sns.distplot(df_random[col], ax=axs[i], label=col)
axs[i].legend()
```
<br>
Check how many values sit at -10.
```
count_10 = np.sum(train_features.iloc[:, 3:]<-9.8, axis=0)
count_10 = count_10.sort_values(ascending=False)
print('Top 10 features by share (%) of values below -9.8')
count_10[:10]/len(train_features)*100
```
<br>
Take a look at the top-ranked feature.
```
name = count_10.index[0]
sns.distplot(train_features[name].values)
plt.title(name)
```
---> A huge number of values are stuck at -10...
# Analyzing the 3rd-ranked feature (c-93)
- 3rd place: c-93
```
name = count_10.index[2]
mask_m10 = train_features[name] < -9.8
n_m10 = np.sum(mask_m10)
sns.distplot(train_features[name].values)
plt.title(name)
```
<br>
Check the region near -10.
```
mask = train_features[name].values < -2
feat_mask = train_features[name][mask]
sns.distplot(feat_mask.values, bins=20)
plt.title(name)
```
<br>
We want to replace these -10 values with something that looks more natural.
```
lam = 0.3
rand = rd.exponential(1./lam, size=n_m10)
rand_ = -rand - 9.8
range_bin_width = np.linspace(-15, 1, 40)
# sns.distplot(feat_mask.values, bins=range_bin_width, kde=False, label='original')
# sns.distplot(rand_, bins=range_bin_width, kde=False, label='sampling')
plt.hist(feat_mask.values, bins=range_bin_width, label='original', alpha=0.5)
plt.hist(rand_, bins=range_bin_width, label='sampling', alpha=0.5)
plt.legend()
```
<br>
Replace the masked entries with the sampled values.
```
train_features_new[name][mask_m10] = rand_
mask = train_features[name] < 1
fig, axs = plt.subplots(2, 1, figsize=(10, 4))
fig.suptitle(name)
sns.distplot(train_features[name][mask], kde=False, bins=range_bin_width, ax=axs[0], label='original')
axs[0].legend(loc='upper left')
axs[0].set_ylim(0, 500)
sns.distplot(train_features_new[name][mask], kde=False, bins=range_bin_width, ax=axs[1], label='new')
axs[1].legend(loc='upper left')
axs[1].set_ylim(0, 500)
```
---> Looks good!
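As an extra sanity check (a small sketch, not in the original notebook), we can confirm that only the masked entries were changed and that the replacement samples stay at or below the -9.8 cutoff:
```
# only the masked entries should differ from the original column
changed = train_features[name] != train_features_new[name]
print('changed entries:', changed.sum(), '/ expected:', n_m10)

# every replacement sample lies at or below the cutoff used for masking
print('replacements <= -9.8:', (train_features_new[name][mask_m10] <= -9.8).all())
```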
# Summary
- Write a function that performs the replacement for every feature
```
def add_exp_noise(df_feat, low_val=-9.8, high_val=9.8):
_df_feat = df_feat.copy()
lam = 0.3
for name in progress_bar(_df_feat.columns):
mask_low = _df_feat[name] < low_val
n_low = np.sum(mask_low)
rand = rd.exponential(1./lam, size=n_low)
rand_ = -rand - low_val
_df_feat[name][mask_low] = rand_
mask_high = _df_feat[name] > high_val
n_high = np.sum(mask_high)
rand = rd.exponential(1./lam, size=n_high)
rand_ = rand + high_val
_df_feat[name][mask_high] = rand_
return _df_feat
df = add_exp_noise(train_features.iloc[:, 3:], low_val=-9.8, high_val=9.8)
```
<br>
Visualization
```
mask = train_features[name] < 1
fig, axs = plt.subplots(2, 1, figsize=(10, 4))
fig.suptitle(name)
sns.distplot(train_features[name][mask], kde=False, bins=range_bin_width, ax=axs[0], label='original')
axs[0].legend(loc='upper left')
axs[0].set_ylim(0, 500)
sns.distplot(train_features_new[name][mask], kde=False, bins=range_bin_width, ax=axs[1], label='new')
axs[1].legend(loc='upper left')
axs[1].set_ylim(0, 500)
```
```
import numpy as np
```
### Plotting and Visualization Using `Matplotlib`
- Making plots is one of the most critical tasks in data science
- `Matplotlib` is a desktop package for creating primarily 2D publication-ready plots
- Inspired by MATLAB; many commands are nearly equivalent between MATLAB and Matplotlib
- `Matplotlib` has spawned many add-on toolkits for data visualization that build on top of `Matplotlib`
### Some Popular Plotting Tools
* Seaborn: https://seaborn.pydata.org/examples/index.html
* Statistical visualization with sensible defaults, grounded in research on how people best perceive information
* Excellent default parameters
* Plotly (https://plot.ly/python/)
* Has an API for Python (as well as other languages)
* The app itself is built on Python and DJANGO
* The same company that created Dash
### Some Popular Plotting Tools - Cont'd
* Dash by plotly (https://dash.plot.ly/gallery)
* Intuitive to use, elegant design
* Support interactive visualization
* Bokeh (https://bokeh.pydata.org/en/latest/docs/gallery.html)
* Python interactive visualization for web browsers
* plotnine (a.k.a. ggplot) (https://plotnine.readthedocs.io/en/stable/index.html)
* `plotnine` is an implementation of a grammar of graphics in Python
* based on ggplot2, the most popular plotting library used with the R language
### Matplotlib: a Brief Intro
* `Matplotlib` can be overwhelming, a good place to start is the gallery (https://matplotlib.org/gallery.html)
* A multitude of plots to use for different scenarios
* `matplotlib`'s main module is `pyplot`
* Imported using the `plt` alias
```python
import matplotlib.pyplot as plt
```
### Matplotlib: Plotting with X and Y axes
```python
import matplotlib.pyplot as plt
x = [1,2,3,5,6,7,8,9,10]
y = [1,2,3,5,6,7,8,9,10]
plt.plot(x, y)
```
```
y = [1.9**i for i in x ]
plt.plot(x, y)
```
#### Plot Figure and Axes
* Plots in Matplotlib reside within a Figure object
* A figure contains the overall window where plotting happens
* Contains the title axes, labels, ticks, etc..
```python
plt.figure()
```
* Figures are empty by default. Therefore, you need to add an axes (plot) to visualize your data
* To add a plot, you need to determine the geometry and indicate where you want the plot to go
* This returns the axes of a new subplot. It's an object of type `AxesSubplot`
```python
fig.add_axes([left, bottom, width, height])
# or
fig.add_subplot(nrows, ncols, index)
```
* Therefore, Figure and plot/subplot (axes) are two different things

```
fig = plt.figure()
fig = plt.figure()
# add_axes takes a rectangle [x0, y0, width, height]
fig.add_axes([0, 0, 1,1])
fig = plt.figure()
# add_axes takes a rectangle [x0, y0, width, height]
ax = fig.add_axes([0, 0, 1,1])
ax.plot([1,2,3,4,5], [1,2,3,4,5])
fig = plt.figure()
ax = fig.add_subplot()
print(f"ax is of type {type(ax)}")
fig = plt.figure()
ax = fig.add_subplot()
ax.plot([1,2,3,4,5], [1,2,3,4,5])
```
### Plotting Steps
* When you issue the command plt.plot(x, y ), the following happens
* Matplotlib uses the figure created in a cell or creates one if none exists
* Uses the last axes used or creates one if necessary
```python
fig = plt.figure()
ax = fig.add_subplot()
ax.plot([1,2,3,4,5], [1,2,3,4,5])
```
* Is equivalent to
```python
plt.plot([1,2,3,4,5], [1,2,3,4,5])
```
```
# automatically plot on any cerated figure and axis
fig = plt.figure()
plt.plot([1,2,3,4,5], [1,2,3,4,5])
plt.plot([1,2,3,4,5], [1,4,9,16,25])
```
### Creating Multiple Subplots
* It is possible to draw multiple plots (subplots) in one figure using the `subplots()` or `add_subplot()` functions
* Each subplot has its own set of axes.
* Subplot layout is organized in rows and columns
* A subplot needs to know where in the row/column layout it belongs
```python
add_subplot(num_rows, num_cols, position_of_subplot)
```
```
# Sample data
x = [1,2,3,4,5]
y_1 = [1,2,3,4,5]
y_2 = [i**2 for i in x]
y_3 = [i**3 for i in x]
y_4 = [i**4 for i in x]
x, y_1, y_2, y_3, y_4
# creating the subplots
fig = plt.figure()
ax1 = fig.add_subplot(2,2,1) # equivalent ot ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(2,2,2) # equivalent ot ax1 = fig.add_subplot(222)
ax3 = fig.add_subplot(2,2,3) # equivalent ot ax1 = fig.add_subplot(223)
# using add_subplot
fig = plt.figure()
ax1 = fig.add_subplot(2,2,1) # equivalent ot ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(2,2,2) # equivalent ot ax1 = fig.add_subplot(222)
ax3 = fig.add_subplot(2,2,3) # equivalent ot ax1 = fig.add_subplot(223)
ax1.plot(x, y_1)
ax2.plot(x, y_2)
ax3.plot(x, y_3)
fig = plt.figure()
ax1 = fig.add_subplot(2,2,1) # equivalent ot ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(2,2,4) # equivalent ot ax1 = fig.add_subplot(224)
ax1.plot(x, y_1)
ax2.plot(x, y_3)
# plot uses the last plot used.
fig = plt.figure(figsize=(11,2))
ax1 = fig.add_subplot(1,2,1) # equivalent ot ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(1,2,2) # equivalent ot ax1 = fig.add_subplot(222)
_ = plt.plot(np.random.randn(50).cumsum(), 'k--')
# Using subplot()
plt.subplot(2,2,1)
plt.plot(x, y_1)
plt.subplot(2,2,2)
plt.plot(x, y_2)
plt.subplot(2,2,3)
plt.plot(x, y_3)
plt.subplot(2,2,4)
plt.plot(x, y_4)
```
### Creating an Array of Subplots
* Creating an array of subplots is a common task
* `matplotlib` includes a function that generates the fig and all the plots using a single function
* The plots can share the same x-axis or y-axis,
* Many other options control the spacing, padding, figure size, etc.
```
fig, axes = plt.subplots(2,3, figsize=(14,6))
print(axes)
print("---------------------")
axes[0,0].plot(x, y_1)
axes[1,2].plot(x, y_4)
```
### Basic Plotting Parameters - Cont'd
* Plots have various options to customize them
* Options are either:
* plot specific
* e.g. plot size, plot title, x-axis label, whitespace, legend, etc.
* Line color, Line type (e.g.: dashed, width=2), Labels
```python
plt.plot(x,y, color='red', linestyle='--')
```
```
y = [1.9**i for i in x ]
plt.plot(x,y, color='red', linestyle='-.', linewidth=4)
```
### Basic Plotting Parameters - Cont'd
- In addition to the data, `plot` accepts a string abbreviating color and line style
- This is a convenience
```python
plt.plot(x,y, 'rx')
# or
plt.plot(x,y, 'k--')
```
* `plt.plot(x,y, 'k--')` can be expressed as `plt.plot(x,y, color='k', linestyle='--')`
* You can also specify color using hex code
```
y = 1.9**x
plt.plot(x,y, 'k--')
y = 30*x + 4
plt.plot(x,y, 'rx')
y = 40*x + 4
plt.plot(x,y, color='#feb24c')
```
### Plot Legend
* Adding legends is critical for identifying plot elements
* The easiest way to add a legend is to pass a label when adding each element and calling the legend() function on the plot
* the label can be a `latex` equation
* The legend is placed on the plot in a way that interferes the least with the plot
* You can change the location of the legend manually (a short sketch follows below)
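A minimal sketch of placing the legend manually (the location string and data are arbitrary choices):
```python
plt.plot(x, y_1, label='linear')
plt.plot(x, y_2, label='quadratic')
plt.legend(loc='upper left')   # e.g. 'upper left', 'lower right', 'center', ...
```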
```
plt.figure(figsize=(8,4))
y = [1.9**i for i in x]
plt.plot(x, y, 'k--', label= "$1.9^x$")
y = [30*i + 4 for i in x]
plt.plot(x, y, 'rx', label= "_no_legend_")
y = [40*i + 4 for i in x]
plt.plot(x ,y, color='#feb24c', label= "$ 40 \cdot x + 4$")
plt.legend()
```
### Plot Labels
* The minimum required labels to describe a plot are meaningful `x` and `y` label, and a plot title
* Axes can have a title, and so does the figure
* Title and labels can be customized
* Change font type and size, color, etc. (see the sketch below)
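A small illustrative sketch of this customization (the font sizes and colors are arbitrary choices, not recommendations):
```python
fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(x, y_2)
ax.set_title("Customized title", fontsize=16, color='darkred')
ax.set_xlabel("A range of values", fontsize=12)
ax.set_ylabel("Squared values", fontsize=12, color='gray')
```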
```
fig = plt.figure(figsize=(8,4))
ax = fig.add_subplot()
_ = ax.plot([1, 2, 3, 4, 5], [1, 4, 9, 16, 25])
fig.suptitle('This a generic figure title')
ax.set_title("This is some plot title")
ax.set_xlabel("A range of values")
# can you guess why I am using _ = here?
_ = ax.set_ylabel("Some other range of values")
```
### Changing Style Via Presets
* Matplotlib provides various style presets to improve the look and feel of the plots
* The presets control the fonts, the colors, borders and line widths, etc.
* See https://matplotlib.org/stable/gallery/style_sheets/style_sheets_reference.html
```python
plt.style.use('seaborn-whitegrid')
```
```
plt.style.use('seaborn-whitegrid')
fig, axes = plt.subplots(2,3, sharex=True, sharey=True, figsize=(8,3))
axes[0,0].plot(x, y_3)
axes[1,2].plot(x, y_4)
# Do not plt any whitespace between the subplots
# useful when sharing x and/or y
# used when it makes sense
plt.subplots_adjust(wspace=0, hspace=0)
```
### Anatomy of a Matplotlib Figure
* The features of a matplotlib figure (https://matplotlib.org/stable/gallery/showcase/anatomy.html#)
<img src="https://matplotlib.org/stable/_images/sphx_glr_anatomy_001_2_0x.png" alt="Girl in a jacket" width="400">
Image processing
-----------------
First, we need to import some packages.
```
import cv2
import numpy as np
import matplotlib.pyplot as plt
```
Color
--------------
Note the channel order used by each library:
* OpenCV : (B, G, R)
* matplotlib : (R, G, B)
Now we use matplotlib to show these images.
We usually work with grayscale images because they reduce the complexity.
```
coin_path = "data/image_processing/coin.png"
image = cv2.imread(coin_path)
image_plt = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
plt.figure()
plt.subplot(1,2,1)
plt.imshow(image_plt)
## reference : https://www.cnblogs.com/denny402/p/5122594.html
plt.subplot(1,2,2)
plt.imshow(gray, cmap=plt.get_cmap('gray'))
```
Blurry
------------
If an image contains a lot of noise, it is hard to tell what the objects are.
So we apply a filter to reduce the noise; one common approach is blurring.
Next, we'll use a Gaussian blur.
* Gaussian blur API in OpenCV : cv2.GaussianBlur(image, (w, h), std)
```
blurred = {
1 : cv2.GaussianBlur(gray, (1, 1),0),
3 : cv2.GaussianBlur(gray, (3, 3),0),
5 : cv2.GaussianBlur(gray, (5, 5),0),
7 : cv2.GaussianBlur(gray, (7, 7),0),
9 : cv2.GaussianBlur(gray, (9, 9),0)
}
blurred_index = 9
plt.figure()
plt.imshow(blurred[blurred_index], cmap=plt.get_cmap('gray'))
```
edge
-------
We can use ``cv2.Canny`` to find edges.
* cv2.Canny(image, lower_threshold, upper_threshold) (a heuristic for choosing the thresholds is sketched below)
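One common heuristic (an illustrative sketch, not the only option) derives both thresholds from the median intensity of the blurred image:
```
# heuristic thresholds around the median intensity; sigma is a tunable fraction
sigma = 0.33
med = np.median(blurred[9])
lower = int(max(0, (1.0 - sigma) * med))
upper = int(min(255, (1.0 + sigma) * med))
auto_canny = cv2.Canny(blurred[9], lower, upper)
```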
```
fig = plt.figure()
for index in range(1,10,2):
canny = cv2.Canny(blurred[index], 30, 150)
ax = fig.add_subplot(330 + index)
ax.set_title(index)
plt.subplot(3,3,index)
plt.imshow(canny , cmap=plt.get_cmap('gray'))
```
Contour
--------------
Note :
when you use cv2.drawContours(), the original
image will be modified in place (a sketch of how to avoid this follows below).
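A minimal sketch of keeping the original untouched (using the ``contours`` computed in the next cell): draw on a copy instead.
```
# draw on a copy so the original image stays unchanged
canvas = image.copy()
cv2.drawContours(canvas, contours, -1, (0, 0, 255), thickness=2)
```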
```
fig = plt.figure()
coin_canny = cv2.Canny(blurred[9], 30, 150)
# reference : https://blog.csdn.net/on2way/article/details/46812121
ret, binary = cv2.threshold(coin_canny,127,255,cv2.THRESH_BINARY)
# reference : https://blog.csdn.net/hjxu2016/article/details/77833336
contours,hierarchy= cv2.findContours(binary, cv2.RETR_TREE , cv2.CHAIN_APPROX_SIMPLE) #cv2.RETR_TREE
image = cv2.imread(coin_path)
coin_contours_image = cv2.drawContours(image,contours,-1,(0,0,255),thickness=0)
coin_contours_plt = cv2.cvtColor(coin_contours_image, cv2.COLOR_BGR2RGB)
plt.imshow(coin_contours_plt)
```
We can also find the center of each contour. Use :
* cv2.moments(contour)
To mark the centers :
* cv2.circle(image, (cX, cY), radius, color, thickness)
```
# reference : https://chtseng.wordpress.com/2016/12/05/opencv-contour%E8%BC%AA%E5%BB%93/
clone_center = coin_contours_image.copy()
for cnt in contours:
Moments = cv2.moments(cnt)
cX = int(Moments['m10'] / Moments['m00'])
cY = int(Moments['m01'] / Moments['m00'])
clone_center = cv2.circle(clone_center,(cX,cY),10,(1,227,254),-1)
clone_center = cv2.cvtColor(clone_center, cv2.COLOR_BGR2RGB)
plt.figure()
plt.imshow(clone_center)
```
Fitting a bounding rectangle
We can use :
* bounding rectangle : cv2.boundingRect(contour)
```
image = cv2.imread(coin_path)
for cnt in contours:
(x,y,w,h) = cv2.boundingRect(cnt)
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
```
Mask
----------
First, we find the contours.
```
mask_path = "data/image_processing/mask.png"
mask_image = cv2.imread(mask_path)
clone_mask_image = mask_image.copy()
gray = cv2.cvtColor(clone_mask_image, cv2.COLOR_BGR2GRAY)
(contours, _) = cv2.findContours(gray.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
clone_mask_image = cv2.drawContours(clone_mask_image, contours, -1, (0, 255, 0), 5)
clone_mask_image = cv2.cvtColor(clone_mask_image, cv2.COLOR_BGR2RGB)
plt.figure()
plt.imshow(mask_image)
plt.figure()
plt.imshow(clone_mask_image)
```
Next, using each contour, we build a mask.
```
for cnt in contours:
mask = np.zeros(gray.shape, dtype="uint8")
    mask = cv2.drawContours(mask, [cnt] , -1 , 255, -1) # 255 → white, thickness=-1 → filled
new_image = cv2.bitwise_and(mask_image,mask_image,mask=mask)
new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB)
plt.figure()
plt.imshow(new_image)
```
## Scaling out k-means to multiple GPUs when the data is large
```
import pandas as pd
import cudf
# Data generation.
from sklearn.datasets import make_blobs
# Local compute.
from sklearn.cluster import KMeans as cpuKMeans
from cuml.cluster import KMeans as gpuKMeans
# Distributed compute.
import dask_cudf
from dask.distributed import Client, wait
from dask_cuda import LocalCUDACluster
from dask_ml.cluster import KMeans as daskKMeans
from cuml.dask.cluster.kmeans import KMeans as mnmgKMeans
# Comparing results.
import cupy
import matplotlib.pyplot as plt
from sklearn.metrics import adjusted_rand_score
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
# Enable inline plotting in this notebook; not needed for interactive graphics.
%matplotlib inline
import gcsfs
fs = gcsfs.GCSFileSystem(cache_timeout = 0)
```
## Generate data
```
# Generate a large dataset.
def generate_data(
file_name,
n_samples = 1000000,
n_features = 2,
n_clusters = 6
):
input_data, input_labels = make_blobs(n_samples=n_samples,
n_features=n_features,
centers=n_clusters,
random_state = 1
)
# Save the data for CPU compute.
data_cpu = pd.DataFrame(input_data, columns = ['x', 'y'])
data_cpu['label'] = input_labels
# data_cpu.to_csv("/root/data.csv", index = False)
# fs.put("/root/data.csv",f"gs://shakdemo-hyperplane/data/synthetic_data/{file_name}.csv")
data_cpu.to_csv(f"gs://shakdemo-hyperplane/data/synthetic_data/{file_name}.csv", index = False)
return data_cpu
generate_data(0)
for i in tqdm(range(50)):
generate_data(i)
!nvidia-smi
!echo "Threads/core: $(nproc --all)"
!free -h
```
## Get file list
```
## initiate GCP storage access
import gcsfs
fs = gcsfs.GCSFileSystem(cache_timeout = 0)
file_path = "gs://shakdemo-hyperplane/data/synthetic_data"
file_list = fs.ls(file_path)
print(f"number of files {len(file_list)}")
```
## CPU local
```
# read data
data_cpu = []
for file in tqdm(file_list):
data_cpu.append(pd.read_csv(f"gs://{file}"))
data_cpu = pd.concat(data_cpu, ignore_index = True)
print(data_cpu.shape)
print(f'data size {data_cpu.memory_usage(deep = True).sum()/1024/1024/1024 :.2f} G')
# Plot the raw data and labels.
samples_frac = 0.01
df_sample = data_cpu.sample(frac = samples_frac)
# fig = plt.figure(figsize=(16,10))
# plt.scatter(df_sample['x'], df_sample['y'], c=df_sample['label'], s=20, cmap='viridis')
df_sample.shape
%%time
# Instantiate, train and predict.
kmeans_cpu = cpuKMeans(init="k-means++",
n_clusters=6,
random_state=0)
kmeans_cpu.fit(df_sample[['x','y']])
labels_cpu = kmeans_cpu.predict(df_sample[['x','y']])
```
## GPU local
```
%%time
# Instantiate, train and predict.
data_gpu = cudf.DataFrame(data_cpu)
%%time
kmeans_gpu = gpuKMeans(init="k-means||",
n_clusters=6,
random_state=0)
kmeans_gpu.fit(data_gpu[['x','y']])
labels_gpu = kmeans_gpu.predict(data_gpu[['x','y']])
```
## distributed Dask with GPU
```
## spin up a remote dask cluster
from hyperplane import notebook_common as nc
client, cluster = nc.initialize_cluster(
num_workers = 2,
ngpus = 1,
nprocs=1,
nthreads=12,
ram_gb_per_proc=80,
cores_per_worker=12,
scheduler_ram = "4Gi",
scheduler_cpu = "1500m",
node_selector = '',
)
client.nthreads()
%%time
import dask_cudf
file_path = "gs://shakdemo-hyperplane/data/synthetic_data/*.csv"
ddf = dask_cudf.read_csv(file_path)
ddf.head(2)
ddf.npartitions
ddf = client.persist(ddf)
ddf.map_partitions(len).compute().sum()
from cuml.dask.cluster.kmeans import KMeans as mnmgKMeans
%%time
# Setup the Dask task graph.
# Instantiate, train and predict.
kmeans_mnmg = mnmgKMeans(init="k-means||",
n_clusters=6,
random_state=0)
kmeans_mnmg.fit(ddf[['x','y']])
kmeans_mnmg_df = kmeans_mnmg.predict(ddf[['x','y']])
# Execute the Dask task graph.
labels_mnmg = kmeans_mnmg_df.compute()
# Display the output.
print('MNMG k-means labels:')
print(labels_mnmg.value_counts())
cluster.close()
```
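The imports above include `adjusted_rand_score`, which is never used. A possible follow-up (a sketch that would have to run before `cluster.close()`, and which assumes `predict` preserves the row order of `ddf`) is to compare the distributed cluster assignments against the generated ground-truth labels:
```
from sklearn.metrics import adjusted_rand_score

# bring both label columns back to host memory (cuDF Series -> pandas -> numpy)
true_labels = ddf['label'].compute().to_pandas().values
pred_labels = labels_mnmg.to_pandas().values

# ARI is invariant to the arbitrary permutation of cluster ids; 1.0 means perfect agreement
print('Adjusted Rand Index:', adjusted_rand_score(true_labels, pred_labels))
```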
# Model Comparison
## Models
1. kcbert
2. kcbert fine-tuned with nsmc dataset
3. kcbert fine-tuned with naver shopping review dataset
```
import torch
from torch import nn, Tensor
from torch.optim import Optimizer
from torch.utils.data import DataLoader, RandomSampler, DistributedSampler, random_split
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.nn import CrossEntropyLoss
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning import LightningModule, Trainer, seed_everything
from pytorch_lightning.metrics.functional import accuracy, precision, recall
from transformers import AdamW, BertForSequenceClassification, AdamW, BertConfig, AutoTokenizer, BertTokenizer, TrainingArguments
from keras.preprocessing.sequence import pad_sequences
import random
import numpy as np
import time
import datetime
import pandas as pd
import os
from tqdm import tqdm
import pandas as pd
from transformers import AutoTokenizer, AutoModelWithLMHead
from keras.preprocessing.sequence import pad_sequences
if torch.cuda.is_available():
device = torch.device("cuda")
print('There are %d GPU(s) available.' % torch.cuda.device_count())
print('We will use the GPU:', torch.cuda.get_device_name(0))
else:
print('No GPU available, using the CPU instead.')
device = torch.device("cpu")
pj_path = os.getenv('HOME') + '/Projects/JeongCheck'
test_set = pd.read_csv(pj_path + '/crawling_data/google_reviews.csv', index_col=0)
print(len(test_set))
test_set.head()
def check_make_df(place, data):
print(f'##### {place} #####')
print(data.head)
print(f'{place} len :', len(data))
if any(data.isna().sum()) == True:
test_set.dropna(inplace=True)
print('nan values check :', any(data.isna().sum()))
neutral_portion = len(data[data.label==2]) / len(data)
print('neutral label portion :', neutral_portion)
new_data = data[data.label != 2]
data = new_data
print('final length of data :', len(new_data))
return data
test_set = check_make_df('total', test_set)
from transformers import BertForSequenceClassification, AdamW, BertConfig
tokenizer = AutoTokenizer.from_pretrained("beomi/kcbert-base")
# Load BertForSequenceClassification, the pretrained BERT model with a single
# linear classification layer on top.
model_kc = BertForSequenceClassification.from_pretrained(
"beomi/kcbert-base",
num_labels = 2,
output_attentions = False, # Whether the model returns attentions weights.
output_hidden_states = False, # Whether the model returns all hidden-states.
)
model_nsmc = BertForSequenceClassification.from_pretrained(
pj_path + "/checkpoint-1500",
num_labels = 2,
output_attentions = False, # Whether the model returns attentions weights.
output_hidden_states = False, # Whether the model returns all hidden-states.
)
model_sent = BertForSequenceClassification.from_pretrained(
pj_path + "/bert_model/checkpoint-2000",
num_labels = 2,
output_attentions = False, # Whether the model returns attentions weights.
output_hidden_states = False, # Whether the model returns all hidden-states.
)
def param_check(model):
params = list(model.named_parameters())
print('The BERT model has {:} different named parameters.\n'.format(len(params)))
print('==== Embedding Layer ====\n')
for p in params[0:5]:
print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))
print('\n==== First Transformer ====\n')
for p in params[5:21]:
print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))
print('\n==== Output Layer ====\n')
for p in params[-4:]:
print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))
param_check(model_kc)
param_check(model_nsmc)
param_check(model_sent)
device = "cuda:0"
model_kc = model_kc.to(device)
model_nsmc = model_nsmc.to(device)
model_sent = model_sent.to(device)
def convert_input_data(sentences):
tokenized_texts = [tokenizer.tokenize(sent) for sent in sentences]
MAX_LEN = 64
    # convert tokens to integer indices
    input_ids = [tokenizer.convert_tokens_to_ids(x) for x in tokenized_texts]
    # truncate sentences to MAX_LEN and pad the remainder with zeros
    input_ids = pad_sequences(input_ids, maxlen=MAX_LEN, dtype="long", truncating="post", padding="post")
    # initialize the attention masks
    attention_masks = []
    # set the attention mask to 1 for real tokens and 0 for padding
for seq in input_ids:
seq_mask = [float(i>0) for i in seq]
attention_masks.append(seq_mask)
inputs = torch.tensor(input_ids)
masks = torch.tensor(attention_masks)
return inputs, masks
def test_sentences(sentences, model):
    # switch the model to evaluation mode
model.eval()
inputs, masks = convert_input_data(sentences)
    # move the data to the GPU
b_input_ids = inputs.to(device)
b_input_mask = masks.to(device)
    # disable gradient computation
    with torch.no_grad():
        # forward pass
outputs = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask)
    # grab the logits
    logits = outputs[0]
    # move the results back to the CPU
logits = logits.detach().cpu().numpy()
return logits
```
## Data conversion
```
def preprocessing(df):
df.document=df.comment.replace('[^A-Za-zㄱ-ㅎㅏ-ㅣ가-힣]+','')
return df
# result = preprocessing(gr_data)
# result = result.dropna()
# print(result)
# extract the comments to run sentiment analysis on
def export_com(preprocessed_df):
sens =[]
for sen in preprocessed_df.comment:
sens.append(sen)
    print('check length :', len(sens), len(preprocessed_df)) # check the counts match
print('sample sentence :', sens[1])
return sens
```
label : negative (0), positive (1), neutral (2)
pred : index of the argmax over the model outputs (0 = negative, 1 = positive)
```
def make_predicted_label(sen, model):
sen = [sen]
score = test_sentences(sen, model)
result = np.argmax(score)
if result == 0: # negative
return 0
elif result == 1: # positive
return 1
def predict_label(model, df, place_name):
result = preprocessing(df)
result = result.dropna()
sens = export_com(result)
scores_data=[]
for sen in sens:
scores_data.append(make_predicted_label(sen, model))
df['pred'] = scores_data
cor = df[df.label == df.pred]
uncor = df[df.label != df.pred]
print('correct prediction num :', len(cor))
print('uncorrect prediction num :', len(uncor))
print('correct label check :' ,set(cor.label))
df.to_csv(pj_path + f'/prediction_data/{place_name}_pred.csv')
return df
```
## Loss (RMSE)
```
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import math
def rmse(y, y_pred):
from sklearn.metrics import mean_squared_error
import math
rmse_label = math.sqrt(mean_squared_error(y, y_pred))
print('rmse of label :', rmse_label)
```
## Accuracy
```
def acc(y, y_pred, total):
correct = (y_pred == y).sum().item()
print(f'Accuracy of the network on the {total} test text: %d %%' % (
100 * correct / total))
```
## F-1 score
```
from sklearn.metrics import f1_score, classification_report
def f1(y, y_pred):
score = f1_score(y, y_pred)
report = classification_report(y, y_pred)
print('## f1-score ##')
print('f1 score :', score)
print('## classification report ##')
print(report)
```
## calculate performance
- RMSE
- Accuracy
- f1-score
```
def cal_perform(df):
y = df.label
y_pred = df.pred
if len(y) == len(y_pred):
total = len(y)
print('same length')
print('label length :', total)
else:
print('different length')
rmse(y, y_pred)
acc(y, y_pred, total)
f1(y, y_pred)
```
## Compute performance metrics for each model
```
model_list = [model_kc, model_nsmc, model_sent]
model_list_idx = [[idx, model] for idx, model in enumerate(model_list)]
model_name = ['model_kc', 'model_nsmc', 'model_sent']
for model, name in zip(model_list_idx, model_name):
print(f'===== {name} predict .... =====')
data = predict_label(model[-1], test_set, model[0])
cal_perform(data)
```
## Problem
- Let's say we are maintaining a mapping of key-value pairs, with the key being the name of a movie and the value being the list of reviews for that movie.
- How do we check the number of reviews and insert a new review for a given movie in the most "pythonic" way possible?
```
all_movie_names = ['Python3 beats Python2', 'Python2 end game', 'Python3 is the future']
# mapping of reviewed movies only
movie_reviews = {
'Python2 end game': [4.2, 5.0, 3.9, 2.5],
'Python3 beats Python2': [2.3, 4.2, 4.9]
}
```
## Answer
- use .get(), .setdefault(), and defaultdict() wisely
#### 0. getting the number of reviews per movie
```
# BAD WAY: since does not handle the case where the movie has not yet been reviewed
for movie in all_movie_names:
count = len(movie_reviews[movie]) #<0>
print(f'[{movie}] was reviewed [{count}] times')
# BAD WAY: does the job but is not very pythonic
for movie in all_movie_names:
try:
count = len(movie_reviews[movie])
except KeyError:
count = 0
print(f'[{movie}] was reviewed [{count}] times')
# GOOD WAY: does the job and is pythonic
for movie in all_movie_names:
count = len(movie_reviews.get(movie, [])) #<1>
    print(f'[{movie}] was reviewed [{count}] times')
```
#### 1. Insert a new review for an already or not yet reviewed movie
- let's say a user reviewed all the movies and gave a score of 4.0 to all of them.
```
# BAD WAY: not pythonic at all.
movie_reviews = {
'Python2 end game': [4.2, 5.0, 3.9, 2.5],
'Python3 beats Python2': [2.3, 4.2, 4.9]
}
for movie in all_movie_names:
if movie in movie_reviews: # case where the movie has already been reviewed
movie_reviews[movie].append(4.0)
else:
movie_reviews[movie] = [4.0]
print(movie_reviews)
# GOOD WAY: but could be better.
movie_reviews = {
'Python2 end game': [4.2, 5.0, 3.9, 2.5],
'Python3 beats Python2': [2.3, 4.2, 4.9]
}
for movie in all_movie_names:
movie_reviews.setdefault(movie, []).append(4.0) #<2>
print(movie_reviews)
# BEST WAY:
from collections import defaultdict
movie_reviews = defaultdict(list) #<3>
print(movie_reviews['crazy movie']) #<3>
print(movie_reviews)
movie_reviews['Python2 end game'] = [4.2, 5.0, 3.9, 2.5]
movie_reviews['Python3 beats Python2'] = [2.3, 4.2, 4.9]
print(movie_reviews)
for movie in all_movie_names:
movie_reviews[movie].append(4.0) #<4>
print(movie_reviews)
```
## Discussion
- <0> using the indexing syntax to get an item in a dictionary raises a KeyError if the key is not found.
- <1> the .get() method lets us return a default value if the key is not found.
- <2> .setdefault() inserts the key with the provided default value if it is not yet in the dict.
- <3> telling python that every value in this dict should be initialized to an empty list when accessed for the first time.
- <4> since every value defaults to a list, we are free to append without worrying whether the key is already in the dict.
# Mask R-CNN - Inspect Custom Trained Model
Code and visualizations to test, debug, and evaluate the Mask R-CNN model.
```
import os
import cv2
import sys
import random
import math
import re
import time
import numpy as np
import tensorflow as tf
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import skimage
import glob
# Root directory of the project
ROOT_DIR = os.getcwd()
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
import mrcnn.model as modellib
from mrcnn.model import log
import food
%matplotlib inline
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
FOOD_WEIGHTS_PATH = r"C:\FOOD\logs\food20191001T0029\mask_rcnn_food_0030.h5"  # TODO: update this path (raw string so the backslashes are not treated as escape sequences)
IMAGE_DIR = os.path.join(ROOT_DIR, "images")
```
## Configurations
```
config = food.foodConfig()
FOOD_DIR = os.path.join(ROOT_DIR, "food/dataset")
# Override the training configurations with a few
# changes for inferencing.
class InferenceConfig(config.__class__):
# Run detection on one image at a time
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
```
## Notebook Preferences
```
# Device to load the neural network on.
# Useful if you're training a model on the same
# machine, in which case use CPU and leave the
# GPU for training.
DEVICE = "/cpu:0" # /cpu:0 or /gpu:0
# Inspect the model in training or inference modes
# values: 'inference' or 'training'
# TODO: code for 'training' test mode not ready yet
TEST_MODE = "inference"
def get_ax(rows=1, cols=1, size=16):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Adjust the size attribute to control how big to render images
"""
_, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
return ax
```
## Load Validation Dataset
```
# Load validation dataset
dataset = food.foodDataset()
dataset.load_food(FOOD_DIR, "val")
# Must call before using the dataset
dataset.prepare()
print("Images: {}\nClasses: {}".format(len(dataset.image_ids), dataset.class_names))
```
## Load Model
```
# Create model in inference mode
with tf.device(DEVICE):
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR,
config=config)
# load the last model you trained
# weights_path = model.find_last()[1]
# Load weights
print("Loading weights ", custom_WEIGHTS_PATH)
model.load_weights(custom_WEIGHTS_PATH, by_name=True)
from importlib import reload # was constantly changing the visualization, so I decided to reload it instead of restarting the notebook
reload(visualize)
```
# Run Detection on Images
```
image_id = random.choice(dataset.image_ids)
image, image_meta, gt_class_id, gt_bbox, gt_mask =\
modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
info = dataset.image_info[image_id]
print("image ID: {}.{} ({}) {}".format(info["source"], info["id"], image_id,
dataset.image_reference(image_id)))
# Run object detection
results = model.detect([image], verbose=1)
# Display results
ax = get_ax(1)
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
dataset.class_names, r['scores'], ax=ax,
title="Predictions")
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)
```
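Beyond eyeballing individual predictions, it is useful to get a quantitative score. The sketch below follows the usual Mask R-CNN inspection pattern and averages `utils.compute_ap` over a handful of random validation images; the sample size of 10 is an arbitrary choice, not part of the original notebook.
```
# Compute mean Average Precision (mAP) on a few random validation images
APs = []
for image_id in np.random.choice(dataset.image_ids, 10):
    image, image_meta, gt_class_id, gt_bbox, gt_mask =\
        modellib.load_image_gt(dataset, config, image_id, use_mini_mask=False)
    results = model.detect([image], verbose=0)
    r = results[0]
    AP, precisions, recalls, overlaps = utils.compute_ap(
        gt_bbox, gt_class_id, gt_mask,
        r["rois"], r["class_ids"], r["scores"], r["masks"])
    APs.append(AP)
print("mAP:", np.mean(APs))
```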
<a href="https://colab.research.google.com/github/john-s-butler-dit/Numerical-Analysis-Python/blob/master/Chapter%2005%20-%20IVP%20Consistent%20Convergence%20Stability/502_Convergent.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Convergence of a Multistep Method
#### John S Butler
[email protected]
[Course Notes](https://johnsbutler.netlify.com/files/Teaching/Numerical_Analysis_for_Differential_Equations.pdf) [Github](https://github.com/john-s-butler-dit/Numerical-Analysis-Python)
## Overview
A one-step or multistep method is used to approximate the solution of an initial value problem of the form
\begin{equation} \frac{dy}{dt}=f(t,y),\end{equation}
with the initial condition
\begin{equation} y(a)=\alpha.\end{equation}
The method should only be used if it satisfies the three criteria:
1. that the difference equation is __consistent__ with the differential equation;
2. that the numerical solution is __convergent__ to the exact solution of the differential equation;
3. that the numerical solution is __stable__.
In the notebooks in this folder we will illustrate examples of consistent and inconsistent, convergent and non-convergent, and stable and unstable methods.
## Introduction to Convergence
In this notebook we will illustrate a non-convergent method. The video below outlines the notebook.
```
from IPython.display import HTML
HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/skJSvK52nq0" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
```
### Definition
The solution $w_i$ of a multistep method is said to be __convergent__ with respect to
the exact solution of the differential equation if
\begin{equation} \lim_{h \rightarrow 0}\max_{1 \leq i \leq N}|y(t_i)-w_i|=0.\end{equation}
All the Runge Kutta, Adams-Bashforth and Adams-Moulton methods are convergent.
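To make the definition concrete, here is a small self-contained check (not part of the original notebook) that applies the forward Euler method, the simplest convergent one-step scheme, to the test problem used later in this notebook; the mesh sizes are arbitrary choices.
```
import numpy as np

def euler_max_error(N, a=0.0, b=2.0):
    # forward Euler on y' = t - y, y(0) = 1, with exact solution y(t) = 2 e^{-t} + t - 1
    h = (b - a) / N
    t = np.linspace(a, b, N + 1)
    w = np.zeros(N + 1)
    w[0] = 1.0
    for k in range(N):
        w[k + 1] = w[k] + h * (t[k] - w[k])
    return np.max(np.abs(2.0 * np.exp(-t) + t - 1.0 - w))

for N in [10, 20, 40, 80]:
    print(N, euler_max_error(N))  # the maximum error roughly halves each time h is halved
```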
## 2-step Abysmal Butler Multistep Method
This notebook will illustrate a non-convergent multistep method using the Abysmal-Butler method, named with great pride after the author.
The 2-step Abysmal Butler difference equation is given by
\begin{equation}w_{i+1} = w_{i} + \frac{h}{2}(4f(t_i,w_i)-3f(t_{i-1},w_{i-1})),\end{equation}
The final section of this notebook shows that the method is non-convergent for all differential equations.
## Initial Value Problem
To illustrate convergence we will apply the Abysmal-Butler multistep method to the linear initial value problem
\begin{equation} y^{'}=t-y, \ \ (0 \leq t \leq 2) \end{equation}
with the initial condition
\begin{equation}y(0)=1,\end{equation}
with the exact solution
\begin{equation}y(t)= 2e^{-t}+t-1.\end{equation}
## Python Libraries
```
import numpy as np
import math
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt # side-stepping mpl backend
import matplotlib.gridspec as gridspec # subplots
import warnings
warnings.filterwarnings("ignore")
```
### Defining the function
\begin{equation}f(t,y)=t-y.\end{equation}
```
def myfun_ty(t,y):
return t-y
```
## Discrete Interval
To illustrate that the method is internally convergent but does not converge to the exact solution, we define two discrete meshes, one coarse and one fine.
### Coarse mesh
Defining the step size $h$ from the interval range $a \leq t \leq b$ and number of steps $N$
\begin{equation}h=\frac{b - a}{N}.\end{equation}
This gives the discrete time steps,
$$t_i=t_0+ih,$$
where $t_0=a,$ for $i=0,1...,N$.
### Fine mesh
Defining the step size $h/2$ from the interval range $a \leq t \leq b$ and number of steps $2N$,
\begin{equation}\frac{h}{2}=\frac{b-a}{2N}.\end{equation}
This gives the discrete time steps,
\begin{equation}t_i=t_0+i\frac{h}{2},\end{equation}
where $t_0=a,$ for $i=0,1,...,2N$.
The plot below shows the coarse (red) and fine (green) discrete time intervals over the domain.
```
# Start and end of interval
b=2
a=0
# Step size
N=16
h=(b-a)/(N)
t=np.arange(a,b+h,h)
N2=N*2
h2=(b-a)/(N2)
t2=np.arange(a,b+h2,h2)
w2=np.zeros(len(t2))
fig = plt.figure(figsize=(10,4))
plt.plot(t2,0.01*np.ones(len(t2)),'o:',color='green',label='Fine Mesh')
plt.plot(t,0*t,'o:',color='red',label='Coarse Mesh')
plt.xlim((0,2))
plt.ylim((-0.1,.1))
plt.legend()
plt.title('Illustration of discrete time points')
plt.show()
```
## 2-step Abysmal Butler Method
The 2-step Abysmal Butler difference equation is
\begin{equation}w_{i+1} = w_{i} + \frac{h}{2}(4(t_i-w_i)-3(t_{i-1}-w_{i-1})), \end{equation}
for $i=1,...N.$
For $i=0$ the system of difference equation is:
\begin{equation}w_{1} = w_{0} + \frac{h}{2}(4(t_0-w_0)-3(t_{-1}-w_{-1})) \end{equation}
this is not solvable as <font color='red'> $w_{-1}$ </font> is unknown.
For $i=1$ the difference equation is:
\begin{equation}w_{2} = w_{1} + \frac{h}{2}(4(t_1-w_1)-3(t_{0}-w_{0})) \end{equation}
this is not solvable as <font color='red'> $w_{1}$ </font> is unknown. $w_1$ can be approximated using a one-step method. Here, as the exact solution is known,
\begin{equation}w_1=2e^{-t_1}+t_1-1.\end{equation}
```
### Initial conditions
IC=1
w=np.zeros(len(t))
w[0]=IC
w2=np.zeros(len(t2))
w2[0]=IC
w2[1]=(IC+1)*np.exp(-t2[1])+t2[1]-1
```
### Loop
```
# Fine Mesh
for k in range (1,N2):
w2[k+1]=(w2[k]+h2/2.0*(4*myfun_ty(t2[k],w2[k])-3*myfun_ty(t2[k-1],w2[k-1])))
w[1]=w2[2]
# Coarse Mesh
for k in range (1,N):
w[k+1]=(w[k]+h/2.0*(4*myfun_ty(t[k],w[k])-3*myfun_ty(t[k-1],w[k-1])))
```
### Plotting solution
```
def plotting(t,w,t2,w2):
y=(2)*np.exp(-t2)+t2-1
fig = plt.figure(figsize=(10,4))
plt.plot(t,w,'^:',color='red',label='Coarse Mesh (N)')
plt.plot(t2,w2, 'v-',color='green',label='Fine Mesh (2N)')
plt.plot(t2,y, 'o-',color='black',label='Exact?')
plt.xlabel('time')
plt.legend()
plt.title('Abysmal Butler')
plt.show
```
The plot below shows the Abysmal-Butler approximation on the coarse mesh (red) and on the fine mesh with $2N$ steps (green), together with the exact solution (black) of the initial value problem.
```
plotting(t,w,t2,w2)
```
## Convergent
The Abysmal-Butler method does satisfy the Lipschitz condition:
\begin{equation}F(t,w:h)-F(t,\hat{w}:h)=\frac{4}{2}f(t,w_i)-\frac{3}{2}f(t-h,w_{i-1})-\left(\frac{4}{2}f(t,\hat{w}_{i})-\frac{3}{2}f(t-h,\hat{w}_{i-1})\right),\end{equation}
\begin{equation}F(t,w:h)-F(t,\hat{w}:h)=\frac{4}{2}\left(f(t,w_i)-f(t,\hat{w}_i)\right)-\frac{3}{2}\left(f(t-h,w_{i-1})-f(t-h,\hat{w}_{i-1})\right),\end{equation}
\begin{equation}|F(t,w:h)-F(t,\hat{w}:h)|\leq\frac{4}{2}L|w_i-\hat{w}_i|+\frac{3}{2}L|w_{i-1}-\hat{w}_{i-1}|\leq \frac{7}{2}L\max\left(|w_i-\hat{w}_i|,|w_{i-1}-\hat{w}_{i-1}|\right).\end{equation}
This means it is internally convergent,
\begin{equation}|w_i-\hat{w_i}|\rightarrow 0\end{equation}
as $h \rightarrow 0$,
but as it is not consistent it will never converge to the exact solution
\begin{equation} |y_i-w_i| \not= 0.\end{equation}
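As a self-contained sketch (re-implementing the same difference equation rather than reusing the plotting helpers above), one can check numerically that the gap between successive mesh refinements shrinks while the error against the exact solution does not:
```
def abysmal_butler_sketch(N, a=0.0, b=2.0):
    h = (b - a) / N
    t = np.linspace(a, b, N + 1)
    w = np.zeros(N + 1)
    w[0] = 1.0
    w[1] = 2.0 * np.exp(-t[1]) + t[1] - 1.0  # exact value used to start the 2-step scheme
    for k in range(1, N):
        w[k + 1] = w[k] + h / 2.0 * (4.0 * (t[k] - w[k]) - 3.0 * (t[k - 1] - w[k - 1]))
    return t, w

for N in [16, 32, 64, 128]:
    t_c, w_c = abysmal_butler_sketch(N)
    t_f, w_f = abysmal_butler_sketch(2 * N)
    exact = 2.0 * np.exp(-t_c) + t_c - 1.0
    # the internal gap shrinks with h, but the error against the exact solution does not vanish
    print(N, np.max(np.abs(w_c - w_f[::2])), np.max(np.abs(w_c - exact)))
```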
```
d = {'time': t[0:5], 'Abysmal Butler': w[0:5],'Abysmal Butler w2 N*2': w2[0:10:2],
'w-w2':np.abs(w[0:5]-w2[0:10:2])}
df = pd.DataFrame(data=d)
df
```
## Multivariate decision trees
In this section we show how to develop the OC1 decision tree method.
OC1 classifier is divided into several steps:
1. Get all possible hyperplanes $H_{i}$.
2. Choose one.
3. Perturb and find $v_{j}$.
4. Calculate gini index of each $H_{i}$.
5. Choose $H_{i}$ with lowest gini index.
Let's import required libraries.
```
import numpy as np
from random import randint, random
import pydot
```
We should restore the ``data_set`` and ``labels`` from the previous notebook.
```
%store -r data_set
%store -r labels
```
The Leaf class is the same as below.
```
class BinaryLeaf:
def __init__(self, elements, labels, ids):
self.L = None
self.R = None
self.elements = elements
self.labels = labels
self.completed = False
self.ids = ids
def set_R(self, Rleaf):
self.R = Rleaf
def set_L(self, Lleaf):
self.L = Lleaf
def set_elements(self, elements):
self.elements = elements
def get_elements(self):
return self.elements
def get_L(self):
return self.L
def get_R(self):
return self.R
def set_completed(self):
self.completed = True
def is_completed(self):
return self.completed
def get_labels(self):
return self.labels
def set_ids(self, ids):
self.ids = ids
def get_ids(self):
return self.ids
```
Compared to C4.5 and CART, we have one more variable ``R``, a parameter that sets the number of loops in which a feature is chosen at random to check whether perturbing it gives a better split. See ``build_level()``.
```
ids = list(range(len(data_set)))
root = BinaryLeaf(data_set, labels, ids)
current_node = root
R = 10
```
In the method below we compute $V_{j}$, which tells us whether a given object is above or below the hyperplane. It can be formulated as:
$\sum_{i=1}^{d}a_{i}x_{i}+a_{d+1}>0$, where $a_{1},\ldots,a_{d+1}$ are coefficients. In our case $a_{d+1}$ is our label.
```
def compute_v(element, scv):
return np.sum(np.multiply(element, scv[:-1])) + scv[-1]
def compare_two_leafs(leaf1, leaf2):
labels1 = leaf1.labels
labels2 = leaf2.labels
if len(labels1) == len(labels2):
for i in range(len(labels1)):
if labels1[i] != labels2[i]:
return False
return True
return False
def is_leaf_completed(node):
if node.is_completed():
if node.get_L() != None and not node.get_L().is_completed():
return node.get_L()
elif node.get_R() != None and not node.get_R().is_completed():
return node.get_R()
elif node.get_L() == None and node.get_R() == None:
return None
elif node.get_L().is_completed() or node.get_R().is_completed():
new_node = is_leaf_completed(node.get_L())
if new_node == None:
return is_leaf_completed(node.get_R())
else:
return new_node
else:
return None
return node
```
The Gini index can be calculated as shown below. Keep in mind that this helper only returns the sum of squared class proportions for a given node; it has to be subtracted from 1, as done in the ``get_all_possible_splits_by_gini`` method.
```
def calculate_gini(labels):
    labels = np.array(labels)
    unique_labels = np.unique(labels)
    gini = 0
    for label in unique_labels:
        # squared proportion of objects carrying this label (Gini impurity term)
        gini = gini + np.square(np.sum(labels == label) / len(labels))
    return gini
```
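As a quick sanity check of this helper (with the fix above), a perfectly mixed two-class node should give an impurity of 0.5:
```
toy_labels = np.array([0, 0, 1, 1])
print(1 - calculate_gini(toy_labels))  # 1 - (0.5**2 + 0.5**2) = 0.5, the maximum for two classes
```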
In the method below we calculate all possible hyperplanes by computing the Gini index for each feature. It is similar to what we did in the CART method, but the result will be "fixed" during the perturb part of the OC1 method.
```
def get_all_possible_splits_by_gini(leaf):
leaf_elements = leaf.elements
labels = leaf.labels
ginis = []
for i in range(len(leaf_elements[0])):
feature_ginis = []
feature_column = np.array(leaf_elements)[:, i]
for feature in feature_column:
distinguish = feature_column <= feature
left_labels = np.array(labels)[distinguish]
right_labels = np.array(labels)[~distinguish]
gini = 1 - calculate_gini(left_labels) - calculate_gini(right_labels)
feature_ginis.append([feature,gini])
        ginis.append(min(feature_ginis, key=lambda pair: pair[1]))  # keep the threshold with the lowest gini for this feature
return ginis
```
We also have a method to find the current leaf to be split. It uses the utilities implemented above.
```
def find_current_level_data(root):
return is_leaf_completed(root)
```
The next step is to divide objects in the leaf into two sets which are above and below the hyperplane.
```
def divide_data_hiperplane(leaf,scv):
below = []
above = []
below_labels = []
above_labels = []
below_ids = []
above_ids = []
for i in range(len(leaf.elements)):
v = compute_v(leaf.elements[i],scv) > 0
if v:
above.append(leaf.elements[i])
above_labels.append(leaf.labels[i])
above_ids.append(leaf.ids[i])
else:
below.append(leaf.elements[i])
below_labels.append(leaf.labels[i])
below_ids.append(leaf.ids[i])
return np.array(below), np.array(above), np.array(below_labels), np.array(above_labels), below_ids, above_ids
```
The coefficients that we used above can be calculated as follows:
```
def get_coefficiency(splits):
splits = np.array(splits)
scv = np.zeros(len(splits)+1)
min_split_index = np.argmin(splits[:,1])
scv[min_split_index] = 1
scv[-1] = -splits[min_split_index][1]
return scv
```
The assignment array can be computed as: $U_{j}=\frac{a_{m}x_{jm}-V_{j}}{x_{jm}}$.
```
def compute_u(element, scv, feature):
return (scv[feature] * element[feature] - compute_v(element, scv)) / element[feature]
```
A short method for sorting the $U$ for the split can be implemented as below. We use it in the perturb function below.
```
def sort_u(element):
return np.sort(element)
```
The perturb function is the core of the OC1 method. It evaluates the Gini index for different perturbations of a single coefficient, keeps the combination with the best (lowest) Gini index, and thereby "fixes" the previously calculated coefficients.
```
def perturb(leaf, scv, feature, old_gini):
u=[]
for element in leaf.elements:
u.append(compute_u(element, scv, feature))
splits = sort_u(np.array(u))
am = []
for split in splits:
        new_scv = np.copy(scv)  # copy so each candidate split keeps its own coefficient vector
        new_scv[feature] = split
        below, above, below_label, above_label, below_ids, above_ids = divide_data_hiperplane(leaf, new_scv)
        gini = 1 - calculate_gini(below_label) - calculate_gini(above_label)
am.append([new_scv, gini])
am = np.array(am)
best_split_index = np.argmin(am[:,1])
if am[best_split_index][1] < old_gini:
return am[best_split_index][1], am[best_split_index][0]
elif am[best_split_index][1] == old_gini:
if random() < 0.3:
return am[best_split_index][1], am[best_split_index][0]
return old_gini, scv
```
The build_level method combines the above functions: it splits the data into two leaves, assigns them, and stops building the tree once no more leaves to divide are found.
```
def build_level(root, split_history):
leaf = find_current_level_data(root)
if leaf == None:
return
splits = get_all_possible_splits_by_gini(leaf)
split_coefficiency_vector = get_coefficiency(splits)
below,above, below_label, above_label, below_ids, above_ids = divide_data_hiperplane(leaf,split_coefficiency_vector)
gini = 1 - calculate_gini(below_label) - calculate_gini(above_label)
for c in range(R):
feature = randint(0,len(leaf.elements[0])-1)
gini, split_coefficiency_vector = perturb(leaf, split_coefficiency_vector, feature, gini)
below, above, below_label, above_label, below_ids, above_ids = divide_data_hiperplane(leaf,split_coefficiency_vector)
left_leaf = BinaryLeaf(below, below_label, below_ids)
right_leaf = BinaryLeaf(above, above_label, above_ids)
split_history.append([str(leaf.ids), str(left_leaf.ids)])
split_history.append([str(leaf.ids), str(right_leaf.ids)])
leaf.set_completed()
if len(np.unique(below_label)) == 1:
left_leaf.set_completed()
if len(np.unique(above_label)) == 1:
right_leaf.set_completed()
if compare_two_leafs(leaf, left_leaf) or compare_two_leafs(leaf,right_leaf):
leaf.set_completed()
else:
leaf.set_R(right_leaf)
leaf.set_L(left_leaf)
build_level(root, split_history)
return root, split_history
```
Execute the level building function.
```
def build(root):
split_history = []
return build_level(root, split_history)
oc1_tree, split_history_oc1 = build(root)
```
The plot function is the same as in the previous methods:
```
def plot_tree(split_history):
tree = pydot.Dot(graph_type='graph')
for split in split_history:
new_edge = pydot.Edge(split[0], split[1])
tree.add_edge(new_edge)
tree.write('oc1_tree.png', format='png')
plot_tree(split_history_oc1)
```
And display the tree:
```
from IPython.display import Image
Image(filename='oc1_tree.png')
```
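The image only shows which sets of ids were split. As a small additional sketch (not part of the original OC1 code), the helper below walks the tree built above and prints the label distribution inside each terminal leaf:
```
def collect_terminal_leaves(node, out=None):
    # depth-first walk of the BinaryLeaf tree; a node without children is terminal
    if out is None:
        out = []
    if node is None:
        return out
    if node.get_L() is None and node.get_R() is None:
        out.append(node)
        return out
    collect_terminal_leaves(node.get_L(), out)
    collect_terminal_leaves(node.get_R(), out)
    return out

for leaf in collect_terminal_leaves(oc1_tree):
    values, counts = np.unique(leaf.get_labels(), return_counts=True)
    print(leaf.get_ids(), dict(zip(values.tolist(), counts.tolist())))
```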
**Introduction to surrogate Modelling**
Pierre Kerfriden, Mines ParisTech / Cardiff University
Content:
- Exercise 1. Frequentist model selection
- Demo 2. Bayesian generalised linear regression
- Exercise 3. Nonparametric bayesian modelling
- Exercise 4. Bayesian optimisation
---
**Exercise 1. Frequentist model selection**
1. Propose an algorithm to select the polynomial order optimally. The number of model evaluations is fixed for now (*e.g.* 10 points)
2. Propose an algorithm to select number of model evaluations optimally when the polynomial order is fixed
3. Propose an algorithm to select the polynomial order AND number of model evaluations optimally
4. Propose an algorithm to select the polynomial order, number of model evaluations and ridge regularisation parameter optimally
Note: np.random.seed(x) allows you to fix the seed of the random generator. If unchanged, the code will generate the same pseudo-random numbers every time it is run. Change x if you want to generate new random evaluation points.
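As a starting point for item 1 above, here is a minimal hold-out sketch; the toy target (the same 1D function used in Exercise 3 below), the 7/3 split and the range of orders are illustrative choices, not part of the original exercise code.
```
import numpy as np

rng = np.random.default_rng(0)
x = 2.0 * rng.random(10) - 1.0                       # the 10 available model evaluations
y = np.sin(3.0 * x - 2.25) + np.abs(x) + x           # toy target, as in SimpleModel1D below
x_tr, y_tr, x_va, y_va = x[:7], y[:7], x[7:], y[7:]  # simple hold-out split

best_order, best_err = None, np.inf
for order in range(1, 6):
    coeffs = np.polyfit(x_tr, y_tr, order)           # least-squares polynomial fit
    err = np.mean((np.polyval(coeffs, x_va) - y_va) ** 2)
    if err < best_err:
        best_order, best_err = order, err
print('selected polynomial order:', best_order, 'hold-out MSE:', best_err)
```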
---
**Exercise 2. Bayesian model selection**
- Select the polynomial order for N=30 model evaluation (random sequence)
- Suggest an algorithm for choosing the polynomial order (which may differ for the two input dimensions) and the number of model evaluations
Note: you may also switch to a compressive sensing model (TypeRegularisation = 1) and investigate the effect of the corresponding regularisation coefficient (RegulCoeff)
---
**Exercise 3: Nonparametric bayesian modelling: Gaussian Process Regression**
1. Implement an exponential Kernel: modify the Covariance matrix and the cross-covariance vector
2. Implement the automatic choice of the covariance length scale by maximising the data likelihood (at the moment, the amplitude of the Kernel is optimised)
3. Optimise both the amplitude and length-scale parameters
Note: look for comments "... is to be updated" in the code
```
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fmin_bfgs, fmin, fminbound
class SimpleModel1D:
def eval(self,x) :
f = np.sin(3.0*x-2.25) + abs(x) + x
return f
def plot(self,xLim,NPoint):
xPlot = np.linspace(xLim[0],xLim[1], NPoint)
yPlot = self.eval(xPlot)
plt.plot(xPlot,yPlot,c='black',)
class GPRegressionMM1D:
def __init__(self):
self.FullModel = None # Full model
self.Centers = None # list of coordinates of data points ("x")
self.Vals = None # list of function values at data points ("y")
self.NbRBFs = 10 # Number of data points
        self.TypeQuadrature = 'MC' # Type of automated sampling: 'MC' (random) or 'Uniform' (deterministic)
self.l = 1.0 # length scale of exponential covariance Kernel
self.Covariance = None # data covariance matrix
self.A = 1.0 # amplitude of exponential covariance Kernel
self.sigma = 1.e-8 # amplitude of white noise Kernel
def BuildQuadrature(self):
if self.TypeQuadrature == 'MC':
self.Centers = 2.0 * np.random.rand(self.NbRBFs,1) - 1.0
elif self.TypeQuadrature == 'Uniform':
self.Centers = np.linspace(-1,1,self.NbRBFs)
else:
            print('not coded')
def Fit(self):
self.Vals = np.zeros((self.NbRBFs,1))
for i in range(self.NbRBFs):
self.Vals[i] = self.FullModel.eval(self.Centers[i])
self.Covariance = np.zeros((len(self.Centers),len(self.Centers)))
#self.Covariance = self.sigma * np.identity(len(self.Centers)) # Covariance matrix is to be updated
for i in range(len(self.Centers)):
for j in range(len(self.Centers)):
distance = self.Centers[i] - self.Centers[j]
self.Covariance[i,j] = self.A * np.exp( -1.0/(2*self.l**2) * distance**2 )
self.Covariance = self.Covariance + self.sigma * np.identity(len(self.Centers))
#print('np.linalg.cond(self.Covariance) ', np.linalg.cond(self.Covariance))
def eval(self,x) :
CrossCo = np.zeros((len(self.Centers),1))
for i in range(len(self.Centers)):
#CrossCo[i] = 0. # Covariance vector is to be updated
distance = self.Centers[i] - x
CrossCo[i] = self.A * np.exp( -1.0/(2*self.l**2) * distance**2 )
Tmp = np.linalg.solve(self.Covariance,self.Vals)
f = np.dot(np.transpose(CrossCo),Tmp)
Tmp = np.linalg.solve(self.Covariance,CrossCo)
Covf = self.A - np.dot(np.transpose(CrossCo),Tmp)
return f, Covf
def LogLikelihood(self) :
Tmp = np.linalg.solve(self.Covariance,self.Vals)
# https://blogs.sas.com/content/iml/2012/10/31/compute-the-log-determinant-of-a-matrix.html
LogLike = - 0.5 * np.dot(np.transpose(self.Vals),Tmp) - 0.5 * self.Covariance.shape[0] * np.log(2*np.pi) #(eq. 2.30 rasmussen book Gaussian Process for Machine Learning)
#logdet = np.log(np.linalg.det(self.Covariance))
sign, logdet = np.linalg.slogdet(self.Covariance)
LogLike = LogLike - 0.5*logdet
return LogLike
def Objectif(self,mu):
self.l = np.exp(mu[0])
self.A = np.exp(mu[1])
self.sigma = np.exp(mu[2])
self.Fit() # recompute data covariance
return -1.0*self.LogLikelihood()
def Optimise(self):
muInit = [0.1 , 0., -10]
InitVal = self.Objectif(muInit)
#mu_opt = fmin_bfgs( self.Objectif, muInit , gtol=1e-3)
mu_opt = fmin( self.Objectif , muInit )
print('optimal parameter:', mu_opt, ' f value at optimal :', self.Objectif(mu_opt), 'Init : ', muInit , ' f value at init :', InitVal )
def plot(self,xLim,NPoint):
xPlot = np.linspace(xLim[0],xLim[1], NPoint)
# plot posterior mean and 95% credible region
yPlot = np.copy(xPlot)
yPlotP = np.copy(xPlot)
yPlotM= np.copy(xPlot)
for i in range(len(xPlot)):
f, Covf = self.eval(xPlot[i])
yPlot[i] = f
yPlotP[i] = f + 1.96 * Covf
yPlotM[i] = f - 1.96 * Covf
plt.plot(xPlot,yPlot,'blue')
plt.plot(xPlot,yPlotP,'r')
plt.plot(xPlot,yPlotM,'g')
plt.scatter(self.Centers,self.Vals, marker='o', c='black')
def GaussianProcessMetaModel1D():
print(' ----------------------------------')
print(' ---------- Exercise GP -----------')
print(' ----------------------------------')
print(' 1. Implement an exponential Kernel: modify the Covariance matrix and the cross-covariance vector')
print(' 2. Propose a Greedy algorithm to iteratively add points to the data set')
print(' 3. Implement the automatic choice of the covariance length scale by maximising the data likelihood')
NSampling = 4
SmoothingLength = 0.2
#TypeTypeQuadrature = 'Uniform'
TypeTypeQuadrature = 'MC'
RidgeCoeff = 1e-10
np.random.seed(11)
print(' ---------------------------------------')
print(' ---------- Gaussian process -----------')
print(' ---------------------------------------')
plt.figure()
M = SimpleModel1D()
#MM = KernelRegressionMM1D()
MM = GPRegressionMM1D()
MM.FullModel = M
MM.TypeQuadrature = TypeTypeQuadrature
MM.NbRBFs = NSampling # Number of uniformly, randomly distributed radial basis functions
MM.l = SmoothingLength # length scale of Kernel smoother
MM.RidgeCoeff = RidgeCoeff
MM.BuildQuadrature()
MM.Fit()
MM.plot([-1,1],100)
M.plot([-1,1],100)
plt.grid()
plt.show()
print(' ------------------------------------------------------')
print(' ---------- Optimisation of data likelihood -----------')
print(' ------------------------------------------------------')
print('log likelihood',MM.LogLikelihood())
MM.Optimise()
MM.plot([-1,1],100)
M.plot([-1,1],100)
plt.grid()
plt.show()
GaussianProcessMetaModel1D()
```
---
**Exercise 4: Bayesian optimisation**
Suggest and implement a Greedy algorithm to iteratively add points to the data set based on (a) minimising uncertainty (b) finding the location of the minimum of the function
Note: look for comment "change this" in the code
```
import numpy as np
import matplotlib.pyplot as plt
class SimpleModel1D:
def eval(self,x) :
f = np.sin(3.0*x-2.25) + abs(x) + x
return f
def plot(self,xLim,NPoint):
xPlot = np.linspace(xLim[0],xLim[1], NPoint)
yPlot = self.eval(xPlot)
plt.plot(xPlot,yPlot,c='black',)
class GPRegressionMM1D:
def __init__(self):
self.FullModel = None # Full model
self.Centers = None # list of coordinates of data points ("x")
self.Vals = None # list of function values at data points ("y")
self.NbRBFs = 10 # Number of data points
        self.TypeQuadrature = 'MC' # Type of automated sampling: 'MC' (random) or 'Uniform' (deterministic)
self.l = 1.0 # length scale of exponential covariance Kernel
self.Covariance = None # data covariance matrix
self.A = 1.0 # amplitude of exponential covariance Kernel
self.sigma = 1.e-8 # amplitude of white noise Kernel
self.ParameterSpace = [-1,1]
def BuildQuadrature(self):
if self.TypeQuadrature == 'MC':
self.Centers = (self.ParameterSpace[1] - self.ParameterSpace[0]) * np.random.rand(self.NbRBFs,1) + self.ParameterSpace[0]
elif self.TypeQuadrature == 'Uniform':
self.Centers = np.linspace(-1,1,self.NbRBFs)
else:
            print('not coded')
def Fit(self):
self.Vals = np.zeros((len(self.Centers),1))
for i in range(len(self.Centers)):
self.Vals[i] = self.FullModel.eval(self.Centers[i])
self.Covariance = np.zeros((len(self.Centers),len(self.Centers)))
for i in range(len(self.Centers)):
for j in range(len(self.Centers)):
distance = self.Centers[i] - self.Centers[j]
self.Covariance[i,j] = self.A * np.exp( -1.0/(2*self.l**2) * distance**2 )
self.Covariance = self.Covariance + self.sigma * np.identity(len(self.Centers))
print('np.linalg.cond(self.Covariance) ', np.linalg.cond(self.Covariance))
def eval(self,x) :
CrossCo = np.zeros((len(self.Centers),1))
for i in range(len(self.Centers)):
distance = self.Centers[i] - x
CrossCo[i] = self.A * np.exp( -1.0/(2*self.l**2) * distance**2 )
Tmp = np.linalg.solve(self.Covariance,self.Vals)
f = np.dot(np.transpose(CrossCo),Tmp)
Tmp = np.linalg.solve(self.Covariance,CrossCo)
Covf = self.A - np.dot(np.transpose(CrossCo),Tmp)
return f, Covf
def OptimNewPoint(self) :
#NewPoint = 2.0 * np.random.rand(1,1) - 1.0 # change this !!!
#grid_search = np.linspace(-1, 1, 100)
grid_search = 2.0*np.random.rand(50,1) - 1.0
min_val = 1.0e10
for i in range(len(grid_search)):
f, Covf = self.eval(grid_search[i])
if min_val > f - 1.96*Covf:
min_val = f - 1.96*Covf
NewPoint = grid_search[i]
print('NewPoint',NewPoint)
self.Centers = np.append(self.Centers,NewPoint)
def ActiveLearning(self,NActiveLearning) :
for i in range(NActiveLearning):
self.OptimNewPoint()
self.Fit()
self.plot(self.ParameterSpace,100)
plt.grid()
plt.show()
def plot(self,xLim,NPoint):
xPlot = np.linspace(xLim[0],xLim[1], NPoint)
# plot posterior mean and 95% credible region
yPlot = np.copy(xPlot)
yPlotP = np.copy(xPlot)
yPlotM= np.copy(xPlot)
for i in range(len(xPlot)):
f, Covf = self.eval(xPlot[i])
yPlot[i] = f
yPlotP[i] = f + 1.96 * Covf
yPlotM[i] = f - 1.96 * Covf
plt.plot(xPlot,yPlot,'blue')
plt.plot(xPlot,yPlotP,'r')
plt.plot(xPlot,yPlotM,'g')
plt.scatter(self.Centers,self.Vals, marker='o', c='black')
def GaussianProcessMetaModel1D():
print(' ----------------------------------')
print(' ---------- Exercise GP -----------')
print(' ----------------------------------')
print(' 1. Implement an exponential Kernel Covariance matrix ')
print(' 2. Propose a Greedy algorithm to iteratively add points to the data set')
print(' 3. Implement the automatic choice of the covariance length scale by maximising the data likelihood')
NSampling = 4
SmoothingLength = 0.2
#TypeTypeQuadrature = 'Uniform'
TypeTypeQuadrature = 'MC'
RidgeCoeff = 1e-10
np.random.seed(11)
print(' ---------------------------------------')
print(' ---------- Gaussian process -----------')
print(' ---------------------------------------')
plt.figure()
M = SimpleModel1D()
#MM = KernelRegressionMM1D()
MM = GPRegressionMM1D()
MM.FullModel = M
MM.TypeQuadrature = TypeTypeQuadrature
MM.ParameterSpace = [-1,1]
MM.NbRBFs = NSampling # Number of uniformly, randomly distributed radial basis functions
MM.l = SmoothingLength # length scale of Kernel smoother
MM.RidgeCoeff = RidgeCoeff
MM.BuildQuadrature()
MM.Fit()
MM.plot(MM.ParameterSpace,100)
M.plot([-1,1],100)
plt.grid()
plt.show()
print(' --------------------------------------')
print(' ---------- Active Learning -----------')
print(' --------------------------------------')
NActiveLearning = 4
MM.ActiveLearning(NActiveLearning)
GaussianProcessMetaModel1D()
```
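The acquisition rule implemented in `OptimNewPoint` above is a lower-confidence-bound style score (`f - 1.96*Covf`). A common alternative is Expected Improvement; the helper below is a stand-alone sketch (not part of the original exercise code) that could replace that score inside the candidate search, assuming a fitted `GPRegressionMM1D` instance.
```
from scipy.stats import norm

def expected_improvement(mm, x, best_f, xi=0.01):
    # Expected Improvement for minimisation at candidate x; xi is a small exploration margin
    mu, var = mm.eval(x)
    sigma = np.sqrt(max(float(var), 1e-12))   # guard against vanishing or slightly negative variance
    improvement = best_f - float(mu) - xi
    z = improvement / sigma
    return improvement * norm.cdf(z) + sigma * norm.pdf(z)

# usage sketch (assuming a fitted GPRegressionMM1D instance called mm):
# best_f = float(np.min(mm.Vals))
# candidates = 2.0 * np.random.rand(50, 1) - 1.0
# new_point = max(candidates, key=lambda x: expected_improvement(mm, x, best_f))
```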
|
github_jupyter
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fmin_bfgs, fmin, fminbound
class SimpleModel1D:
def eval(self,x) :
f = np.sin(3.0*x-2.25) + abs(x) + x
return f
def plot(self,xLim,NPoint):
xPlot = np.linspace(xLim[0],xLim[1], NPoint)
yPlot = self.eval(xPlot)
plt.plot(xPlot,yPlot,c='black',)
class GPRegressionMM1D:
def __init__(self):
self.FullModel = None # Full model
self.Centers = None # list of coordinates of data points ("x")
self.Vals = None # list of function values at data points ("y")
self.NbRBFs = 10 # Number of data points
self.TypeQuadrature = 'MC' # Type of automatised sempling 'MC' (random) or 'Uniform' (deterministic)
self.l = 1.0 # length scale of exponential covariance Kernel
self.Covariance = None # data covariance matrix
self.A = 1.0 # amplitude of exponential covariance Kernel
self.sigma = 1.e-8 # amplitude of white noise Kernel
def BuildQuadrature(self):
if self.TypeQuadrature == 'MC':
self.Centers = 2.0 * np.random.rand(self.NbRBFs,1) - 1.0
elif self.TypeQuadrature == 'Uniform':
self.Centers = np.linspace(-1,1,self.NbRBFs)
else:
print('nod coded')
def Fit(self):
self.Vals = np.zeros((self.NbRBFs,1))
for i in range(self.NbRBFs):
self.Vals[i] = self.FullModel.eval(self.Centers[i])
self.Covariance = np.zeros((len(self.Centers),len(self.Centers)))
#self.Covariance = self.sigma * np.identity(len(self.Centers)) # Covariance matrix is to be updated
for i in range(len(self.Centers)):
for j in range(len(self.Centers)):
distance = self.Centers[i] - self.Centers[j]
self.Covariance[i,j] = self.A * np.exp( -1.0/(2*self.l**2) * distance**2 )
self.Covariance = self.Covariance + self.sigma * np.identity(len(self.Centers))
#print('np.linalg.cond(self.Covariance) ', np.linalg.cond(self.Covariance))
def eval(self,x) :
CrossCo = np.zeros((len(self.Centers),1))
for i in range(len(self.Centers)):
#CrossCo[i] = 0. # Covariance vector is to be updated
distance = self.Centers[i] - x
CrossCo[i] = self.A * np.exp( -1.0/(2*self.l**2) * distance**2 )
Tmp = np.linalg.solve(self.Covariance,self.Vals)
f = np.dot(np.transpose(CrossCo),Tmp)
Tmp = np.linalg.solve(self.Covariance,CrossCo)
Covf = self.A - np.dot(np.transpose(CrossCo),Tmp)
return f, Covf
def LogLikelihood(self) :
Tmp = np.linalg.solve(self.Covariance,self.Vals)
# https://blogs.sas.com/content/iml/2012/10/31/compute-the-log-determinant-of-a-matrix.html
LogLike = - 0.5 * np.dot(np.transpose(self.Vals),Tmp) - 0.5 * self.Covariance.shape[0] * np.log(2*np.pi) #(eq. 2.30 rasmussen book Gaussian Process for Machine Learning)
#logdet = np.log(np.linalg.det(self.Covariance))
sign, logdet = np.linalg.slogdet(self.Covariance)
LogLike = LogLike - 0.5*logdet
return LogLike
def Objectif(self,mu):
self.l = np.exp(mu[0])
self.A = np.exp(mu[1])
self.sigma = np.exp(mu[2])
self.Fit() # recompute data covariance
return -1.0*self.LogLikelihood()
def Optimise(self):
muInit = [0.1 , 0., -10]
InitVal = self.Objectif(muInit)
#mu_opt = fmin_bfgs( self.Objectif, muInit , gtol=1e-3)
mu_opt = fmin( self.Objectif , muInit )
print('optimal parameter:', mu_opt, ' f value at optimal :', self.Objectif(mu_opt), 'Init : ', muInit , ' f value at init :', InitVal )
def plot(self,xLim,NPoint):
xPlot = np.linspace(xLim[0],xLim[1], NPoint)
# plot posterior mean and 95% credible region
yPlot = np.copy(xPlot)
yPlotP = np.copy(xPlot)
yPlotM= np.copy(xPlot)
for i in range(len(xPlot)):
f, Covf = self.eval(xPlot[i])
yPlot[i] = f
yPlotP[i] = f + 1.96 * Covf
yPlotM[i] = f - 1.96 * Covf
plt.plot(xPlot,yPlot,'blue')
plt.plot(xPlot,yPlotP,'r')
plt.plot(xPlot,yPlotM,'g')
plt.scatter(self.Centers,self.Vals, marker='o', c='black')
def GaussianProcessMetaModel1D():
print(' ----------------------------------')
print(' ---------- Exercise GP -----------')
print(' ----------------------------------')
print(' 1. Implement an exponential Kernel: modify the Covariance matrix and the cross-covariance vector')
print(' 2. Propose a Greedy algorithm to iteratively add points to the data set')
print(' 3. Implement the automatic choice of the covariance length scale by maximising the data likelihood')
NSampling = 4
SmoothingLength = 0.2
#TypeTypeQuadrature = 'Uniform'
TypeTypeQuadrature = 'MC'
RidgeCoeff = 1e-10
np.random.seed(11)
print(' ---------------------------------------')
print(' ---------- Gaussian process -----------')
print(' ---------------------------------------')
plt.figure()
M = SimpleModel1D()
#MM = KernelRegressionMM1D()
MM = GPRegressionMM1D()
MM.FullModel = M
MM.TypeQuadrature = TypeTypeQuadrature
MM.NbRBFs = NSampling # Number of uniformly, randomly distributed radial basis functions
MM.l = SmoothingLength # length scale of Kernel smoother
MM.RidgeCoeff = RidgeCoeff
MM.BuildQuadrature()
MM.Fit()
MM.plot([-1,1],100)
M.plot([-1,1],100)
plt.grid()
plt.show()
print(' ------------------------------------------------------')
print(' ---------- Optimisation of data likelihood -----------')
print(' ------------------------------------------------------')
print('log likelihood',MM.LogLikelihood())
MM.Optimise()
MM.plot([-1,1],100)
M.plot([-1,1],100)
plt.grid()
plt.show()
GaussianProcessMetaModel1D()
import numpy as np
import matplotlib.pyplot as plt
class SimpleModel1D:
def eval(self,x) :
f = np.sin(3.0*x-2.25) + abs(x) + x
return f
def plot(self,xLim,NPoint):
xPlot = np.linspace(xLim[0],xLim[1], NPoint)
yPlot = self.eval(xPlot)
plt.plot(xPlot,yPlot,c='black',)
class GPRegressionMM1D:
def __init__(self):
self.FullModel = None # Full model
self.Centers = None # list of coordinates of data points ("x")
self.Vals = None # list of function values at data points ("y")
self.NbRBFs = 10 # Number of data points
        self.TypeQuadrature = 'MC' # Type of automatic sampling: 'MC' (random) or 'Uniform' (deterministic)
self.l = 1.0 # length scale of exponential covariance Kernel
self.Covariance = None # data covariance matrix
self.A = 1.0 # amplitude of exponential covariance Kernel
self.sigma = 1.e-8 # amplitude of white noise Kernel
self.ParameterSpace = [-1,1]
def BuildQuadrature(self):
if self.TypeQuadrature == 'MC':
self.Centers = (self.ParameterSpace[1] - self.ParameterSpace[0]) * np.random.rand(self.NbRBFs,1) + self.ParameterSpace[0]
elif self.TypeQuadrature == 'Uniform':
self.Centers = np.linspace(-1,1,self.NbRBFs)
else:
            print('not coded')
def Fit(self):
self.Vals = np.zeros((len(self.Centers),1))
for i in range(len(self.Centers)):
self.Vals[i] = self.FullModel.eval(self.Centers[i])
self.Covariance = np.zeros((len(self.Centers),len(self.Centers)))
for i in range(len(self.Centers)):
for j in range(len(self.Centers)):
distance = self.Centers[i] - self.Centers[j]
self.Covariance[i,j] = self.A * np.exp( -1.0/(2*self.l**2) * distance**2 )
self.Covariance = self.Covariance + self.sigma * np.identity(len(self.Centers))
print('np.linalg.cond(self.Covariance) ', np.linalg.cond(self.Covariance))
def eval(self,x) :
CrossCo = np.zeros((len(self.Centers),1))
for i in range(len(self.Centers)):
distance = self.Centers[i] - x
CrossCo[i] = self.A * np.exp( -1.0/(2*self.l**2) * distance**2 )
Tmp = np.linalg.solve(self.Covariance,self.Vals)
f = np.dot(np.transpose(CrossCo),Tmp)
Tmp = np.linalg.solve(self.Covariance,CrossCo)
Covf = self.A - np.dot(np.transpose(CrossCo),Tmp)
return f, Covf
def OptimNewPoint(self) :
#NewPoint = 2.0 * np.random.rand(1,1) - 1.0 # change this !!!
#grid_search = np.linspace(-1, 1, 100)
grid_search = 2.0*np.random.rand(50,1) - 1.0
min_val = 1.0e10
for i in range(len(grid_search)):
f, Covf = self.eval(grid_search[i])
if min_val > f - 1.96*Covf:
min_val = f - 1.96*Covf
NewPoint = grid_search[i]
print('NewPoint',NewPoint)
self.Centers = np.append(self.Centers,NewPoint)
def ActiveLearning(self,NActiveLearning) :
for i in range(NActiveLearning):
self.OptimNewPoint()
self.Fit()
self.plot(self.ParameterSpace,100)
plt.grid()
plt.show()
def plot(self,xLim,NPoint):
xPlot = np.linspace(xLim[0],xLim[1], NPoint)
# plot posterior mean and 95% credible region
yPlot = np.copy(xPlot)
yPlotP = np.copy(xPlot)
yPlotM= np.copy(xPlot)
for i in range(len(xPlot)):
f, Covf = self.eval(xPlot[i])
yPlot[i] = f
yPlotP[i] = f + 1.96 * Covf
yPlotM[i] = f - 1.96 * Covf
plt.plot(xPlot,yPlot,'blue')
plt.plot(xPlot,yPlotP,'r')
plt.plot(xPlot,yPlotM,'g')
plt.scatter(self.Centers,self.Vals, marker='o', c='black')
def GaussianProcessMetaModel1D():
print(' ----------------------------------')
print(' ---------- Exercise GP -----------')
print(' ----------------------------------')
print(' 1. Implement an exponential Kernel Covariance matrix ')
print(' 2. Propose a Greedy algorithm to iteratively add points to the data set')
print(' 3. Implement the automatic choice of the covariance length scale by maximising the data likelihood')
NSampling = 4
SmoothingLength = 0.2
#TypeTypeQuadrature = 'Uniform'
TypeTypeQuadrature = 'MC'
RidgeCoeff = 1e-10
np.random.seed(11)
print(' ---------------------------------------')
print(' ---------- Gaussian process -----------')
print(' ---------------------------------------')
plt.figure()
M = SimpleModel1D()
#MM = KernelRegressionMM1D()
MM = GPRegressionMM1D()
MM.FullModel = M
MM.TypeQuadrature = TypeTypeQuadrature
MM.ParameterSpace = [-1,1]
MM.NbRBFs = NSampling # Number of uniformly, randomly distributed radial basis functions
MM.l = SmoothingLength # length scale of Kernel smoother
MM.RidgeCoeff = RidgeCoeff
MM.BuildQuadrature()
MM.Fit()
MM.plot(MM.ParameterSpace,100)
M.plot([-1,1],100)
plt.grid()
plt.show()
print(' --------------------------------------')
print(' ---------- Active Learning -----------')
print(' --------------------------------------')
NActiveLearning = 4
MM.ActiveLearning(NActiveLearning)
GaussianProcessMetaModel1D()
| 0.589244 | 0.970324 |
# U-Net: nuclei segmentation 1
This is an implementation of a [Kaggle kernel](https://www.kaggle.com/c0conuts/unet-imagedatagenerator-lb-0-336/notebook) of a [U-net](https://arxiv.org/abs/1505.04597) claiming to achieve a baseline score of 0.227.
```
%pwd
import os
import sys
import random
import warnings
import sklearn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tqdm import tqdm
from itertools import chain
from skimage import img_as_ubyte
from skimage.io import imread, imsave, imshow, concatenate_images
from skimage.segmentation import find_boundaries
from skimage.transform import resize
from skimage.morphology import label, remove_small_objects, remove_small_holes, watershed
from skimage.feature import peak_local_max
from sklearn.model_selection import train_test_split
from scipy import ndimage as ndi
from keras.preprocessing import image
from keras.models import Model, load_model
from keras.layers import Input
from keras.layers.core import Dropout, Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import backend as K
from utils.imaging import get_path, get_image_ids, label_mask, segmented_annotate, segmented_and_ground_truth_annotate
from utils.evaluate import keras_mean_iou, submit_kaggle
from utils import run_length_encoding
%matplotlib inline
plt.rcParams['image.cmap'] = 'gray'
warnings.filterwarnings('ignore', category=UserWarning, module='skimage')
```
Get the model name from the notebook name using JavaScript
```
%%javascript
IPython.notebook.kernel.execute('nb_name = ' + '"' + IPython.notebook.notebook_name + '"')
notebook_name = os.path.splitext(os.path.basename(nb_name))[0]
model_name = notebook_name + '.h5'
model_path = get_path('models') + model_name
submission_name = notebook_name + '.csv'
submission_path = get_path('submission') + submission_name
```
### 0. Parameters
```
seed = 42
# model parameters
BATCH_SIZE = 150
IMG_WIDTH = 128
IMG_HEIGHT = 128
IMG_CHANNELS = 3
TRAIN_PATH = get_path('data_train_1')
TEST_PATH = get_path('data_test_1')
test_size = 0.20
# augmentation parameters
aug_params = {
'shear_range' : 0.7,
'rotation_range' : 99,
'horizontal_flip' : True,
'vertical_flip' : True,
'zoom_range' : 0.3,
'width_shift_range' : 0.3,
'height_shift_range' : 0.3,
'fill_mode' : 'reflect'
}
```
### 1. Preprocess data
```
# Get train and test IDs
train_ids = get_image_ids(TRAIN_PATH)
test_ids = get_image_ids(TEST_PATH)
# Get and resize train images and masks
X = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
Y = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)
sizes_train = []
print('Getting and resizing train images and masks ... ')
sys.stdout.flush()
for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):
path = TRAIN_PATH + id_
img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]
sizes_train.append([img.shape[0], img.shape[1]])
img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
X[n] = img
mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)
for mask_file in next(os.walk(path + '/masks/'))[2]:
mask_ = imread(path + '/masks/' + mask_file)
mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
preserve_range=True), axis=-1)
mask = np.maximum(mask, mask_)
Y[n] = mask
# Get and resize test images
X_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
sizes_test = []
print('Getting and resizing test images ... ')
sys.stdout.flush()
for n, id_ in tqdm(enumerate(test_ids), total=len(test_ids)):
path = TEST_PATH + id_
img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]
sizes_test.append([img.shape[0], img.shape[1]])
img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
X_test[n] = img
```
### 2. Data augmentation
```
X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=test_size, random_state=seed)
image_datagen = image.ImageDataGenerator(**aug_params)
mask_datagen = image.ImageDataGenerator(**aug_params)
image_datagen.fit(X_train, augment=True, seed=seed)
mask_datagen.fit(Y_train, augment=True, seed=seed)
x = image_datagen.flow(X_train, batch_size=BATCH_SIZE, shuffle=True, seed=seed)
y = mask_datagen.flow(Y_train, batch_size=BATCH_SIZE, shuffle=True, seed=seed)
image_datagen_val = image.ImageDataGenerator()
mask_datagen_val = image.ImageDataGenerator()
image_datagen_val.fit(X_val, augment=True, seed=seed)
mask_datagen_val.fit(Y_val, augment=True, seed=seed)
x_val = image_datagen_val.flow(X_val, batch_size=BATCH_SIZE, shuffle=True, seed=seed)
y_val = mask_datagen_val.flow(Y_val, batch_size=BATCH_SIZE, shuffle=True, seed=seed)
# creating a training and validation generator that generate masks and images
train_generator = zip(x, y)
val_generator = zip(x_val, y_val)
```
### 3. Initialise U-Net model
```
# Build U-Net model
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
s = Lambda(lambda x: x / 255) (inputs)
c1 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (s)
c1 = Dropout(0.1) (c1)
c1 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c1)
p1 = MaxPooling2D((2, 2)) (c1)
c2 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p1)
c2 = Dropout(0.1) (c2)
c2 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c2)
p2 = MaxPooling2D((2, 2)) (c2)
c3 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p2)
c3 = Dropout(0.2) (c3)
c3 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c3)
p3 = MaxPooling2D((2, 2)) (c3)
c4 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p3)
c4 = Dropout(0.2) (c4)
c4 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c4)
p4 = MaxPooling2D(pool_size=(2, 2)) (c4)
c5 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p4)
c5 = Dropout(0.3) (c5)
c5 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c5)
u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same') (c5)
u6 = concatenate([u6, c4])
c6 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (u6)
c6 = Dropout(0.2) (c6)
c6 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c6)
u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same') (c6)
u7 = concatenate([u7, c3])
c7 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (u7)
c7 = Dropout(0.2) (c7)
c7 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c7)
u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same') (c7)
u8 = concatenate([u8, c2])
c8 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (u8)
c8 = Dropout(0.1) (c8)
c8 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c8)
u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same') (c8)
u9 = concatenate([u9, c1], axis=3)
c9 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (u9)
c9 = Dropout(0.1) (c9)
c9 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c9)
outputs = Conv2D(1, (1, 1), activation='sigmoid') (c9)
model = Model(inputs=[inputs], outputs=[outputs])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[keras_mean_iou])
model.summary()
```
### 4. Fit U-Net model
```
earlystopper = EarlyStopping(patience=3, verbose=1)
checkpointer = ModelCheckpoint(model_path, verbose=1, save_best_only=True)
results = model.fit_generator(train_generator,
validation_data=val_generator,
validation_steps=10,
steps_per_epoch=250,
epochs=3,
callbacks=[earlystopper, checkpointer]
)
```
### 5. Predict with U-Net model
```
# Predict on train, val and test
model = load_model(model_path, custom_objects={'keras_mean_iou': keras_mean_iou})
preds_train = model.predict(X, verbose=1)
preds_test = model.predict(X_test, verbose=1)
# Threshold predictions
preds_train_t = (preds_train > 0.5).astype(np.uint8)
preds_test_t = (preds_test > 0.5).astype(np.uint8)
# Create list of upsampled test masks
preds_test_upsampled = []
for i in range(len(preds_test)):
preds_test_upsampled.append(resize(np.squeeze(preds_test[i]),
(sizes_test[i][0], sizes_test[i][1]),
mode='constant', preserve_range=True))
# Create list of upsampled train masks
preds_train_upsampled = []
for i in range(len(preds_train)):
preds_train_upsampled.append(resize(np.squeeze(preds_train[i]),
(sizes_train[i][0], sizes_train[i][1]),
mode='constant', preserve_range=True))
```
### 6. Sanity check on some training examples
```
f, axarr = plt.subplots(2,3,figsize=(12,12))
ix1 = random.randint(0, len(preds_train_t) - 1)
ix2 = random.randint(0, len(preds_train_t) - 1)
axarr[0,0].imshow(X[ix1])
axarr[0,1].imshow(np.squeeze(Y[ix1]))
axarr[0,2].imshow(np.squeeze(preds_train_t[ix1]))
axarr[1,0].imshow(X[ix2])
axarr[1,1].imshow(np.squeeze(Y[ix2]))
axarr[1,2].imshow(np.squeeze(preds_train_t[ix2]))
```
### 7. Output image labels
Saving train and test labelled images
```
def postprocess_morphology(img):
blobs = img > 0.5
blobs = remove_small_holes(blobs, 20)
distance = ndi.distance_transform_edt(blobs)
distance = np.clip(distance - 8, 0, 255)
markers = ndi.label(distance)[0]
labels = watershed(-distance, markers, mask=blobs)
blobs = blobs^find_boundaries(labels, mode='inner')
blobs = remove_small_objects(blobs, 20)
blobs_labels = ndi.label(blobs)[0]
return blobs_labels
for idx, image_id in tqdm(enumerate(train_ids), total=len(train_ids)):
mask = preds_train_upsampled[idx] > 0.5
labels = postprocess_morphology(mask)
#labels = label_mask(mask)
imsave(get_path('output_train_1_lab_seg') + image_id + '.png', labels)
for idx, image_id in tqdm(enumerate(test_ids), total=len(test_ids)):
mask = preds_test_upsampled[idx] > 0.5
labels = postprocess_morphology(mask)
imsave(get_path('output_test_1_lab_seg') + image_id + '.png', labels)
```
Saving test annotated images
```
segmented_annotate(image_type = 'test')
segmented_and_ground_truth_annotate(stage_num = 1)
```
### 8. Kaggle submit
```
df = run_length_encoding.rle_images_in_dir(image_type = 'test', stage_num = 1)
df.to_csv(submission_path, index=False)
message = "improved watershed and changed train test split method"
submit_string = submit_kaggle(notebook_name, submission_path, message)
!$submit_string
```
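`rle_images_in_dir` is a project-local utility; for reference, here is a minimal sketch of run-length encoding a single object in a labelled mask, assuming the competition's column-major, 1-indexed pixel convention (this `rle_encode` helper is illustrative and not part of the repo):
```
import numpy as np

def rle_encode(label_img, label):
    # Run-length encode one object: pixels are numbered top-to-bottom, then
    # left-to-right (column-major), starting at 1 (assumed Kaggle convention).
    pixels = (label_img.T.flatten() == label).astype(np.uint8)
    padded = np.concatenate([[0], pixels, [0]])
    runs = np.where(padded[1:] != padded[:-1])[0] + 1   # 1-indexed run boundaries
    runs[1::2] -= runs[0::2]                             # convert run ends to lengths
    return ' '.join(str(r) for r in runs)

# Example: a 3x3 mask containing one object labelled 1
mask = np.array([[0, 1, 0],
                 [0, 1, 1],
                 [0, 0, 0]])
print(rle_encode(mask, 1))  # -> '4 2 8 1'
```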
|
github_jupyter
|
%pwd
import os
import sys
import random
import warnings
import sklearn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tqdm import tqdm
from itertools import chain
from skimage import img_as_ubyte
from skimage.io import imread, imsave, imshow, concatenate_images
from skimage.segmentation import find_boundaries
from skimage.transform import resize
from skimage.morphology import label, remove_small_objects, remove_small_holes, watershed
from skimage.feature import peak_local_max
from sklearn.model_selection import train_test_split
from scipy import ndimage as ndi
from keras.preprocessing import image
from keras.models import Model, load_model
from keras.layers import Input
from keras.layers.core import Dropout, Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import backend as K
from utils.imaging import get_path, get_image_ids, label_mask, segmented_annotate, segmented_and_ground_truth_annotate
from utils.evaluate import keras_mean_iou, submit_kaggle
from utils import run_length_encoding
%matplotlib inline
plt.rcParams['image.cmap'] = 'gray'
warnings.filterwarnings('ignore', category=UserWarning, module='skimage')
%%javascript
IPython.notebook.kernel.execute('nb_name = ' + '"' + IPython.notebook.notebook_name + '"')
notebook_name = os.path.splitext(os.path.basename(nb_name))[0]
model_name = notebook_name + '.h5'
model_path = get_path('models') + model_name
submission_name = notebook_name + '.csv'
submission_path = get_path('submission') + submission_name
seed = 42
# model parameters
BATCH_SIZE = 150
IMG_WIDTH = 128
IMG_HEIGHT = 128
IMG_CHANNELS = 3
TRAIN_PATH = get_path('data_train_1')
TEST_PATH = get_path('data_test_1')
test_size = 0.20
# augmentation parameters
aug_params = {
'shear_range' : 0.7,
'rotation_range' : 99,
'horizontal_flip' : True,
'vertical_flip' : True,
'zoom_range' : 0.3,
'width_shift_range' : 0.3,
'height_shift_range' : 0.3,
'fill_mode' : 'reflect'
}
# Get train and test IDs
train_ids = get_image_ids(TRAIN_PATH)
test_ids = get_image_ids(TEST_PATH)
# Get and resize train images and masks
X = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
Y = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)
sizes_train = []
print('Getting and resizing train images and masks ... ')
sys.stdout.flush()
for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):
path = TRAIN_PATH + id_
img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]
sizes_train.append([img.shape[0], img.shape[1]])
img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
X[n] = img
mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)
for mask_file in next(os.walk(path + '/masks/'))[2]:
mask_ = imread(path + '/masks/' + mask_file)
mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
preserve_range=True), axis=-1)
mask = np.maximum(mask, mask_)
Y[n] = mask
# Get and resize test images
X_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
sizes_test = []
print('Getting and resizing test images ... ')
sys.stdout.flush()
for n, id_ in tqdm(enumerate(test_ids), total=len(test_ids)):
path = TEST_PATH + id_
img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]
sizes_test.append([img.shape[0], img.shape[1]])
img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
X_test[n] = img
X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=test_size, random_state=seed)
image_datagen = image.ImageDataGenerator(**aug_params)
mask_datagen = image.ImageDataGenerator(**aug_params)
image_datagen.fit(X_train, augment=True, seed=seed)
mask_datagen.fit(Y_train, augment=True, seed=seed)
x = image_datagen.flow(X_train, batch_size=BATCH_SIZE, shuffle=True, seed=seed)
y = mask_datagen.flow(Y_train, batch_size=BATCH_SIZE, shuffle=True, seed=seed)
image_datagen_val = image.ImageDataGenerator()
mask_datagen_val = image.ImageDataGenerator()
image_datagen_val.fit(X_val, augment=True, seed=seed)
mask_datagen_val.fit(Y_val, augment=True, seed=seed)
x_val = image_datagen_val.flow(X_val, batch_size=BATCH_SIZE, shuffle=True, seed=seed)
y_val = mask_datagen_val.flow(Y_val, batch_size=BATCH_SIZE, shuffle=True, seed=seed)
# creating a training and validation generator that generate masks and images
train_generator = zip(x, y)
val_generator = zip(x_val, y_val)
# Build U-Net model
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
s = Lambda(lambda x: x / 255) (inputs)
c1 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (s)
c1 = Dropout(0.1) (c1)
c1 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c1)
p1 = MaxPooling2D((2, 2)) (c1)
c2 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p1)
c2 = Dropout(0.1) (c2)
c2 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c2)
p2 = MaxPooling2D((2, 2)) (c2)
c3 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p2)
c3 = Dropout(0.2) (c3)
c3 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c3)
p3 = MaxPooling2D((2, 2)) (c3)
c4 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p3)
c4 = Dropout(0.2) (c4)
c4 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c4)
p4 = MaxPooling2D(pool_size=(2, 2)) (c4)
c5 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p4)
c5 = Dropout(0.3) (c5)
c5 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c5)
u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same') (c5)
u6 = concatenate([u6, c4])
c6 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (u6)
c6 = Dropout(0.2) (c6)
c6 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c6)
u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same') (c6)
u7 = concatenate([u7, c3])
c7 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (u7)
c7 = Dropout(0.2) (c7)
c7 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c7)
u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same') (c7)
u8 = concatenate([u8, c2])
c8 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (u8)
c8 = Dropout(0.1) (c8)
c8 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c8)
u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same') (c8)
u9 = concatenate([u9, c1], axis=3)
c9 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (u9)
c9 = Dropout(0.1) (c9)
c9 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c9)
outputs = Conv2D(1, (1, 1), activation='sigmoid') (c9)
model = Model(inputs=[inputs], outputs=[outputs])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[keras_mean_iou])
model.summary()
earlystopper = EarlyStopping(patience=3, verbose=1)
checkpointer = ModelCheckpoint(model_path, verbose=1, save_best_only=True)
results = model.fit_generator(train_generator,
validation_data=val_generator,
validation_steps=10,
steps_per_epoch=250,
epochs=3,
callbacks=[earlystopper, checkpointer]
)
# Predict on train, val and test
model = load_model(model_path, custom_objects={'keras_mean_iou': keras_mean_iou})
preds_train = model.predict(X, verbose=1)
preds_test = model.predict(X_test, verbose=1)
# Threshold predictions
preds_train_t = (preds_train > 0.5).astype(np.uint8)
preds_test_t = (preds_test > 0.5).astype(np.uint8)
# Create list of upsampled test masks
preds_test_upsampled = []
for i in range(len(preds_test)):
preds_test_upsampled.append(resize(np.squeeze(preds_test[i]),
(sizes_test[i][0], sizes_test[i][1]),
mode='constant', preserve_range=True))
# Create list of upsampled train masks
preds_train_upsampled = []
for i in range(len(preds_train)):
preds_train_upsampled.append(resize(np.squeeze(preds_train[i]),
(sizes_train[i][0], sizes_train[i][1]),
mode='constant', preserve_range=True))
f, axarr = plt.subplots(2,3,figsize=(12,12))
ix1 = random.randint(0, len(preds_train_t) - 1)
ix2 = random.randint(0, len(preds_train_t) - 1)
axarr[0,0].imshow(X[ix1])
axarr[0,1].imshow(np.squeeze(Y[ix1]))
axarr[0,2].imshow(np.squeeze(preds_train_t[ix1]))
axarr[1,0].imshow(X[ix2])
axarr[1,1].imshow(np.squeeze(Y[ix2]))
axarr[1,2].imshow(np.squeeze(preds_train_t[ix2]))
def postprocess_morphology(img):
blobs = img > 0.5
blobs = remove_small_holes(blobs, 20)
distance = ndi.distance_transform_edt(blobs)
distance = np.clip(distance - 8, 0, 255)
markers = ndi.label(distance)[0]
labels = watershed(-distance, markers, mask=blobs)
blobs = blobs^find_boundaries(labels, mode='inner')
blobs = remove_small_objects(blobs, 20)
blobs_labels = ndi.label(blobs)[0]
return blobs_labels
for idx, image_id in tqdm(enumerate(train_ids), total=len(train_ids)):
mask = preds_train_upsampled[idx] > 0.5
labels = postprocess_morphology(mask)
#labels = label_mask(mask)
imsave(get_path('output_train_1_lab_seg') + image_id + '.png', labels)
for idx, image_id in tqdm(enumerate(test_ids), total=len(test_ids)):
mask = preds_test_upsampled[idx] > 0.5
labels = postprocess_morphology(mask)
imsave(get_path('output_test_1_lab_seg') + image_id + '.png', labels)
segmented_annotate(image_type = 'test')
segmented_and_ground_truth_annotate(stage_num = 1)
df = run_length_encoding.rle_images_in_dir(image_type = 'test', stage_num = 1)
df.to_csv(submission_path, index=False)
message = "improved watershed and changed train test split method"
submit_string = submit_kaggle(notebook_name, submission_path, message)
!$submit_string
| 0.509032 | 0.793226 |
## import
```
import tensorflow._api.v2.compat.v1 as tf
import numpy as np
tf.disable_v2_behavior()
from PIL import Image
# print('Pillow Version:', PIL.__version__)
from numpy import asarray
import matplotlib.pyplot as plt
```
## initialization
Each target pixel `y` is predicted from its eight neighbouring pixels:
x1 | x2 | x3
---|---|---
x4 | y | x5
x6 | x7 | x8
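For illustration, a minimal sketch of how one pixel's eight neighbours become a feature row; the ordering mirrors `get_XY_from_image` below, and the toy patch values are made up:
```
import numpy as np

# toy 3x3 single-channel patch (arbitrary values, for illustration only)
patch = np.array([[10, 20, 30],
                  [40, 50, 60],
                  [70, 80, 90]])

i, j = 1, 1                 # centre pixel
y = patch[i, j]             # target value: 50
# neighbour ordering as used in get_XY_from_image below
x = [patch[i-1, j-1], patch[i-1, j], patch[i, j-1], patch[i+1, j],
     patch[i, j+1], patch[i+1, j+1], patch[i-1, j+1], patch[i+1, j-1]]
print(x, y)                 # [10, 20, 40, 80, 60, 90, 30, 70] 50
```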
```
# variables
features = 8
y = tf.placeholder(tf.float32)
x = tf.placeholder(tf.float32,[None,features])
w = tf.Variable(tf.zeros([features,1]))
b = tf.Variable(tf.zeros(1))
# functions
alfa = 0.000004
pred = tf.matmul(x, w) + b
loss = tf.reduce_mean(tf.pow(pred - y, 2))+ 0.1*tf.reduce_sum(tf.abs(w)) # + lasso regularization
update = tf.train.GradientDescentOptimizer(alfa).minimize(loss)
```
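The objective minimised above is the mean squared error of the linear predictor plus an L1 (lasso) penalty on the weights, i.e.

$$\mathcal{L}(w, b) = \frac{1}{N}\sum_{n=1}^{N}\bigl(x_n^\top w + b - y_n\bigr)^2 + 0.1\,\lVert w\rVert_1$$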
## data function
```
def get_XY_from_image(photo_name:str,color:int,jumps:int=100,show:bool=False):
data = asarray(Image.open(photo_name))
color_arr = data[:,:,color]
image_color_arr = Image.fromarray(color_arr)
if show: image_color_arr.show()
data_x = []
data_y = []
print(f"pic size: {len(color_arr)}x{len(color_arr[0])} name: {photo_name}")
for i in range(1,len(color_arr)-1,jumps):
for j in range(1,len(color_arr[0])-1):
temp_y = [color_arr[i][j]]
temp_x = [color_arr[i-1][j-1],color_arr[i-1][j],color_arr[i][j-1],color_arr[i+1][j],color_arr[i][j+1],color_arr[i+1][j+1],color_arr[i-1][j+1],color_arr[i+1][j-1]]
data_y.append(temp_y)
data_x.append(temp_x)
return (data_x,data_y)
def load_pic_data(pics_array,color:int,jumps:int=100,show:bool=False):
data_x , data_y = get_XY_from_image(pics_array[0],color,jumps,show)
for i in pics_array[1:]:
data_tmp_x , data_tmp_y = get_XY_from_image(i,color,jumps,show)
data_x = np.append(data_x,data_tmp_x,axis=0)
data_y = np.append(data_y,data_tmp_y,axis=0)
data_x = np.array(data_x)
data_y = np.array(data_y)
return data_x,data_y
```
## get data
```
data_x , data_y = load_pic_data(
["data/cat_test.jpg", "data/balloon.jpg","data/cat.jpg","data/city.jpg",
"data/city_night.jpg","data/city_color.jpg","data/sky.jpg",
"data/flower.jpg","data/telescope.jpg","data/moon.jpg"],
color=0,jumps=100)
data_t_x , data_t_y = load_pic_data(["data/park.jpg","data/cloud.jpg"],color=1,jumps=100)
```
## running the sim
```
sess = tf.Session()
sess.run(tf.global_variables_initializer())
show = 10
loss_in_time = []
w_arr = []
test_over_time = []
for i in range(0,10000):
sess.run(update, feed_dict = {x:data_x, y:data_y})
if(i%show==0 and i>100):
tmp = sess.run(loss,feed_dict={x:data_x,y:data_y})
loss_in_time.append(tmp)
w_arr.append(sess.run(w))
if(i%(show*50)==0):
print(f"i = {i}, loss = {tmp}")
test_over_time.append(sess.run(loss,feed_dict={x:data_t_x,y:data_t_y}))
```
## printing and testing
```
d = np.array(np.array(w_arr).transpose()[0]).transpose()
plt.plot(list(i*show for i in range(len(d))),d)
plt.ylabel('w over time')
plt.show()
plt.plot(list(i*show for i in range(len(loss_in_time))),loss_in_time,label ="loss")
plt.plot(list(i*show for i in range(len(test_over_time))),test_over_time , label ="test")
plt.legend()
plt.ylabel('loss over time')
plt.show()
```
## show the calc image
```
red, r_tmp = load_pic_data(["data/park.jpg"],0,jumps=1)
green, g_tmp = load_pic_data(["data/park.jpg"],1,jumps=1)
blue, b_tmp = load_pic_data(["data/park.jpg"],2,jumps=1)
size_x = 588
size_y = 830
red_calculated_mat = np.reshape(sess.run(tf.nn.relu(np.matmul(red,sess.run(w))+sess.run(b))),(size_x-2,size_y-2))
green_calculated_mat = np.reshape(sess.run(tf.nn.relu(np.matmul(green,sess.run(w))+sess.run(b))),(size_x-2,size_y-2))
blue_calculated_mat = np.reshape(sess.run(tf.nn.relu(np.matmul(blue,sess.run(w))+sess.run(b))),(size_x-2,size_y-2))
arr = np.zeros((size_x-2,size_y-2,3))
arr[:,:,0] = np.reshape(r_tmp,(size_x-2,size_y-2))
arr[:,:,1] = np.reshape(g_tmp,(size_x-2,size_y-2))
arr[:,:,2] = np.reshape(b_tmp,(size_x-2,size_y-2))
arr_calc = np.zeros((size_x-2,size_y-2,3))
arr_calc[:,:,0] = red_calculated_mat
arr_calc[:,:,1] = green_calculated_mat
arr_calc[:,:,2] = blue_calculated_mat
data_RGB = np.concatenate((arr.astype('uint8'),arr_calc.astype('uint8')), axis=1)
img = Image.fromarray(data_RGB,mode="RGB")
img.show(title="calculated")
```
|
github_jupyter
|
import tensorflow._api.v2.compat.v1 as tf
import numpy as np
tf.disable_v2_behavior()
from PIL import Image
# print('Pillow Version:', PIL.__version__)
from numpy import asarray
import matplotlib.pyplot as plt
# variables
features = 8
y = tf.placeholder(tf.float32)
x = tf.placeholder(tf.float32,[None,features])
w = tf.Variable(tf.zeros([features,1]))
b = tf.Variable(tf.zeros(1))
# functions
alfa = 0.000004
pred = tf.matmul(x, w) + b
loss = tf.reduce_mean(tf.pow(pred - y, 2))+ 0.1*tf.reduce_sum(tf.abs(w)) # + lasso regularization
update = tf.train.GradientDescentOptimizer(alfa).minimize(loss)
def get_XY_from_image(photo_name:str,color:int,jumps:int=100,show:bool=False):
data = asarray(Image.open(photo_name))
color_arr = data[:,:,color]
image_color_arr = Image.fromarray(color_arr)
if show: image_color_arr.show()
data_x = []
data_y = []
print(f"pic size: {len(color_arr)}x{len(color_arr[0])} name: {photo_name}")
for i in range(1,len(color_arr)-1,jumps):
for j in range(1,len(color_arr[0])-1):
temp_y = [color_arr[i][j]]
temp_x = [color_arr[i-1][j-1],color_arr[i-1][j],color_arr[i][j-1],color_arr[i+1][j],color_arr[i][j+1],color_arr[i+1][j+1],color_arr[i-1][j+1],color_arr[i+1][j-1]]
data_y.append(temp_y)
data_x.append(temp_x)
return (data_x,data_y)
def load_pic_data(pics_array,color:int,jumps:int=100,show:bool=False):
data_x , data_y = get_XY_from_image(pics_array[0],color,jumps,show)
for i in pics_array[1:]:
data_tmp_x , data_tmp_y = get_XY_from_image(i,color,jumps,show)
data_x = np.append(data_x,data_tmp_x,axis=0)
data_y = np.append(data_y,data_tmp_y,axis=0)
data_x = np.array(data_x)
data_y = np.array(data_y)
return data_x,data_y
data_x , data_y = load_pic_data(
["data/cat_test.jpg", "data/balloon.jpg","data/cat.jpg","data/city.jpg",
"data/city_night.jpg","data/city_color.jpg","data/sky.jpg",
"data/flower.jpg","data/telescope.jpg","data/moon.jpg"],
color=0,jumps=100)
data_t_x , data_t_y = load_pic_data(["data/park.jpg","data/cloud.jpg"],color=1,jumps=100)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
show = 10
loss_in_time = []
w_arr = []
test_over_time = []
for i in range(0,10000):
sess.run(update, feed_dict = {x:data_x, y:data_y})
if(i%show==0 and i>100):
tmp = sess.run(loss,feed_dict={x:data_x,y:data_y})
loss_in_time.append(tmp)
w_arr.append(sess.run(w))
if(i%(show*50)==0):
print(f"i = {i}, loss = {tmp}")
test_over_time.append(sess.run(loss,feed_dict={x:data_t_x,y:data_t_y}))
d = np.array(np.array(w_arr).transpose()[0]).transpose()
plt.plot(list(i*show for i in range(len(d))),d)
plt.ylabel('w over time')
plt.show()
plt.plot(list(i*show for i in range(len(loss_in_time))),loss_in_time,label ="loss")
plt.plot(list(i*show for i in range(len(test_over_time))),test_over_time , label ="test")
plt.legend()
plt.ylabel('loss over time')
plt.show()
red, r_tmp = load_pic_data(["data/park.jpg"],0,jumps=1)
green, g_tmp = load_pic_data(["data/park.jpg"],1,jumps=1)
blue, b_tmp = load_pic_data(["data/park.jpg"],2,jumps=1)
size_x = 588
size_y = 830
red_calculated_mat = np.reshape(sess.run(tf.nn.relu(np.matmul(red,sess.run(w))+sess.run(b))),(size_x-2,size_y-2))
green_calculated_mat = np.reshape(sess.run(tf.nn.relu(np.matmul(green,sess.run(w))+sess.run(b))),(size_x-2,size_y-2))
blue_calculated_mat = np.reshape(sess.run(tf.nn.relu(np.matmul(blue,sess.run(w))+sess.run(b))),(size_x-2,size_y-2))
arr = np.zeros((size_x-2,size_y-2,3))
arr[:,:,0] = np.reshape(r_tmp,(size_x-2,size_y-2))
arr[:,:,1] = np.reshape(g_tmp,(size_x-2,size_y-2))
arr[:,:,2] = np.reshape(b_tmp,(size_x-2,size_y-2))
arr_calc = np.zeros((size_x-2,size_y-2,3))
arr_calc[:,:,0] = red_calculated_mat
arr_calc[:,:,1] = green_calculated_mat
arr_calc[:,:,2] = blue_calculated_mat
data_RGB = np.concatenate((arr.astype('uint8'),arr_calc.astype('uint8')), axis=1)
img = Image.fromarray(data_RGB,mode="RGB")
img.show(title="calculated")
| 0.24817 | 0.771757 |
Note: If running on Google Colab, make sure to create a data/ folder and src/ folder in the runtime with all of the necessary sub-folders and files. Something like this:

```
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import matplotlib.pyplot as plt
import time
import math
import os
class EnVietDataset(Dataset):
def __init__(self, en_path, viet_path, en_vocab_path, viet_vocab_path):
super().__init__()
en_inputs = []
viet_translations = []
with open(en_path, 'r', encoding='utf-8') as en_f:
for en_line in en_f.readlines():
en_sequence = en_line.strip()
en_tokens = en_sequence.split(' ')
en_tokens.insert(0, '<s>')
en_tokens.append('</s>')
en_inputs.append(en_tokens)
with open(viet_path, 'r', encoding='utf-8') as viet_f:
for viet_line in viet_f.readlines():
viet_sequence = viet_line.strip()
viet_tokens = viet_sequence.split(' ')
viet_tokens.insert(0, '<s>')
viet_tokens.append('</s>')
viet_translations.append(viet_tokens)
# Vocab maps english tokens to indices then reverse vocab maps indices to english tokens
en_vocab = self._build_vocab(en_vocab_path)
en_reverse_vocab = {index: token for token, index in en_vocab.items()}
# Vocab maps vietnamese tokens to indices then reverse vocab maps indices to vietnamese tokens
viet_vocab = self._build_vocab(viet_vocab_path)
viet_reverse_vocab = {index: token for token, index in viet_vocab.items()}
self.en_vocab = en_vocab
self.en_reverse_vocab = en_reverse_vocab
self.viet_vocab = viet_vocab
self.viet_reverse_vocab = viet_reverse_vocab
indexed_en_inputs = [self.tokens_to_indices(en_input, lang='en') for en_input in en_inputs]
indexed_viet_translations = [self.tokens_to_indices(viet_translation, lang='viet') for viet_translation in viet_translations]
self.en_inputs = indexed_en_inputs
self.viet_translations = indexed_viet_translations
def __getitem__(self, index):
return self.en_inputs[index], self.viet_translations[index]
def __len__(self):
return len(self.en_inputs)
@staticmethod
def _build_vocab(vocab_path):
"""Builds a vocab (dictionary) of word->index.
Args:
vocab_path (str): Path to the vocab.
Returns:
dict of word->index: The vocab of word->index.
"""
assert os.path.exists(vocab_path)
vocab = {'<pad>': 0}
token_id = 1
with open(vocab_path, 'r', encoding='utf-8') as f:
for line in f.readlines():
token = line.strip()
vocab[token] = token_id
token_id += 1
return vocab
def tokens_to_indices(self, tokens, lang='en'):
"""Converts a list of tokens from strings to their corresponding indices in the specified vocab.
Args:
tokens (list of str's): Tokens to be converted.
lang (str, optional): Specifies which vocab to use. Defaults to 'en' for English. Other option
is 'viet' for Vietnamese.
Returns:
length-N tensor: Tensor containing the indices corresponding to each token.
"""
assert lang == 'en' or lang == 'viet'
indices = []
vocab = self.en_vocab if lang == 'en' else self.viet_vocab
unk_token = vocab['<unk>']
for token in tokens:
indices.append(vocab.get(token, unk_token))
return torch.tensor(indices)
def indices_to_tokens(self, indices, lang='en'):
"""Converts indices to tokens and concatenates them as a string.
Args:
indices (list of str's): A tensor of indices (with shape (N, 1) or length-N), a list of (1, 1) tensors,
or a list of indices (ints).
lang (str, optional): Specifies which vocab to use. Defaults to 'en' for English. Other option
is 'viet' for Vietnamese.
Returns:
str: String from concatenating the tokens.
"""
assert lang == 'en' or lang == 'viet'
tokens = []
reverse_vocab = self.en_reverse_vocab if lang == 'en' else self.viet_reverse_vocab
for index in indices:
if torch.is_tensor(index):
index = index.item()
token = reverse_vocab.get(index, '<unk>')
if token == '<pad>':
continue
tokens.append(token)
return " ".join(tokens)
def collate_fn(batch):
"""Create a batch of data given a list of N input sequences and output sequences. Returns a tuple
containing two tensors each with shape (N, max_sequence_length), where max_sequence_length is the
maximum length of any sequence in the batch.
Args:
batch (list): A list of size N, where each element is a tuple containing two sequence tensors.
Returns:
tuple of two tensors, list of ints, list of ints: A tuple containing two tensors each with
shape (N, max_sequence_length), list of each input sequence's length, and list of each target
sequence's length.
"""
en_inputs, viet_translations = zip(*batch)
max_en_input_length = 0
max_viet_translation_length = 0
e = []
v = []
e_lens = []
v_lens = []
for en_input in en_inputs:
en_input_length = list(en_input.size())[0]
e_lens.append(en_input_length)
if en_input_length > max_en_input_length:
max_en_input_length = en_input_length
for en_input in en_inputs:
en_input_length = list(en_input.size())[0]
if en_input_length < max_en_input_length:
e.append(torch.cat((en_input, torch.zeros(max_en_input_length - en_input_length, dtype=int))))
else:
e.append(en_input)
for viet_translation in viet_translations:
viet_translation_length = list(viet_translation.size())[0]
v_lens.append(viet_translation_length)
if viet_translation_length > max_viet_translation_length:
max_viet_translation_length = viet_translation_length
for viet_translation in viet_translations:
viet_translation_length = list(viet_translation.size())[0]
if viet_translation_length < max_viet_translation_length:
v.append(torch.cat((viet_translation, torch.zeros(max_viet_translation_length - viet_translation_length, dtype=int))))
else:
v.append(viet_translation)
return (torch.stack(e), torch.stack(v)), e_lens, v_lens
class AttnEncoderRNN(nn.Module):
def __init__(self, vocab_size, embedding_dim, hidden_size):
super().__init__()
self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_dim, padding_idx=0)
self.rnn = nn.LSTM(input_size=embedding_dim, hidden_size=hidden_size, num_layers=1, batch_first=True)
def forward(self, batch_sequences, seq_lens):
"""Forward pass through the encoder.
Args:
batch_sequences (N-by-seq_len tensor): Batch containing N length-seq_len tensors
(e.g., the sequences to be translated). N is the batch size.
seq_lens (list of ints): List of sequences lengths of each batch element.
Returns:
tuple of one (N, seq_len, hidden_size) tensor and two (1, N, hidden_size) tensors: All hidden states of each
sequence in the batch and (hn, cn) from the RNN (LSTM) layer.
"""
batch_sequences = self.embedding(batch_sequences) # N-by-seq_len-by-embedding_dim
packed_batch_sequences = nn.utils.rnn.pack_padded_sequence(batch_sequences, lengths=seq_lens, batch_first=True, enforce_sorted=False)
out, (hn, cn) = self.rnn(packed_batch_sequences) # hn and cn are both 1-by-N-by-hidden_size
# Unpack output from RNN (LSTM) layer. out_padded is N-by-seq_len-by-hidden_size
out_padded, _ = torch.nn.utils.rnn.pad_packed_sequence(out, batch_first=True)
# out_padded: all hidden states of each sequence in the batch
# hn: the final hidden state of each sequence in the batch
# cn: final cell state of each sequence in the batch
return out_padded, hn, cn
class AttnDecoderRNN(nn.Module):
def __init__(self, vocab_size, embedding_dim, hidden_size):
super().__init__()
self.hidden_size = hidden_size
self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_dim, padding_idx=0)
self.rnn = nn.LSTM(input_size=embedding_dim, hidden_size=hidden_size, num_layers=1, batch_first=True)
self.fc = nn.Linear(hidden_size * 2, vocab_size)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, prev_outputs, prev_hn, prev_cn, encoder_hidden_states, device):
"""Forward pass through the decoder.
Args:
prev_outputs (N-by-1): The ouputs from the previous time step. N is the batch size.
prev_hn (1-by-N-by-hidden_size tensor):
prev_cn (1-by-N-by-hidden_size tensor):
encoder_hidden_states (N-input_seq_len-by-hidden_size tensor):
Returns:
tuple of one (N, vocab_size) tensor and two (1, N, hidden_size) tensors: The predicted outputs and (hn, cn) from
the RNN (LSTM) layer.
"""
embeddings = self.embedding(prev_outputs) # N-by-1-by-embedding_dim
out, (hn, cn) = self.rnn(embeddings, (prev_hn, prev_cn)) # out is N-by-1-by-hidden_size, hn and cn are both 1-by-N-by-hidden_size
alignment_scores = torch.sum(encoder_hidden_states * out, dim=2, keepdim=True) # N-by-input_seq_len-by-1
context_vectors = torch.sum(encoder_hidden_states * alignment_scores, dim=1) # N-by-hidden_size
# torch.sum(out, dim=1) is a neat way to squeeze the dimensions to N-by-hidden_size, since
# torch.squeeze(out) won't with a batch size N = 1.
concat = torch.cat((torch.sum(out, dim=1), context_vectors), dim=1) # N-by-hidden_size*2
concat = self.softmax(self.fc(concat)) # N-by-vocab_size
return concat, hn, cn
def train(input_batch, target_batch, e_lens, v_lens, encoder, decoder, encoder_optim, decoder_optim, loss_fn, device):
all_encoder_hidden_states, all_encoder_hn, all_encoder_cn = encoder(input_batch, e_lens) # (N, seq_len, hidden_size), (1, N, hidden_size), (1, N, hidden_size)
decoder_inputs = target_batch[:,0:1] # N-by-1; the <s> from each sequence
prev_hn = all_encoder_hn
prev_cn = all_encoder_cn
preds = []
targets = []
max_seq_len = max(v_lens)
for time_step in range(max_seq_len - 1):
outputs, hn, cn = decoder(decoder_inputs, prev_hn, prev_cn, all_encoder_hidden_states, device)
preds.append(outputs)
targets.append(target_batch[:,time_step+1])
top_pred_vals, indices = outputs.topk(1) # N-by-1 and N-by-1
decoder_inputs = indices.detach()
prev_hn = hn
prev_cn = cn
loss = loss_fn(torch.cat(preds, dim=0), torch.cat(targets, dim=0))
encoder_optim.zero_grad()
decoder_optim.zero_grad()
loss.backward()
encoder_optim.step()
decoder_optim.step()
return loss.item()
def evaluate(input_seq, input_seq_len, encoder, decoder, cutoff=300):
encoder_hidden_states, encoder_hn, encoder_cn = encoder(input_seq, input_seq_len)
    decoder_input = torch.tensor(2)  # assumed index of '<s>' in the target vocab (the stop check below uses index 3 for '</s>')
prev_hn = encoder_hn
prev_cn = encoder_cn
predicted_indices = []
# Model could potentially keep generating words on forever; use a cutoff limit to restrict this
for i in range(cutoff):
output, hn, cn = decoder(torch.tensor([[decoder_input.item()]]), prev_hn, prev_cn, encoder_hidden_states, device)
prev_hn = hn
prev_cn = cn
top_pred_val, top_pred_idx = output.topk(1) # largest output and its corresponding index
decoder_input = torch.tensor(top_pred_idx.item())
predicted_indices.append(decoder_input.item())
if decoder_input.item() == 3: break # predicted '</s>', so stop
return torch.tensor(predicted_indices)
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since):
now = time.time()
s = now - since
return asMinutes(s)
if __name__ == "__main__":
# Check GPU availability
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
print()
total_train_set_size = 133300
batch_size = 100
# Change as needed
learning_rate = 0.005
momentum = 0.9
embedding_dim = 256
hidden_size = 512
num_epochs = 1
en_path = './data/train/train.en.txt'
viet_path = './data/train/train.vi.txt'
en_vocab_path = './data/vocab/vocab.en.txt'
viet_vocab_path = './data/vocab/vocab.vi.txt'
train_dataset = EnVietDataset(en_path, viet_path, en_vocab_path, viet_vocab_path)
train_loader = DataLoader(train_dataset, batch_size=batch_size, collate_fn=collate_fn, shuffle=True)
# Print out some random examples from the data
print("Data examples:")
random_indices = torch.randperm(len(train_dataset))[:8].tolist()
for index in random_indices:
en_indices, viet_indices = train_dataset.en_inputs[index], train_dataset.viet_translations[index]
en_input = train_dataset.indices_to_tokens(en_indices, lang='en')
viet_translation = train_dataset.indices_to_tokens(viet_indices, lang='viet')
print(f"English: {en_input}. Vietnamese: {viet_translation}")
print()
encoder = AttnEncoderRNN(len(train_dataset.en_vocab), embedding_dim, hidden_size)
decoder = AttnDecoderRNN(len(train_dataset.viet_vocab), embedding_dim, hidden_size)
encoder.to(device)
decoder.to(device)
encoder_optim = optim.SGD(encoder.parameters(), lr=learning_rate, momentum=momentum)
decoder_optim = optim.SGD(decoder.parameters(), lr=learning_rate, momentum=momentum)
loss_fn = nn.NLLLoss()
training_losses = []
start = time.time()
for epoch in range(num_epochs):
encoder.train()
decoder.train()
for i, data in enumerate(train_loader):
# Training on Colab's GPU throws an ambiguous error probably involving the last batch not having batch_size since
# it's all the remaining sentence pairs (which is less than batch_size). So, don't train on the last batch.
if i == (total_train_set_size // batch_size): break
en, viet, e_lens, v_lens = data[0][0].to(device), data[0][1].to(device), data[1], data[2]
batch_loss = train(en, viet, e_lens, v_lens, encoder, decoder, encoder_optim, decoder_optim, loss_fn, device)
training_losses.append(batch_loss)
# Print every 20 mini-batches
if i % 20 == 19:
print(f'[Epoch {epoch + 1}, Batch {i + 1} ({(i + 1) * batch_size} translations)] ({timeSince(start)}): {batch_loss}')
plt.figure(1)
plt.title('NLL Loss per Batch')
plt.xlabel(f'Batch (1 batch = {batch_size} translations)')
plt.ylabel('NLL Loss')
plt.plot(training_losses)
plt.show()
torch.save(encoder.state_dict(), './src/trained_models/attention_encoder.pth')
torch.save(decoder.state_dict(), './src/trained_models/attention_decoder.pth')
test_enc = AttnEncoderRNN(len(train_dataset.en_vocab), embedding_dim, hidden_size)
test_enc.load_state_dict(torch.load('./src/trained_models/attention_encoder.pth'))
test_dec = AttnDecoderRNN(len(train_dataset.viet_vocab), embedding_dim, hidden_size)
test_dec.load_state_dict(torch.load('./src/trained_models/attention_decoder.pth'))
test_enc.eval()
test_dec.eval()
while True:
en_input = input('> English: ')
if en_input == '<STOP>': break
en_input_tokens = en_input.strip().split(' ')
en_input_tokens.insert(0, '<s>')
en_input_tokens.append('</s>')
en_input_indices = train_dataset.tokens_to_indices(en_input_tokens, lang='en')
test_en_input = torch.zeros((1, len(en_input_tokens)))
test_en_input[0] = en_input_indices
test_en_input = test_en_input.long()
with torch.no_grad():
predicted_indices = evaluate(test_en_input, [len(en_input_tokens)], test_enc, test_dec)
print(f'> Vietnamese: {train_dataset.indices_to_tokens(predicted_indices, lang="viet")}')
print()
```
|
github_jupyter
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import matplotlib.pyplot as plt
import time
import math
import os
class EnVietDataset(Dataset):
def __init__(self, en_path, viet_path, en_vocab_path, viet_vocab_path):
super().__init__()
en_inputs = []
viet_translations = []
with open(en_path, 'r', encoding='utf-8') as en_f:
for en_line in en_f.readlines():
en_sequence = en_line.strip()
en_tokens = en_sequence.split(' ')
en_tokens.insert(0, '<s>')
en_tokens.append('</s>')
en_inputs.append(en_tokens)
with open(viet_path, 'r', encoding='utf-8') as viet_f:
for viet_line in viet_f.readlines():
viet_sequence = viet_line.strip()
viet_tokens = viet_sequence.split(' ')
viet_tokens.insert(0, '<s>')
viet_tokens.append('</s>')
viet_translations.append(viet_tokens)
# Vocab maps english tokens to indices then reverse vocab maps indices to english tokens
en_vocab = self._build_vocab(en_vocab_path)
en_reverse_vocab = {index: token for token, index in en_vocab.items()}
# Vocab maps vietnamese tokens to indices then reverse vocab maps indices to vietnamese tokens
viet_vocab = self._build_vocab(viet_vocab_path)
viet_reverse_vocab = {index: token for token, index in viet_vocab.items()}
self.en_vocab = en_vocab
self.en_reverse_vocab = en_reverse_vocab
self.viet_vocab = viet_vocab
self.viet_reverse_vocab = viet_reverse_vocab
indexed_en_inputs = [self.tokens_to_indices(en_input, lang='en') for en_input in en_inputs]
indexed_viet_translations = [self.tokens_to_indices(viet_translation, lang='viet') for viet_translation in viet_translations]
self.en_inputs = indexed_en_inputs
self.viet_translations = indexed_viet_translations
def __getitem__(self, index):
return self.en_inputs[index], self.viet_translations[index]
def __len__(self):
return len(self.en_inputs)
@staticmethod
def _build_vocab(vocab_path):
"""Builds a vocab (dictionary) of word->index.
Args:
vocab_path (str): Path to the vocab.
Returns:
dict of word->index: The vocab of word->index.
"""
assert os.path.exists(vocab_path)
vocab = {'<pad>': 0}
token_id = 1
with open(vocab_path, 'r', encoding='utf-8') as f:
for line in f.readlines():
token = line.strip()
vocab[token] = token_id
token_id += 1
return vocab
def tokens_to_indices(self, tokens, lang='en'):
"""Converts a list of tokens from strings to their corresponding indices in the specified vocab.
Args:
tokens (list of str's): Tokens to be converted.
lang (str, optional): Specifies which vocab to use. Defaults to 'en' for English. Other option
is 'viet' for Vietnamese.
Returns:
length-N tensor: Tensor containing the indices corresponding to each token.
"""
assert lang == 'en' or lang == 'viet'
indices = []
vocab = self.en_vocab if lang == 'en' else self.viet_vocab
unk_token = vocab['<unk>']
for token in tokens:
indices.append(vocab.get(token, unk_token))
return torch.tensor(indices)
def indices_to_tokens(self, indices, lang='en'):
"""Converts indices to tokens and concatenates them as a string.
Args:
indices (list of str's): A tensor of indices (with shape (N, 1) or length-N), a list of (1, 1) tensors,
or a list of indices (ints).
lang (str, optional): Specifies which vocab to use. Defaults to 'en' for English. Other option
is 'viet' for Vietnamese.
Returns:
str: String from concatenating the tokens.
"""
assert lang == 'en' or lang == 'viet'
tokens = []
reverse_vocab = self.en_reverse_vocab if lang == 'en' else self.viet_reverse_vocab
for index in indices:
if torch.is_tensor(index):
index = index.item()
token = reverse_vocab.get(index, '<unk>')
if token == '<pad>':
continue
tokens.append(token)
return " ".join(tokens)
def collate_fn(batch):
"""Create a batch of data given a list of N input sequences and output sequences. Returns a tuple
containing two tensors each with shape (N, max_sequence_length), where max_sequence_length is the
maximum length of any sequence in the batch.
Args:
batch (list): A list of size N, where each element is a tuple containing two sequence tensors.
Returns:
tuple of two tensors, list of ints, list of ints: A tuple containing two tensors each with
shape (N, max_sequence_length), list of each input sequence's length, and list of each target
sequence's length.
"""
en_inputs, viet_translations = zip(*batch)
max_en_input_length = 0
max_viet_translation_length = 0
e = []
v = []
e_lens = []
v_lens = []
for en_input in en_inputs:
en_input_length = list(en_input.size())[0]
e_lens.append(en_input_length)
if en_input_length > max_en_input_length:
max_en_input_length = en_input_length
for en_input in en_inputs:
en_input_length = list(en_input.size())[0]
if en_input_length < max_en_input_length:
e.append(torch.cat((en_input, torch.zeros(max_en_input_length - en_input_length, dtype=int))))
else:
e.append(en_input)
for viet_translation in viet_translations:
viet_translation_length = list(viet_translation.size())[0]
v_lens.append(viet_translation_length)
if viet_translation_length > max_viet_translation_length:
max_viet_translation_length = viet_translation_length
for viet_translation in viet_translations:
viet_translation_length = list(viet_translation.size())[0]
if viet_translation_length < max_viet_translation_length:
v.append(torch.cat((viet_translation, torch.zeros(max_viet_translation_length - viet_translation_length, dtype=int))))
else:
v.append(viet_translation)
return (torch.stack(e), torch.stack(v)), e_lens, v_lens
class AttnEncoderRNN(nn.Module):
def __init__(self, vocab_size, embedding_dim, hidden_size):
super().__init__()
self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_dim, padding_idx=0)
self.rnn = nn.LSTM(input_size=embedding_dim, hidden_size=hidden_size, num_layers=1, batch_first=True)
def forward(self, batch_sequences, seq_lens):
"""Forward pass through the encoder.
Args:
batch_sequences (N-by-seq_len tensor): Batch containing N length-seq_len tensors
(e.g., the sequences to be translated). N is the batch size.
seq_lens (list of ints): List of sequences lengths of each batch element.
Returns:
tuple of one (N, seq_len, hidden_size) tensor and two (1, N, hidden_size) tensors: All hidden states of each
sequence in the batch and (hn, cn) from the RNN (LSTM) layer.
"""
batch_sequences = self.embedding(batch_sequences) # N-by-seq_len-by-embedding_dim
packed_batch_sequences = nn.utils.rnn.pack_padded_sequence(batch_sequences, lengths=seq_lens, batch_first=True, enforce_sorted=False)
out, (hn, cn) = self.rnn(packed_batch_sequences) # hn and cn are both 1-by-N-by-hidden_size
# Unpack output from RNN (LSTM) layer. out_padded is N-by-seq_len-by-hidden_size
out_padded, _ = torch.nn.utils.rnn.pad_packed_sequence(out, batch_first=True)
# out_padded: all hidden states of each sequence in the batch
# hn: the final hidden state of each sequence in the batch
# cn: final cell state of each sequence in the batch
return out_padded, hn, cn
class AttnDecoderRNN(nn.Module):
def __init__(self, vocab_size, embedding_dim, hidden_size):
super().__init__()
self.hidden_size = hidden_size
self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_dim, padding_idx=0)
self.rnn = nn.LSTM(input_size=embedding_dim, hidden_size=hidden_size, num_layers=1, batch_first=True)
self.fc = nn.Linear(hidden_size * 2, vocab_size)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, prev_outputs, prev_hn, prev_cn, encoder_hidden_states, device):
"""Forward pass through the decoder.
Args:
prev_outputs (N-by-1): The ouputs from the previous time step. N is the batch size.
prev_hn (1-by-N-by-hidden_size tensor):
prev_cn (1-by-N-by-hidden_size tensor):
encoder_hidden_states (N-input_seq_len-by-hidden_size tensor):
Returns:
tuple of one (N, vocab_size) tensor and two (1, N, hidden_size) tensors: The predicted outputs and (hn, cn) from
the RNN (LSTM) layer.
"""
embeddings = self.embedding(prev_outputs) # N-by-1-by-embedding_dim
out, (hn, cn) = self.rnn(embeddings, (prev_hn, prev_cn)) # out is N-by-1-by-hidden_size, hn and cn are both 1-by-N-by-hidden_size
alignment_scores = torch.sum(encoder_hidden_states * out, dim=2, keepdim=True) # N-by-input_seq_len-by-1
context_vectors = torch.sum(encoder_hidden_states * alignment_scores, dim=1) # N-by-hidden_size
# torch.sum(out, dim=1) is a neat way to squeeze the dimensions to N-by-hidden_size, since
# torch.squeeze(out) won't with a batch size N = 1.
concat = torch.cat((torch.sum(out, dim=1), context_vectors), dim=1) # N-by-hidden_size*2
concat = self.softmax(self.fc(concat)) # N-by-vocab_size
return concat, hn, cn
def train(input_batch, target_batch, e_lens, v_lens, encoder, decoder, encoder_optim, decoder_optim, loss_fn, device):
all_encoder_hidden_states, all_encoder_hn, all_encoder_cn = encoder(input_batch, e_lens) # (N, seq_len, hidden_size), (1, N, hidden_size), (1, N, hidden_size)
decoder_inputs = target_batch[:,0:1] # N-by-1; the <s> from each sequence
prev_hn = all_encoder_hn
prev_cn = all_encoder_cn
preds = []
targets = []
max_seq_len = max(v_lens)
for time_step in range(max_seq_len - 1):
outputs, hn, cn = decoder(decoder_inputs, prev_hn, prev_cn, all_encoder_hidden_states, device)
preds.append(outputs)
targets.append(target_batch[:,time_step+1])
top_pred_vals, indices = outputs.topk(1) # N-by-1 and N-by-1
decoder_inputs = indices.detach()
prev_hn = hn
prev_cn = cn
loss = loss_fn(torch.cat(preds, dim=0), torch.cat(targets, dim=0))
encoder_optim.zero_grad()
decoder_optim.zero_grad()
loss.backward()
encoder_optim.step()
decoder_optim.step()
return loss.item()
def evaluate(input_seq, input_seq_len, encoder, decoder, cutoff=300):
encoder_hidden_states, encoder_hn, encoder_cn = encoder(input_seq, input_seq_len)
decoder_input = torch.tensor(2) # 2 is assumed to be the index of the '<s>' start token
prev_hn = encoder_hn
prev_cn = encoder_cn
predicted_indices = []
# The model could potentially keep generating words forever; use a cutoff to limit this
for i in range(cutoff):
output, hn, cn = decoder(torch.tensor([[decoder_input.item()]]), prev_hn, prev_cn, encoder_hidden_states, device)
prev_hn = hn
prev_cn = cn
top_pred_val, top_pred_idx = output.topk(1) # largest output and its corresponding index
decoder_input = torch.tensor(top_pred_idx.item())
predicted_indices.append(decoder_input.item())
if decoder_input.item() == 3: break # predicted '</s>', so stop
return torch.tensor(predicted_indices)
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since):
now = time.time()
s = now - since
return asMinutes(s)
if __name__ == "__main__":
# Check GPU availability
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
print()
total_train_set_size = 133300
batch_size = 100
# Change as needed
learning_rate = 0.005
momentum = 0.9
embedding_dim = 256
hidden_size = 512
num_epochs = 1
en_path = './data/train/train.en.txt'
viet_path = './data/train/train.vi.txt'
en_vocab_path = './data/vocab/vocab.en.txt'
viet_vocab_path = './data/vocab/vocab.vi.txt'
train_dataset = EnVietDataset(en_path, viet_path, en_vocab_path, viet_vocab_path)
train_loader = DataLoader(train_dataset, batch_size=batch_size, collate_fn=collate_fn, shuffle=True)
# Print out some random examples from the data
print("Data examples:")
random_indices = torch.randperm(len(train_dataset))[:8].tolist()
for index in random_indices:
en_indices, viet_indices = train_dataset.en_inputs[index], train_dataset.viet_translations[index]
en_input = train_dataset.indices_to_tokens(en_indices, lang='en')
viet_translation = train_dataset.indices_to_tokens(viet_indices, lang='viet')
print(f"English: {en_input}. Vietnamese: {viet_translation}")
print()
encoder = AttnEncoderRNN(len(train_dataset.en_vocab), embedding_dim, hidden_size)
decoder = AttnDecoderRNN(len(train_dataset.viet_vocab), embedding_dim, hidden_size)
encoder.to(device)
decoder.to(device)
encoder_optim = optim.SGD(encoder.parameters(), lr=learning_rate, momentum=momentum)
decoder_optim = optim.SGD(decoder.parameters(), lr=learning_rate, momentum=momentum)
loss_fn = nn.NLLLoss()
training_losses = []
start = time.time()
for epoch in range(num_epochs):
encoder.train()
decoder.train()
for i, data in enumerate(train_loader):
# Training on Colab's GPU throws an ambiguous error, probably because the last batch contains only the
# remaining sentence pairs (fewer than batch_size). So, skip the last batch.
if i == (total_train_set_size // batch_size): break
en, viet, e_lens, v_lens = data[0][0].to(device), data[0][1].to(device), data[1], data[2]
batch_loss = train(en, viet, e_lens, v_lens, encoder, decoder, encoder_optim, decoder_optim, loss_fn, device)
training_losses.append(batch_loss)
# Print every 20 mini-batches
if i % 20 == 19:
print(f'[Epoch {epoch + 1}, Batch {i + 1} ({(i + 1) * batch_size} translations)] ({timeSince(start)}): {batch_loss}')
plt.figure(1)
plt.title('NLL Loss per Batch')
plt.xlabel(f'Batch (1 batch = {batch_size} translations)')
plt.ylabel('NLL Loss')
plt.plot(training_losses)
plt.show()
torch.save(encoder.state_dict(), './src/trained_models/attention_encoder.pth')
torch.save(decoder.state_dict(), './src/trained_models/attention_decoder.pth')
test_enc = AttnEncoderRNN(len(train_dataset.en_vocab), embedding_dim, hidden_size)
test_enc.load_state_dict(torch.load('./src/trained_models/attention_encoder.pth'))
test_dec = AttnDecoderRNN(len(train_dataset.viet_vocab), embedding_dim, hidden_size)
test_dec.load_state_dict(torch.load('./src/trained_models/attention_decoder.pth'))
test_enc.eval()
test_dec.eval()
while True:
en_input = input('> English: ')
if en_input == '<STOP>': break
en_input_tokens = en_input.strip().split(' ')
en_input_tokens.insert(0, '<s>')
en_input_tokens.append('</s>')
en_input_indices = train_dataset.tokens_to_indices(en_input_tokens, lang='en')
test_en_input = torch.zeros((1, len(en_input_tokens)))
test_en_input[0] = en_input_indices
test_en_input = test_en_input.long()
test_en_input = test_en_input
with torch.no_grad():
predicted_indices = evaluate(test_en_input, [len(en_input_tokens)], test_enc, test_dec)
print(f'> Vietnamese: {train_dataset.indices_to_tokens(predicted_indices, lang="viet")}')
print()
# KENV
<a href=mailto:[email protected]>V. Fedorov</a>, <a href=mailto:[email protected]>D. Nikiforov</a>, <a href=http://www.inp.nsk.su/~petrenko/>A. Petrenko</a>, (Novosibirsk, 2019)
## Manual
```
import kenv as kv
```
You can get help on the package:
```
#help(kv)
```
## Simulation
To obtain an envelope, you need to create a beam and an accelerator, then build a simulation of this beam in this accelerator and run the tracking.
```
beam = kv.Beam(energy=2,
current=2e3,
radius=50e-3,
x = 30e-3,
y = 0,
rp=55.0e-3,
normalized_emittance=1000e-6)
```
You can print the beam parameters:
```
print(beam)
accelerator = kv.Accelerator(0.7, 15, 0.001)
Solenoids = [
[ 0.95, 0.001, 'Bz.dat', 'Sol. 1' ],
[ 2.1, 0.03, 'Bz.dat', 'Sol. 2' ],
[ 2.9077, 0.001, 'Bz.dat', 'Sol. 3' ],
[ 4.0024, 0.03, 'Bz.dat', 'Sol. 4' ],
[ 5.642, 0.03, 'Bz.dat', 'Sol. 5' ],
[ 6.760, 0.001, 'Bz.dat', 'Sol. 6' ],
]
Accels = [
[ 7.456, -0.9, 'Ez.dat', 'Cavity 3'],
[ 8.838, -0.9, 'Ez.dat', 'Cavity 4'],
[ 10.220, -0.9, 'Ez.dat', 'Cavity 5'],
[ 11.602, -0.9, 'Ez.dat', 'Cavity 6'],
[ 12.984, -0.9, 'Ez.dat', 'Cavity 7'],
[ 14.366, -0.9, 'Ez.dat', 'Cavity 8'],
]
for z0, B0, filename, name in Solenoids:
accelerator.Bz_beamline[name] = kv.Element(z0, B0, filename, name)
for z0, E0, filename, name in Accels:
accelerator.Ez_beamline[name] = kv.Element(z0, E0, filename, name)
accelerator.compile()
simulation = kv.Simulation(beam, accelerator)
simulation.track()
```
## Graphics with HoloViews
### matplotlib
```
import holoviews as hv
hv.extension('matplotlib')
%opts Layout [tight=True]
%output size=250 backend='matplotlib' fig='png'
%opts Area Curve [aspect=3 show_grid=True]
%opts Area (alpha=0.25)
%opts Curve (alpha=0.5)
%opts Area.Beam [aspect=3 show_grid=True] (color='red' alpha=0.3)
import warnings
warnings.filterwarnings('ignore')
dim_z = hv.Dimension('z', unit='m', range=(accelerator.start, accelerator.stop))
dim_z_zoom = hv.Dimension('z_zoom', unit='m', range=(7, 8.0))
dim_Bz = hv.Dimension('Bz', unit='T', label='Bz')
dim_Ez = hv.Dimension('Ez', unit='MV/m', label='Ez')
dim_dEzdz = hv.Dimension('dEzdz', unit='$\mathrm{MV/m^2}$', label='$dE_z/dz$', range=(-5,+5))
dim_dEzdz_zoom = hv.Dimension('dEzdz_zoom', unit='$\mathrm{MV/m^2}$', label='$dE_z/dz$_zoom', range=(-8,+8))
dim_dBzdz = hv.Dimension('dBzdz', unit='T/m', label='$dB_z/dz$', range=(-0.22,+0.22))
dim_x = hv.Dimension('x', label="Coordinate", unit='mm', range=(-40,40))
dim_y = hv.Dimension('y', label="Coordinate", unit='mm', range=(-40,40))
dim_r = dim_x
dim_phase = hv.Dimension('phase', label="Larmor phase $\Psi$", unit='rad')
z_Ez = hv.Area((accelerator.parameter,accelerator.Ez(accelerator.parameter)), kdims=dim_z, vdims=dim_Ez)
z_dEzdz = hv.Area((accelerator.parameter,accelerator.dEzdz(accelerator.parameter)), kdims=dim_z, vdims=dim_dEzdz)
z_dEzdz_zoom = hv.Area((accelerator.parameter,accelerator.dEzdz(accelerator.parameter)), kdims=dim_z_zoom, vdims=dim_dEzdz_zoom)
z_Bz = hv.Area((accelerator.parameter,accelerator.Bz(accelerator.parameter)), kdims=dim_z, vdims=dim_Bz)
z_dBzdz = hv.Area((accelerator.parameter,accelerator.dBzdz(accelerator.parameter)), kdims=dim_z, vdims=dim_dBzdz)
z_r = hv.Area(((accelerator.parameter,simulation.envelope_x(accelerator.parameter)*1e3)), kdims=[dim_z], vdims=[dim_r], group='Beam')
z_x_centroid = hv.Curve(((accelerator.parameter,simulation.centroid_x(accelerator.parameter)*1e3)), kdims=[dim_z], vdims=[dim_x], group='Beam', label='x')
z_y_centroid = hv.Curve(((accelerator.parameter,simulation.centroid_y(accelerator.parameter)*1e3)), kdims=[dim_z], vdims=[dim_y], group='Beam', label='y')
z_larmor_phase = hv.Curve(((accelerator.parameter,simulation.larmor_angle(accelerator.parameter))), kdims=[dim_z], vdims=[dim_phase])
(z_x_centroid*z_y_centroid+z_larmor_phase).cols(1)
(z_Bz + z_Ez + z_dEzdz + z_dEzdz_zoom).cols(1)
```
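The beam envelope element `z_r` is defined above but never displayed. As a minimal sketch (assuming the same notebook state as above), it can be shown together with the centroid curves by overlaying the elements:
```
# Overlay the beam envelope with the x/y centroid trajectories
z_r * z_x_centroid * z_y_centroid
```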
```
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from importlib import reload
import analysis
local_data = pd.read_csv('MapFileData-WithCountyResultsAndCovariates.csv')
national_data = pd.read_csv('national_data.csv')
before = 'm.RR_{}_Armed_Versus_Unarmed'
after = 'm.RR_{}_Unarmed_Versus_Armed'
# Create one more column for simplicity with inverse ratios
for r in ['Black', 'White', 'Hispanic']:
local_data[after.format(r)] = 1. / local_data[before.format(r)]
national_data[after.format(r)] = 1. / national_data[before.format(r)]
county = 'San Francisco'
local_data.loc[local_data['county'] == county][national_data.columns]
national_data
l_edge = 2.5
r_edge = .5
b_edge = .5
t_edge = .5
```
# Unarmed vs. Unarmed
```
features = ['m.RR_Black_Unarmed_Versus_White_Unarmed', 'm.RR_Hispanic_Unarmed_Versus_White_Unarmed']
reload(analysis)
plt.close()
max_ratio = analysis.get_max_ratio(national_data, local_data, county, features)
height = 2 * len(features) + len(features) // 2 + 1
figsize = (2 * np.ceil(max_ratio) + l_edge + r_edge, height + t_edge + b_edge)
print('x inches: {}'.format(figsize[0]))
f = plt.figure(figsize=figsize)
ax = f.add_axes([ l_edge / figsize[0], b_edge / figsize[1],
2 * np.ceil(max_ratio) / figsize[0], height / figsize[1]])
analysis.make_barplot(national_data, local_data, county, features, np.ceil(max_ratio), ax)
plt.savefig('{}_{}.png'.format(county, 'U-U'), dpi=300)
```
# Armed vs. Armed
```
features = ['m.RR_Black_Armed_Versus_White_Armed', 'm.RR_Hispanic_Armed_Versus_White_Armed']
reload(analysis)
plt.close()
max_ratio = analysis.get_max_ratio(national_data, local_data, county, features)
height = 2 * len(features) + len(features) // 2 + 1
figsize = (2 * np.ceil(max_ratio) + l_edge + r_edge, height + t_edge + b_edge)
print('x inches: {}'.format(figsize[0]))
f = plt.figure(figsize=figsize)
ax = f.add_axes([ l_edge / figsize[0], b_edge / figsize[1],
2 * np.ceil(max_ratio) / figsize[0], height / figsize[1]])
analysis.make_barplot(national_data, local_data, county, features, np.ceil(max_ratio), ax)
plt.savefig('{}_{}.png'.format(county, 'A-A'), dpi=300)
```
# Unarmed vs. Armed
```
features = ['m.RR_Black_Unarmed_Versus_White_Armed',
'm.RR_Hispanic_Unarmed_Versus_White_Armed',
'm.RR_White_Unarmed_Versus_Armed']
reload(analysis)
plt.close()
max_ratio = analysis.get_max_ratio(national_data, local_data, county, features)
height = 2 * len(features) + len(features) // 2 + 1
figsize = (2 * np.ceil(max_ratio) + l_edge + r_edge, height + t_edge + b_edge)
print('x inches: {}'.format(figsize[0]))
f = plt.figure(figsize=figsize)
ax = f.add_axes([ l_edge / figsize[0], b_edge / figsize[1],
2 * np.ceil(max_ratio) / figsize[0], height / figsize[1]])
analysis.make_barplot(national_data, local_data, county, features, np.ceil(max_ratio), ax)
plt.savefig('{}_{}.png'.format(county, 'U-A'), dpi=300)
```
```
import pandas as pd
import numpy as np
import os, re, sys, json
from collections import defaultdict
from bs4 import BeautifulSoup
# get list of xml files
xmls = np.array(os.listdir('OBO_XML_7-2/sessionsPapers')[1:])
# split files into 25 or 50 year spans
span_labels = ['1674-1699', '1700-1749', '1750-1799', '1800-1824', '1825-1849',
'1850-1874', '1875-1899', '1900-1913']
spans = [xmls[np.all([xmls > s[:4], xmls < s[5:] + '9999.xml'], axis=0)] for s in span_labels]
# check that split files add up to total
sum([len(s) for s in spans]) == len(xmls)
def session_to_df(xml, data):
'''Append one record per trial in this session to the data dict and return the updated dict.'''
# read in file
with open(xml) as f:
try:
session = f.read()
except UnicodeDecodeError:
print(xml + " couldn't be read")
return data
soup = BeautifulSoup(session, 'xml')
# separate session into trials
trials = soup.find_all('div1', type='trialAccount')
# get the session id
session_id = soup.find('div0').get('id')
# iterate through trials
for trial in trials:
trial_soup = BeautifulSoup(str(trial), 'xml')
# get the text
trial_txt = trial_soup.get_text()
# remove leading/trailing new lines, extra new lines, extra spaces
trial_txt = re.sub(r'^\n+|\n+$', '', trial_txt)
trial_txt = re.sub(r'\n', '', trial_txt)
trial_txt = re.sub(r'\s\s+', ' ', trial_txt)
# add to data dictionary
data['transcript'].append(trial_txt)
# Get offense category and subcategory;
# Note: we simplify here - only a small percentage of trials have
# ... several offense categories, and when they do, it is nearly always
# ... various subcategories of theft.
# So we only save the first offense-suboffense we encounter for each trial.
# We also guard ourselves against somebody having forgotten to mark the category.
try:
mainc = trial_soup.find('interp',type='offenceCategory').get('value')
data['offense'].append(mainc.strip())
except AttributeError:
data['offense'].append('uncategorized')
try:
subc = trial_soup.find('interp',type='offenceSubcategory').get('value')
data['offense_subcategory'].append(subc.strip())
except AttributeError:
data['offense_subcategory'].append('none')
# Get verdict category/subcategory (first verdict only)
try:
mainc = trial_soup.find('interp',type='verdictCategory').get('value')
data['verdict'].append(mainc.strip())
except AttributeError:
data['verdict'].append('uncategorized')
try:
subc = trial_soup.find('interp',type='verdictSubcategory').get('value')
data['verdict_subcategory'].append(subc.strip())
except AttributeError:
data['verdict_subcategory'].append('none')
# get the punishment and sub-punishment (first only)
try:
mainc = trial_soup.find('interp',type='punishmentCategory').get('value')
data['punishment'].append(mainc.strip())
except AttributeError:
data['punishment'].append('uncategorized')
try:
subc = trial_soup.find('interp',type='punishmentSubcategory').get('value')
data['punishment_subcategory'].append(subc.strip())
except AttributeError:
data['punishment_subcategory'].append('none')
# get the trial id
data['trial_id'].append(trial_soup.find('div1').get('id'))
data['session'].append(session_id)
return data
# range of sessions from about 25 yrs before and after the Bloody Code repeal
bloody_span = xmls[np.all([xmls > '1800', xmls < '1850'], axis=0)]
data = defaultdict(list)
count = 0
for session in bloody_span:
if count % 25 == 0:
print('{}% processed'.format(count * 100 / len(bloody_span)))
data = session_to_df('OBO_XML_7-2/sessionsPapers/' + session, data)
count+=1
bloody_data = pd.DataFrame(data)
bloody_data.set_index('trial_id', inplace=True)
bloody_data.shape
bloody_data.to_csv('obc_1800_1850.csv')
```
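A quick sanity check of the exported file can be done by reading it back (a small sketch, assuming the CSV written above and the imports from the same session):
```
df = pd.read_csv('obc_1800_1850.csv', index_col='trial_id')
print(df.shape)
print(df['offense'].value_counts().head())
```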
# 09 - Beginner Exercises
* Lambda
---
## 🍩🍩🍩
1. Create a lambda function that takes an argument $x$ and returns $x^{2}$.
Then assign it to the variable Pow, and print Pow(2), Pow(3), Pow(1400).
```
# Write your own code in this cell
Pow =
```
## 🍩🍩🍩
2. Create a lambda function that takes two arguments $a$ and $b$ and returns $\frac{a + b}{a * b}$.
Then assign it to the variable **Alpha**, and print Alpha(1,2), Alpha(2,3), Alpha(3,4).
```
# Write your own code in this cell
Alpha =
```
## 🍩
3. We have a list of famous singers from different countries. Using **```lambda```**, write a function that, when called for each item of **```persons```**, returns a tuple containing the **name** and **country** of each singer but not the age, and print the resulting list.
```
# Write your own code in this cell
persons = [("Paul David Hewson", 61, "Ireland"),
("Saeed Mohammadi", 63, "Iran"),
("Alla Borisovna Pugacheva", 77, "Russia"),
("David Jon Gilmour", 75, "United Kingdom"),
("Aryana Sayeed", 36, "Afghanistan"),
("Céline Marie Claudette Dion", 53, "Canad"),
("Caetano Emanuel Viana Telles Veloso", 79, "Brazil"),]
```
## 🍩
4. As you know, the sort method takes two optional arguments: reverse and key.
$$list.sort(reverse=True|False, key=myFunc)$$
$$sorted(iterable, key=key, reverse=reverse)$$
reverse=True sorts the list in descending order; the default is reverse=False.
The key argument can be set to a function that specifies the **sorting criterion**.
Using **```lambda```** and the **```sort()```** method, please sort the list of singers below based on the **second letter** of their name.
In the next step, sort the list of singers based on the **last letter** of their name using the **```sorted()```** builtin function.
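For instance, with a neutral word list (so this does not give away the exercise answer):
```
words = ["banana", "cherry", "apple"]
words.sort(key=lambda w: w[1]) # sort by the second letter: 'a', 'h', 'p'
print(words) # ['banana', 'cherry', 'apple']
print(sorted(words, key=lambda w: w[-1])) # sort by the last letter -> ['banana', 'apple', 'cherry']
```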
```
# Write your own code in this cell
singers = ["Paul David Hewson", "Saeed Mohammadi" , "Aryana Sayeed",
"Alla Borisovna Pugacheva", "Alla Borisovna Pugacheva",
"David Jon Gilmour", "Céline Marie Claudette Dion",
"Caetano Emanuel Viana Telles Veloso"]
```
## 🍩
5. Please arrange the persons list below by age, from oldest to youngest.
```
# Write your own code in this cell
persons = [("Paul David Hewson", 61, "Ireland"),
("Saeed Mohammadi", 63, "Iran"),
("Alla Borisovna Pugacheva", 77, "Russia"),
("David Jon Gilmour", 75, "United Kingdom"),
("Aryana Sayeed", 36, "Afghanistan"),
("Céline Marie Claudette Dion", 53, "Canad"),
("Caetano Emanuel Viana Telles Veloso", 79, "Brazil"),]
```
## 🌶️
6. Using lambda and the sorted() function, write a function that puts the negative numbers on the left side of the list and the positive numbers on the right side.
```
# Write your own code in this cell
mylist = [-1 , -7 ,3,14,6,12,-2,9,2,1,-4]
```
# Assignment 1: Language Models, Part 2
* Continuing from Part 1: last week we implemented a language model over single sentences; this week we build on that model and explore further.
* This week's experiments consist of two parts:
1. Using BinaryLogLoss with negative sampling
2. Training with additional context from the previous sentence
**First, import the packages needed for this assignment and set the random seed**
```
import datetime
import random
from collections import Counter, defaultdict
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchtext
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
set_random_seed(2020)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
```
**Set the compute device and the dataset path**
```
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
data_path = Path('/media/bnu/data/nlp-practice/language-model')
print('PyTorch Version:', torch.__version__)
print('-' * 60)
if torch.cuda.is_available():
print('CUDA Device Count:', torch.cuda.device_count())
print('CUDA Device Name:')
for i in range(torch.cuda.device_count()):
print('\t', torch.cuda.get_device_name(i))
print('CUDA Current Device Index:', torch.cuda.current_device())
print('-' * 60)
print('Data Path:', data_path)
```
# BinaryLogLoss + Negative Sampling
## Data Processing
### Defining the Vocabulary Class
* The `Vocab` class stores the vocabulary
* `Vocab` contains the mappings between tokens and indices
```
class Vocab:
def __init__(self, vocab_path):
self.stoi = {} # token -> index (dict)
self.itos = [] # index -> token (list)
with open(vocab_path) as f:
# bobsue.voc.txt中,每一行是一个单词
for w in f.readlines():
w = w.strip()
if w not in self.stoi:
self.stoi[w] = len(self.itos)
self.itos.append(w)
def __len__(self):
return len(self.itos)
```
**Quick test**
```
vocab = Vocab(data_path / 'bobsue.voc.txt')
print('单词表大小:', len(vocab))
print('-' * 60)
print('样例(单词 -> 索引):')
print(list(vocab.stoi.items())[:5])
print('-' * 60)
print('样例(索引 -> 单词):')
print(list(enumerate(vocab.itos))[:5])
```
### Defining the Corpus
* The `Corpus` class reads the training, validation and test corpora
* Each line of a corpus file is one sentence, i.e. one training sample
* After reading, each sentence is converted into a list of indices via `Vocab`
* `Corpus` also keeps the word counts and the word-frequency distribution used for negative sampling (shown below)
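When `uniform=False`, the sampling distribution for negatives is the usual word2vec smoothing of the empirical word frequencies $f(w)$ with exponent `freq_coef` ($\alpha = 0.75$ by default):
$$P_{neg}(w) = \frac{f(w)^{\alpha}}{\sum_{w'} f(w')^{\alpha}}$$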
```
class Corpus:
def __init__(self, data_path, sort_by_len=False,
uniform=False, freq_coef=0.75):
self.vocab = Vocab(data_path / 'bobsue.voc.txt')
self.sort_by_len = sort_by_len
self.train_data = self.tokenize(data_path / 'bobsue.lm.train.txt')
self.valid_data = self.tokenize(data_path / 'bobsue.lm.dev.txt')
self.test_data = self.tokenize(data_path / 'bobsue.lm.test.txt')
# 统计训练集的单词计数
self.word_counter = Counter()
for x in self.train_data:
# 注意<s>不在我们的预测范围内,不要统计
self.word_counter += Counter(x[1:])
# 训练集中需要预测的总词数
total_words = len(list(self.word_counter.elements()))
if uniform: # 均匀分布
self.word_freqs = np.array(
[0.] + [1. for _ in range(len(self.vocab) - 1)],
dtype=np.float32
)
self.word_freqs = self.word_freqs / sum(self.word_freqs)
else: # 词频分布(提升freq_coef次方)
self.word_freqs = np.array(
[self.word_counter[i] for i in range(len(self.vocab))],
dtype=np.float32
)
self.word_freqs = self.word_freqs / sum(self.word_freqs)
self.word_freqs = self.word_freqs ** freq_coef
self.word_freqs = self.word_freqs / sum(self.word_freqs)
def tokenize(self, text_path):
with open(text_path) as f:
index_data = [] # 索引数据,存储每个样本的单词索引列表
for s in f.readlines():
index_data.append(
self.sentence_to_index(s)
)
if self.sort_by_len: # 为了提升训练速度,可以考虑将样本按照长度排序,这样可以减少padding
index_data = sorted(index_data, key=lambda x: len(x), reverse=True)
return index_data
def sentence_to_index(self, s):
return [self.vocab.stoi[w] for w in s.split()]
def index_to_sentence(self, x):
return ' '.join([self.vocab.itos[i] for i in x])
```
**Quick test**
```
corpus = Corpus(data_path, sort_by_len=False)
print('训练集句子数目:', len(corpus.train_data))
print('验证集句子数目:', len(corpus.valid_data))
print('测试集句子数目:', len(corpus.test_data))
print('-' * 60)
print('训练集总共单词数目:', sum([len(x) for x in corpus.train_data]))
print('验证集总共单词数目:', sum([len(x) for x in corpus.valid_data]))
print('测试集总共单词数目:', sum([len(x) for x in corpus.test_data]))
print('-' * 60)
print('训练集预测单词数目:', sum([len(x) - 1 for x in corpus.train_data]))
print('验证集预测单词数目:', sum([len(x) - 1 for x in corpus.valid_data]))
print('测试集预测单词数目:', sum([len(x) - 1 for x in corpus.test_data]))
print('-' * 60)
print('数据样本:')
for i in range(5):
print(corpus.train_data[i])
print(corpus.index_to_sentence(corpus.train_data[i]))
print('-' * 60)
print()
corpus = Corpus(data_path, sort_by_len=False, uniform=True)
print('均匀分布:')
print('词频样本:', corpus.word_freqs[:5])
print('词频个数:', len(corpus.word_freqs))
corpus = Corpus(data_path, sort_by_len=False, uniform=False, freq_coef=0.75)
print('词频分布:')
print('词频样本:', corpus.word_freqs[:5])
print('词频个数:', len(corpus.word_freqs))
```
### Defining the Negative-Sampling Dataset
* We build our language-model dataset on top of PyTorch's `Dataset`
* A custom subclass of `Dataset` must implement the `__len__` and `__getitem__` methods
* For each sample, the input is the first n-1 words, the positives are the last n-1 words, and the negatives are sampled from the word-frequency distribution (`n_negs` per position); a small sampling illustration follows
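As a quick illustration of how the negatives are drawn (assuming the `corpus` built above), `torch.multinomial` samples indices in proportion to `word_freqs`:
```
import torch
freqs = torch.FloatTensor(corpus.word_freqs)
# draw 5 candidate negative indices for one position (sampled without replacement)
print(torch.multinomial(freqs, 5))
```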
```
class BobSueNegSampleDataSet(torch.utils.data.Dataset):
def __init__(self, index_data, word_freqs, n_negs=20):
self.index_data = index_data # 转换为序号的文本
self.n_negs = n_negs # 生成负例个数
self.word_freqs = torch.FloatTensor(word_freqs) # 词频
def __getitem__(self, i):
inputs = torch.LongTensor(self.index_data[i][:-1])
poss = torch.LongTensor(self.index_data[i][1:])
# 生成n_negs个负例
negs = torch.zeros((len(poss), self.n_negs), dtype=torch.long)
for i in range(len(poss)):
negs[i] = torch.multinomial(self.word_freqs, self.n_negs)
return inputs, poss, negs
def __len__(self):
return len(self.index_data)
```
**Quick test**
```
corpus = Corpus(data_path, sort_by_len=False, uniform=False, freq_coef=0.75)
train_set = BobSueNegSampleDataSet(corpus.train_data, corpus.word_freqs)
print('训练集大小:', len(train_set))
print()
print('训练集样本:')
print('输入大小:', train_set[10][0].shape)
print('正例大小:', train_set[10][1].shape)
print('负例大小:', train_set[10][2].shape)
```
### Defining the DataLoader
* As in Part 1, a custom collate_fn handles padding and batching
```
def neglm_collate_fn(batch):
# 首先将batch的格式进行转换
# batch[0]:Inputs
# batch[1]: Poss
# batch[2]: Negs
batch = list(zip(*batch))
# lengths: (batch_size)
lengths = torch.LongTensor([len(x) for x in batch[0]])
# inputs: (batch_size, max_len)
inputs = nn.utils.rnn.pad_sequence(batch[0], batch_first=True)
# poss: (batch_size, max_len)
poss = nn.utils.rnn.pad_sequence(batch[1], batch_first=True)
# negs: (batch_size, max_len, n_negs)
negs = nn.utils.rnn.pad_sequence(batch[2], batch_first=True)
# mask: (batch_size, max_len)
mask = (poss != 0).float()
return inputs, poss, negs, lengths, mask
```
**Quick test**
```
train_loader = torch.utils.data.DataLoader(
dataset=train_set,
batch_size=8,
shuffle=True,
collate_fn=neglm_collate_fn
)
inputs, poss, negs, lengths, mask = next(iter(train_loader))
print('Input Shape:', inputs.shape)
print('Poss Shape:', poss.shape)
print('Negs Shape:', negs.shape)
print('-' * 60)
print('Lengths:')
print(lengths)
print('Mask:')
print(mask)
```
## Defining the Network
* The architecture is LSTM-based; the embedding dimension equals the LSTM hidden size
* The inputs use one embedding layer, while the positives and negatives share a second embedding layer
* The loss function is the same negative-sampling loss as in word2vec; it is written out below
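Concretely, for a hidden state $h_t$, its positive target $w^+_t$ and $K$ sampled negatives $w^-_{t,1},\dots,w^-_{t,K}$, the per-position loss implemented below is
$$\ell_t = -\log\sigma\big(h_t^\top e_{w^+_t}\big) - \frac{1}{K}\sum_{k=1}^{K}\log\sigma\big(-h_t^\top e_{w^-_{t,k}}\big)$$
where $e$ denotes the output embeddings and padded positions are masked out.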
```
class NegSampleLM(nn.Module):
def __init__(self, n_words, n_embed=200, dropout=0.5):
super(NegSampleLM, self).__init__()
self.drop = nn.Dropout(dropout)  # use the dropout argument instead of a hard-coded 0.5
# 输入的Embedding
self.embed_in = nn.Embedding(n_words, n_embed)
# 输出的Embedding
self.embed_out = nn.Embedding(n_words, n_embed)
# 这里embed_size一定要和hidden_size相同,为了之后点积计算loss
self.lstm = nn.LSTM(n_embed, n_embed, batch_first=True)
def forward(self, inputs, poss, negs, lengths, mask):
# x_embed: (batch_size, seq_len, embed_size)
x_embed = self.drop(self.embed_in(inputs))
# poss_embed: (batch_size, seq_len, embed_size)
poss_embed = self.embed_out(poss)
# negs_embed: (batch_size, seq_len, n_negs, embed_size)
negs_embed = self.embed_out(negs)
x_embed = nn.utils.rnn.pack_padded_sequence(
x_embed, lengths, batch_first=True, enforce_sorted=False
)
# x_lstm: (batch_size, seq_len, embed_size)
x_lstm, _ = self.lstm(x_embed)
x_lstm, _ = nn.utils.rnn.pad_packed_sequence(
x_lstm, batch_first=True
)
# x_lstm: (batch_size * seq_len, embed_size, 1)
x_lstm = x_lstm.view(-1, x_lstm.shape[2], 1)
# poss_embed: (batch_size * seq_len, 1, embed_size)
poss_embed = poss_embed.view(-1, 1, poss_embed.shape[2])
# negs_embed: (batch_size * seq_len, n_negs, embeds)
negs_embed = negs_embed.view(-1, negs_embed.shape[2], negs_embed.shape[3])
# poss_mm: (batch_size * seq_len)
poss_mm = torch.bmm(poss_embed, x_lstm).squeeze()
# negs_mm: (batch_size * seq_len, n_negs)
negs_mm = torch.bmm(negs_embed, -x_lstm).squeeze()
mask = mask.view(-1)
poss_loss = F.logsigmoid(poss_mm) * mask
negs_loss = F.logsigmoid(negs_mm).mean(1) * mask
total_loss = -(poss_loss + negs_loss)
return total_loss.mean(), x_lstm
```
**Quick test**
```
corpus = Corpus(data_path, sort_by_len=False, uniform=False, freq_coef=0.75)
train_set = BobSueNegSampleDataSet(corpus.train_data, corpus.word_freqs)
model = NegSampleLM(len(corpus.vocab), n_embed=200)
train_loader = torch.utils.data.DataLoader(
dataset=train_set,
batch_size=8,
shuffle=True,
collate_fn=neglm_collate_fn
)
inputs, poss, negs, lengths, mask = next(iter(train_loader))
loss, x_lstm = model(inputs, poss, negs, lengths, mask)
print('损失:', loss.item())
```
* A helper function generates predictions
* It multiplies the LSTM outputs by the output embedding matrix and takes the index with the largest score as the predicted word
```
def generate_prediction(model, x_lstm):
with torch.no_grad():
x_lstm = x_lstm.squeeze() # (seq_len, embedding_size)
embed_weight = model.embed_out.weight.transpose(0, 1) # (embedding_size, n_words)
preds = x_lstm @ embed_weight
preds = preds.argmax(dim=-1)
return preds
preds = generate_prediction(model, x_lstm)
preds = preds.view(-1, poss.shape[1])
print('Poss Shape:', poss.shape)
print('Preds Shape:', preds.shape)
((preds == poss) * mask).sum()
```
## Model Training
### Defining the Negative-Sampling Learner
* To make later comparisons of training time convenient, the datasets, data loaders and the training loop are wrapped in a learner class (the batch size is configurable and defaults to 8)
```
class NegSampleLearner:
def __init__(self, corpus, n_embed=200, dropout=0.5, n_negs=20,
batch_size=8):
self.corpus = corpus
self.model = NegSampleLM(len(corpus.vocab), n_embed, dropout).to(device)
self.optimizer = torch.optim.Adam(self.model.parameters())
self.n_negs = n_negs
self.batch_size = batch_size
def fit(self, num_epochs):
train_set = BobSueNegSampleDataSet(
self.corpus.train_data,
self.corpus.word_freqs,
n_negs=self.n_negs,
)
valid_set = BobSueNegSampleDataSet(
self.corpus.valid_data,
self.corpus.word_freqs,
n_negs=self.n_negs
)
train_loader = torch.utils.data.DataLoader(
dataset=train_set,
batch_size=self.batch_size,
shuffle=True,
collate_fn=neglm_collate_fn
)
valid_loader = torch.utils.data.DataLoader(
dataset=valid_set,
batch_size=self.batch_size,
shuffle=False,
collate_fn=neglm_collate_fn
)
for epoch in range(num_epochs):
start_time = datetime.datetime.now()
train_loss, train_words = self._make_train_step(train_loader)
end_time = datetime.datetime.now()
print(f'Epoch {epoch+1}:')
print('Train Step --> Loss: {:.3f}, Words: {}, Time: {}s'.format(
train_loss, train_words, (end_time-start_time).seconds
))
valid_loss, valid_acc, valid_words = self._make_valid_step(valid_loader)
print('Valid Step --> Loss: {:.3f}, Acc: {:.3f}, Words: {}'.format(
valid_loss, valid_acc, valid_words
))
def _make_train_step(self, train_loader):
# 训练模式
self.model.train()
# 总损失
total_loss = 0.0
# 预测单词总数
total_words = 0
for inputs, poss, negs, lengths, mask in train_loader:
inputs = inputs.to(device)
poss = poss.to(device)
negs = negs.to(device)
lengths = lengths.to(device)
mask = mask.to(device)
# 模型损失
loss, _ = self.model(inputs, poss, negs, lengths, mask)
# 反向传播
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# 统计信息
sent_words = lengths.sum().item()
total_words += sent_words
total_loss += loss.item() * sent_words
return total_loss / total_words, total_words
def _make_valid_step(self, valid_loader):
# 验证模式
self.model.eval()
# 总损失
total_loss = 0.0
# 预测正确个数,预测单词总数
total_correct, total_words = 0, 0
with torch.no_grad():
for inputs, poss, negs, lengths, mask in valid_loader:
inputs = inputs.to(device)
poss = poss.to(device)
negs = negs.to(device)
lengths = lengths.to(device)
mask = mask.to(device)
# 模型损失
loss, x_lstm = self.model(inputs, poss, negs, lengths, mask)
# 生成预测,计算准确率
preds = generate_prediction(self.model, x_lstm)
preds = preds.view(-1, poss.shape[1])
total_correct += ((preds == poss) * mask).sum().item()
# 统计信息
sent_words = lengths.sum().item()
total_words += sent_words
total_loss += loss.item() * sent_words
return total_loss / total_words, total_correct / total_words, total_words
def predict(self):
test_set = BobSueNegSampleDataSet(
self.corpus.test_data,
self.corpus.word_freqs,
n_negs=self.n_negs
)
test_loader = torch.utils.data.DataLoader(
dataset=test_set,
batch_size=self.batch_size,
shuffle=False,
collate_fn=neglm_collate_fn
)
# 验证模式
self.model.eval()
# 预测正确个数,预测单词总数
total_correct, total_words = 0, 0
with torch.no_grad():
for inputs, poss, negs, lengths, mask in test_loader:
inputs = inputs.to(device)
poss = poss.to(device)
negs = negs.to(device)
lengths = lengths.to(device)
mask = mask.to(device)
# 模型损失
loss, x_lstm = self.model(inputs, poss, negs, lengths, mask)
# 生成预测,计算准确率
preds = generate_prediction(self.model, x_lstm)
preds = preds.view(-1, poss.shape[1])
total_correct += ((preds == poss) * mask).sum().item()
# 统计信息
sent_words = lengths.sum().item()
total_words += sent_words
return total_correct / total_words, total_words
```
### Training
* Everything is ready; set the hyperparameters and start training
```
torch.cuda.empty_cache()
corpus = Corpus(data_path, sort_by_len=False, uniform=True, freq_coef=0.1)
learner = NegSampleLearner(corpus, n_embed=200, dropout=0.5, n_negs=20)
learner.fit(10)
```
* A quick look at the test-set results after training for 10 epochs
```
test_acc, test_word = learner.predict()
print('测试集预测总词数:', test_word)
print('测试集预测准确率:', test_acc)
```
# A Language Model Using the Previous-Sentence Context
* This part builds on the Part 1 implementation, but this time it uses the previous sentence as context
* We rebuild the dataset and feed the context into the model to improve accuracy
## Data Processing
### Defining the Corpus
* Building on Part 1, each line is split on the tab character into a (previous sentence, current sentence) pair, and both are stored in the corresponding data lists
```
class ContextCorpus:
def __init__(self, data_path):
self.vocab = Vocab(data_path / 'bobsue.voc.txt')
self.train_data = self.tokenize(data_path / 'bobsue.prevsent.train.tsv')
self.valid_data = self.tokenize(data_path / 'bobsue.prevsent.dev.tsv')
self.test_data = self.tokenize(data_path / 'bobsue.prevsent.test.tsv')
def tokenize(self, text_path):
with open(text_path) as f:
index_data = []
for s in f.readlines():
t = s.split('\t')
index_data.append(
(self.sentence_to_index(t[0]),
self.sentence_to_index(t[1]))
)
return index_data
def sentence_to_index(self, s):
return [self.vocab.stoi[w] for w in s.split()]
def index_to_sentence(self, x):
return ' '.join([self.vocab.itos[i] for i in x])
corpus = ContextCorpus(data_path)
print('训练集句子数目:', len(corpus.train_data))
print('验证集句子数目:', len(corpus.valid_data))
print('测试集句子数目:', len(corpus.test_data))
print('-' * 60)
print('数据样本:')
for i in range(5):
print(corpus.train_data[i][0])
print(corpus.train_data[i][1])
print(corpus.index_to_sentence(corpus.train_data[i][0]))
print(corpus.index_to_sentence(corpus.train_data[i][1]))
```
### Defining the Context Dataset
* The `Dataset` holds, for every sentence, its context (the previous sentence), its inputs and its prediction targets
```
class BobSueContextDataset(torch.utils.data.Dataset):
def __init__(self, index_data):
self.index_data = index_data
def __getitem__(self, i):
contexts = torch.LongTensor(self.index_data[i][0])
inputs = torch.LongTensor(self.index_data[i][1][:-1])
targets = torch.LongTensor(self.index_data[i][1][1:])
return contexts, inputs, targets
def __len__(self):
return len(self.index_data)
train_set = BobSueContextDataset(corpus.train_data)
print('训练集大小:', len(train_set))
print('训练集样本:')
contexts, inputs, targets = train_set[10]
print('\t上文:', list(contexts.numpy()))
print('\t', corpus.index_to_sentence(contexts.numpy()))
print('\t输入:', list(inputs.numpy()))
print('\t', corpus.index_to_sentence(inputs.numpy()))
print('\t输出:', list(targets.numpy()))
print('\t', corpus.index_to_sentence(targets.numpy()))
```
### Defining the Context DataLoader
* As in Part 1, a custom collate_fn handles the padding, here for both the context and the input sequences
```
def ctx_collate_fn(batch):
# 首先将batch的格式进行转换
# batch[0]:Contexts
# batch[1]: Inputs
# batch[2]: Targets
batch = list(zip(*batch))
ctx_lengths = torch.LongTensor([len(x) for x in batch[0]])
inp_lengths = torch.LongTensor([len(x) for x in batch[1]])
contexts = nn.utils.rnn.pad_sequence(batch[0], batch_first=True)
inputs = nn.utils.rnn.pad_sequence(batch[1], batch_first=True)
targets = nn.utils.rnn.pad_sequence(batch[2], batch_first=True)
mask = (targets != 0).float()
return contexts, inputs, targets, ctx_lengths, inp_lengths, mask
```
**Quick test**
```
train_loader = torch.utils.data.DataLoader(
dataset=train_set,
batch_size=8,
shuffle=True,
collate_fn=ctx_collate_fn
)
contexts, inputs, targets, ctx_lengths, inp_lengths, mask = next(iter(train_loader))
print('Contexts Shape:', contexts.shape)
print('Inputs Shape:', inputs.shape)
print('Targets Shape:', targets.shape)
print('-' * 60)
print('Contexts Lengths:')
print(ctx_lengths)
print('Inputs Lengths: ')
print(inp_lengths)
print('-' * 60)
print('Mask:')
print(mask)
```
## Defining the Network
### The Model
* As in Part 1, the architecture is LSTM-based
* Unlike Part 1, the context sentence is first fed through an encoder LSTM, and its final hidden state is used as the initial state of the decoder LSTM that predicts the current sentence
```
class ContextLM(nn.Module):
def __init__(self, n_words, n_embed=200, n_hidden=200, dropout=0.5):
super(ContextLM, self).__init__()
self.drop = nn.Dropout(dropout)
self.embed = nn.Embedding(n_words, n_embed)
self.encoder = nn.LSTM(n_embed, n_hidden, batch_first=True)
self.decoder = nn.LSTM(n_embed, n_hidden, batch_first=True)
self.linear = nn.Linear(n_hidden, n_words)
def forward(self, contexts, inputs, ctx_lengths, inp_lengths):
# 对上一句话进行编码
ctx_emb = self.drop(self.embed(contexts))
ctx_emb = nn.utils.rnn.pack_padded_sequence(
ctx_emb, ctx_lengths,
batch_first=True,
enforce_sorted=False
)
_, (h_n, c_n) = self.encoder(ctx_emb)
# 对当前句子进行预测
inp_emb = self.drop(self.embed(inputs))
inp_emb = nn.utils.rnn.pack_padded_sequence(
inp_emb, inp_lengths,
batch_first=True,
enforce_sorted=False
)
inp_out, _ = self.decoder(inp_emb, (h_n, c_n))
inp_out, _ = nn.utils.rnn.pad_packed_sequence(inp_out, batch_first=True)
return self.linear(self.drop(inp_out))
model = ContextLM(len(corpus.vocab), 200, 200)
```
**Quick test**
```
outputs = model(contexts, inputs, ctx_lengths, inp_lengths)
print('模型输出Shape:', outputs.shape)
```
### Defining the Loss Function
```
class MaskCrossEntropyLoss(nn.Module):
def __init__(self):
super(MaskCrossEntropyLoss, self).__init__()
self.celoss = nn.CrossEntropyLoss(reduction='none')
def forward(self, outputs, targets, mask):
# outputs shape: (batch_size * max_len, vocab_size)
outputs = outputs.view(-1, outputs.size(2))
# targets shape: (batch_size * max_len)
targets = targets.view(-1)
# mask shape: (batch_size * max_len)
mask = mask.view(-1)
loss = self.celoss(outputs, targets) * mask
return torch.sum(loss) / torch.sum(mask)
```
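In other words, the cross-entropy is averaged only over real (non-padding) tokens:
$$\mathcal{L} = \frac{\sum_i \mathrm{CE}(o_i, t_i)\, m_i}{\sum_i m_i}, \qquad m_i \in \{0, 1\}$$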
**Quick test**
```
criterion = MaskCrossEntropyLoss()
loss = criterion(outputs, targets, mask)
print('Loss:', loss.item())
```
## Training and Prediction
### Defining the Context Learner
* As in the previous part, prediction on the test set is done one sentence per batch; training uses mini-batches (128 sentences by default)
```
class ContextLearner:
def __init__(self, corpus, n_embed=200, n_hidden=200, dropout=0.5,
batch_size=128, early_stopping_round=5):
self.corpus = corpus
self.model = ContextLM(len(corpus.vocab), n_embed, n_hidden, dropout)
self.model.to(device)
self.criterion = MaskCrossEntropyLoss()
self.optimizer = torch.optim.Adam(self.model.parameters())
self.history = defaultdict(list)
self.early_stopping_round = early_stopping_round
self.batch_size = batch_size
def fit(self, num_epoch):
train_set = BobSueContextDataset(
self.corpus.train_data
)
train_loader = torch.utils.data.DataLoader(
dataset=train_set,
batch_size=self.batch_size,
shuffle=True,
collate_fn=ctx_collate_fn
)
valid_set = BobSueContextDataset(
self.corpus.valid_data
)
valid_loader = torch.utils.data.DataLoader(
dataset=valid_set,
batch_size=self.batch_size,
shuffle=False,
collate_fn=ctx_collate_fn
)
no_improve_round = 0
for epoch in range(num_epoch):
train_loss, train_acc, train_words = self._make_train_step(train_loader)
print(f'Epoch {epoch+1}:')
print('Train Step --> Loss: {:.3f}, Acc: {:.3f}, Words: {}'.format(
train_loss, train_acc, train_words))
self.history['train_loss'].append(train_loss)
self.history['train_acc'].append(train_acc)
valid_loss, valid_acc, valid_words = self._make_valid_step(valid_loader)
print('Valid Step --> Loss: {:.3f}, Acc: {:.3f}, Words: {}'.format(
valid_loss, valid_acc, valid_words))
self.history['valid_loss'].append(valid_loss)
self.history['valid_acc'].append(valid_acc)
# 根据验证集的准确率进行EarlyStopping
if self.history['valid_acc'][-1] < max(self.history['valid_acc']):
no_improve_round += 1
else:
no_improve_round = 0
if no_improve_round == self.early_stopping_round:
print(f'Early Stopping at Epoch {epoch+1}')
break
def _make_train_step(self, train_loader):
self.model.train()
total_loss = 0.0
total_correct, total_words = 0, 0
for batch in train_loader:
contexts = batch[0].to(device)
inputs = batch[1].to(device)
targets = batch[2].to(device)
ctx_lengths = batch[3].to(device)
inp_lengths = batch[4].to(device)
mask = batch[5].to(device)
outputs = self.model(contexts, inputs, ctx_lengths, inp_lengths)
loss = self.criterion(outputs, targets, mask)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
total_correct += (outputs.argmax(-1) == targets).sum().item()
total_words += torch.sum(inp_lengths).item()
total_loss += loss.item() * torch.sum(mask).item()
return total_loss / total_words, total_correct / total_words, total_words
def _make_valid_step(self, valid_loader):
self.model.eval()
total_loss = 0.0
total_correct, total_words = 0, 0
with torch.no_grad():
for batch in valid_loader:
contexts = batch[0].to(device)
inputs = batch[1].to(device)
targets = batch[2].to(device)
ctx_lengths = batch[3].to(device)
inp_lengths = batch[4].to(device)
mask = batch[5].to(device)
outputs = self.model(contexts, inputs, ctx_lengths, inp_lengths)
loss = self.criterion(outputs, targets, mask)
total_correct += (outputs.argmax(-1) == targets).sum().item()
total_words += torch.sum(inp_lengths).item()
total_loss += loss.item() * torch.sum(mask).item()
return total_loss / total_words, total_correct / total_words, total_words
def predict(self):
test_set = BobSueContextDataset(
self.corpus.test_data
)
test_loader = torch.utils.data.DataLoader(
dataset=test_set,
batch_size=1,
shuffle=False,
collate_fn=ctx_collate_fn
)
self.model.eval()
total_loss = 0.0
total_correct, total_words = 0, 0
test_result = defaultdict(list)
with torch.no_grad():
for batch in test_loader:
contexts = batch[0].to(device)
inputs = batch[1].to(device)
targets = batch[2].to(device)
ctx_lengths = batch[3].to(device)
inp_lengths = batch[4].to(device)
mask = batch[5].to(device)
outputs = self.model(contexts, inputs, ctx_lengths, inp_lengths)
loss = self.criterion(outputs, targets, mask)
total_correct += (outputs.argmax(-1) == targets).sum().item()
total_words += torch.sum(inp_lengths).item()
total_loss += loss.item() * torch.sum(mask).item()
test_result['preds'].append(outputs.argmax(-1).data.cpu().numpy()[0])
test_result['targets'].append(targets.data.cpu().numpy()[0])
return total_loss / total_words, total_correct / total_words, total_words, test_result
```
### Training
* Set the hyperparameters and start training
```
torch.cuda.empty_cache()
corpus = ContextCorpus(data_path)
learner = ContextLearner(corpus, n_embed=200, n_hidden=200, dropout=0.5)
learner.fit(1000)
```
### Testing
* With the model trained above, let's look at its performance on the test set
```
test_loss, test_acc, test_words, test_result = learner.predict()
print('测试集上的结果 --> Loss: {:.3f}, Acc: {:.3f}, Words: {}'.format(
test_loss, test_acc, test_words))
```
The test-set performance now exceeds what we obtained in Part 1, where the accuracy was 0.341.
```
print('预测句子数量:', len(test_result['preds']))
print('-' * 60)
sample_index = 4
print('结果样例:')
print('预测值\t', test_result['preds'][sample_index])
print('实际值\t', test_result['targets'][sample_index])
print('预测句子\t', corpus.index_to_sentence(test_result['preds'][sample_index]))
print('实际句子\t', corpus.index_to_sentence(test_result['targets'][sample_index]))
```
## Error Analysis
* Below we count the 35 most common prediction errors, as (target, prediction) pairs
```
mistake_counter = Counter()
for i in range(len(test_result['targets'])):
for j in range(len(test_result['targets'][i])):
pred, target = test_result['preds'][i][j], test_result['targets'][i][j]
if pred != target:
pred, target = corpus.vocab.itos[pred], corpus.vocab.itos[target]
mistake_counter[(target, pred)] += 1
mistake_counter.most_common(35)
```
* We can see that the sentence-initial errors that dominated Part 1 have been noticeably reduced, but the model still cannot predict prepositions and forms of 'to be' well
* The latter two error types are hard to resolve without access to the following context
|
github_jupyter
|
import datetime
import random
from collections import Counter, defaultdict
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchtext
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
set_random_seed(2020)
device = torch.device('cuda:0' if torch.cuda.is_available else 'cpu')
device = torch.device('cuda' if torch.cuda.is_available else 'cpu')
data_path = Path('/media/bnu/data/nlp-practice/language-model')
print('PyTorch Version:', torch.__version__)
print('-' * 60)
if torch.cuda.is_available():
print('CUDA Device Count:', torch.cuda.device_count())
print('CUDA Device Name:')
for i in range(torch.cuda.device_count()):
print('\t', torch.cuda.get_device_name(i))
print('CUDA Current Device Index:', torch.cuda.current_device())
print('-' * 60)
print('Data Path:', data_path)
class Vocab:
def __init__(self, vocab_path):
self.stoi = {} # token -> index (dict)
self.itos = [] # index -> token (list)
with open(vocab_path) as f:
# bobsue.voc.txt中,每一行是一个单词
for w in f.readlines():
w = w.strip()
if w not in self.stoi:
self.stoi[w] = len(self.itos)
self.itos.append(w)
def __len__(self):
return len(self.itos)
vocab = Vocab(data_path / 'bobsue.voc.txt')
print('单词表大小:', len(vocab))
print('-' * 60)
print('样例(单词 -> 索引):')
print(list(vocab.stoi.items())[:5])
print('-' * 60)
print('样例(索引 -> 单词):')
print(list(enumerate(vocab.itos))[:5])
class Corpus:
def __init__(self, data_path, sort_by_len=False,
uniform=False, freq_coef=0.75):
self.vocab = Vocab(data_path / 'bobsue.voc.txt')
self.sort_by_len = sort_by_len
self.train_data = self.tokenize(data_path / 'bobsue.lm.train.txt')
self.valid_data = self.tokenize(data_path / 'bobsue.lm.dev.txt')
self.test_data = self.tokenize(data_path / 'bobsue.lm.test.txt')
# 统计训练集的单词计数
self.word_counter = Counter()
for x in self.train_data:
# 注意<s>不在我们的预测范围内,不要统计
self.word_counter += Counter(x[1:])
# 训练集中需要预测的总词数
total_words = len(list(self.word_counter.elements()))
if uniform: # 均匀分布
self.word_freqs = np.array(
[0.] + [1. for _ in range(len(self.vocab) - 1)],
dtype=np.float32
)
self.word_freqs = self.word_freqs / sum(self.word_freqs)
else: # 词频分布(提升freq_coef次方)
self.word_freqs = np.array(
[self.word_counter[i] for i in range(len(self.vocab))],
dtype=np.float32
)
self.word_freqs = self.word_freqs / sum(self.word_freqs)
self.word_freqs = self.word_freqs ** freq_coef
self.word_freqs = self.word_freqs / sum(self.word_freqs)
def tokenize(self, text_path):
with open(text_path) as f:
index_data = [] # 索引数据,存储每个样本的单词索引列表
for s in f.readlines():
index_data.append(
self.sentence_to_index(s)
)
if self.sort_by_len: # 为了提升训练速度,可以考虑将样本按照长度排序,这样可以减少padding
index_data = sorted(index_data, key=lambda x: len(x), reverse=True)
return index_data
def sentence_to_index(self, s):
return [self.vocab.stoi[w] for w in s.split()]
def index_to_sentence(self, x):
return ' '.join([self.vocab.itos[i] for i in x])
corpus = Corpus(data_path, sort_by_len=False)
print('训练集句子数目:', len(corpus.train_data))
print('验证集句子数目:', len(corpus.valid_data))
print('测试集句子数目:', len(corpus.test_data))
print('-' * 60)
print('训练集总共单词数目:', sum([len(x) for x in corpus.train_data]))
print('验证集总共单词数目:', sum([len(x) for x in corpus.valid_data]))
print('测试集总共单词数目:', sum([len(x) for x in corpus.test_data]))
print('-' * 60)
print('训练集预测单词数目:', sum([len(x) - 1 for x in corpus.train_data]))
print('验证集预测单词数目:', sum([len(x) - 1 for x in corpus.valid_data]))
print('测试集预测单词数目:', sum([len(x) - 1 for x in corpus.test_data]))
print('-' * 60)
print('数据样本:')
for i in range(5):
print(corpus.train_data[i])
print(corpus.index_to_sentence(corpus.train_data[i]))
print('-' * 60)
print()
corpus = Corpus(data_path, sort_by_len=False, uniform=True)
print('Uniform distribution:')
print('Frequency sample:', corpus.word_freqs[:5])
print('Number of frequency entries:', len(corpus.word_freqs))
corpus = Corpus(data_path, sort_by_len=False, uniform=False, freq_coef=0.75)
print('Frequency-based distribution:')
print('Frequency sample:', corpus.word_freqs[:5])
print('Number of frequency entries:', len(corpus.word_freqs))
class BobSueNegSampleDataSet(torch.utils.data.Dataset):
def __init__(self, index_data, word_freqs, n_negs=20):
        self.index_data = index_data  # text converted to word indices
        self.n_negs = n_negs  # number of negative samples per target position
        self.word_freqs = torch.FloatTensor(word_freqs)  # sampling distribution over the vocabulary
    def __getitem__(self, i):
        inputs = torch.LongTensor(self.index_data[i][:-1])
        poss = torch.LongTensor(self.index_data[i][1:])
        # draw n_negs negative samples for every target position
        negs = torch.zeros((len(poss), self.n_negs), dtype=torch.long)
        for j in range(len(poss)):  # use j to avoid shadowing the sample index i
            negs[j] = torch.multinomial(self.word_freqs, self.n_negs)
return inputs, poss, negs
def __len__(self):
return len(self.index_data)
corpus = Corpus(data_path, sort_by_len=False, uniform=False, freq_coef=0.75)
train_set = BobSueNegSampleDataSet(corpus.train_data, corpus.word_freqs)
print('Training set size:', len(train_set))
print()
print('Training set sample:')
print('Input shape:', train_set[10][0].shape)
print('Positive-example shape:', train_set[10][1].shape)
print('Negative-example shape:', train_set[10][2].shape)
def neglm_collate_fn(batch):
    # First, transpose the batch from a list of samples into per-field tuples
    # batch[0]: Inputs
# batch[1]: Poss
# batch[2]: Negs
batch = list(zip(*batch))
# lengths: (batch_size)
lengths = torch.LongTensor([len(x) for x in batch[0]])
# inputs: (batch_size, max_len)
inputs = nn.utils.rnn.pad_sequence(batch[0], batch_first=True)
# poss: (batch_size, max_len)
poss = nn.utils.rnn.pad_sequence(batch[1], batch_first=True)
# negs: (batch_size, max_len, n_negs)
negs = nn.utils.rnn.pad_sequence(batch[2], batch_first=True)
# mask: (batch_size, max_len)
mask = (poss != 0).float()
return inputs, poss, negs, lengths, mask
train_loader = torch.utils.data.DataLoader(
dataset=train_set,
batch_size=8,
shuffle=True,
collate_fn=neglm_collate_fn
)
inputs, poss, negs, lengths, mask = next(iter(train_loader))
print('Input Shape:', inputs.shape)
print('Poss Shape:', poss.shape)
print('Negs Shape:', negs.shape)
print('-' * 60)
print('Lengths:')
print(lengths)
print('Mask:')
print(mask)
class NegSampleLM(nn.Module):
def __init__(self, n_words, n_embed=200, dropout=0.5):
super(NegSampleLM, self).__init__()
        self.drop = nn.Dropout(dropout)  # use the dropout argument instead of a hard-coded 0.5
        # input-side embedding
        self.embed_in = nn.Embedding(n_words, n_embed)
        # output-side embedding
        self.embed_out = nn.Embedding(n_words, n_embed)
        # embed_size must equal the LSTM hidden size so the dot-product loss below is well defined
self.lstm = nn.LSTM(n_embed, n_embed, batch_first=True)
def forward(self, inputs, poss, negs, lengths, mask):
# x_embed: (batch_size, seq_len, embed_size)
x_embed = self.drop(self.embed_in(inputs))
# poss_embed: (batch_size, seq_len, embed_size)
poss_embed = self.embed_out(poss)
# negs_embed: (batch_size, seq_len, n_negs, embed_size)
negs_embed = self.embed_out(negs)
x_embed = nn.utils.rnn.pack_padded_sequence(
x_embed, lengths, batch_first=True, enforce_sorted=False
)
# x_lstm: (batch_size, seq_len, embed_size)
x_lstm, _ = self.lstm(x_embed)
x_lstm, _ = nn.utils.rnn.pad_packed_sequence(
x_lstm, batch_first=True
)
# x_lstm: (batch_size * seq_len, embed_size, 1)
x_lstm = x_lstm.view(-1, x_lstm.shape[2], 1)
# poss_embed: (batch_size * seq_len, 1, embed_size)
poss_embed = poss_embed.view(-1, 1, poss_embed.shape[2])
# negs_embed: (batch_size * seq_len, n_negs, embeds)
negs_embed = negs_embed.view(-1, negs_embed.shape[2], negs_embed.shape[3])
# poss_mm: (batch_size * seq_len)
poss_mm = torch.bmm(poss_embed, x_lstm).squeeze()
# negs_mm: (batch_size * seq_len, n_negs)
negs_mm = torch.bmm(negs_embed, -x_lstm).squeeze()
mask = mask.view(-1)
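        # Negative-sampling objective: at every real (non-padded) position we
        # maximize log sigmoid(h . positive embedding) plus the mean of
        # log sigmoid(-h . negative embeddings); the loss negates these terms.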
poss_loss = F.logsigmoid(poss_mm) * mask
negs_loss = F.logsigmoid(negs_mm).mean(1) * mask
total_loss = -(poss_loss + negs_loss)
return total_loss.mean(), x_lstm
corpus = Corpus(data_path, sort_by_len=False, uniform=False, freq_coef=0.75)
train_set = BobSueNegSampleDataSet(corpus.train_data, corpus.word_freqs)
model = NegSampleLM(len(corpus.vocab), n_embed=200)
train_loader = torch.utils.data.DataLoader(
dataset=train_set,
batch_size=8,
shuffle=True,
collate_fn=neglm_collate_fn
)
inputs, poss, negs, lengths, mask = next(iter(train_loader))
loss, x_lstm = model(inputs, poss, negs, lengths, mask)
print('Loss:', loss.item())
def generate_prediction(model, x_lstm):
with torch.no_grad():
x_lstm = x_lstm.squeeze() # (seq_len, embedding_size)
embed_weight = model.embed_out.weight.transpose(0, 1) # (embedding_size, n_words)
preds = x_lstm @ embed_weight
preds = preds.argmax(dim=-1)
return preds
preds = generate_prediction(model, x_lstm)
preds = preds.view(-1, poss.shape[1])
print('Poss Shape:', poss.shape)
print('Preds Shape:', preds.shape)
((preds == poss) * mask).sum()
class NegSampleLearner:
def __init__(self, corpus, n_embed=200, dropout=0.5, n_negs=20,
batch_size=8):
self.corpus = corpus
self.model = NegSampleLM(len(corpus.vocab), n_embed, dropout).to(device)
self.optimizer = torch.optim.Adam(self.model.parameters())
self.n_negs = n_negs
self.batch_size = batch_size
def fit(self, num_epochs):
train_set = BobSueNegSampleDataSet(
self.corpus.train_data,
self.corpus.word_freqs,
n_negs=self.n_negs,
)
valid_set = BobSueNegSampleDataSet(
self.corpus.valid_data,
self.corpus.word_freqs,
n_negs=self.n_negs
)
train_loader = torch.utils.data.DataLoader(
dataset=train_set,
batch_size=self.batch_size,
shuffle=True,
collate_fn=neglm_collate_fn
)
valid_loader = torch.utils.data.DataLoader(
dataset=valid_set,
batch_size=self.batch_size,
shuffle=False,
collate_fn=neglm_collate_fn
)
for epoch in range(num_epochs):
start_time = datetime.datetime.now()
train_loss, train_words = self._make_train_step(train_loader)
end_time = datetime.datetime.now()
print(f'Epoch {epoch+1}:')
print('Train Step --> Loss: {:.3f}, Words: {}, Time: {}s'.format(
train_loss, train_words, (end_time-start_time).seconds
))
valid_loss, valid_acc, valid_words = self._make_valid_step(valid_loader)
print('Valid Step --> Loss: {:.3f}, Acc: {:.3f}, Words: {}'.format(
valid_loss, valid_acc, valid_words
))
def _make_train_step(self, train_loader):
        # training mode
        self.model.train()
        # cumulative loss
        total_loss = 0.0
        # total number of predicted words
total_words = 0
for inputs, poss, negs, lengths, mask in train_loader:
inputs = inputs.to(device)
poss = poss.to(device)
negs = negs.to(device)
lengths = lengths.to(device)
mask = mask.to(device)
            # model loss
            loss, _ = self.model(inputs, poss, negs, lengths, mask)
            # backpropagation
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            # bookkeeping
sent_words = lengths.sum().item()
total_words += sent_words
total_loss += loss.item() * sent_words
return total_loss / total_words, total_words
def _make_valid_step(self, valid_loader):
        # evaluation mode
        self.model.eval()
        # cumulative loss
        total_loss = 0.0
        # number of correct predictions and total number of predicted words
total_correct, total_words = 0, 0
with torch.no_grad():
for inputs, poss, negs, lengths, mask in valid_loader:
inputs = inputs.to(device)
poss = poss.to(device)
negs = negs.to(device)
lengths = lengths.to(device)
mask = mask.to(device)
                # model loss
                loss, x_lstm = self.model(inputs, poss, negs, lengths, mask)
                # generate predictions and compute accuracy
                preds = generate_prediction(self.model, x_lstm)
                preds = preds.view(-1, poss.shape[1])
                total_correct += ((preds == poss) * mask).sum().item()
                # bookkeeping
sent_words = lengths.sum().item()
total_words += sent_words
total_loss += loss.item() * sent_words
return total_loss / total_words, total_correct / total_words, total_words
def predict(self):
test_set = BobSueNegSampleDataSet(
self.corpus.test_data,
self.corpus.word_freqs,
n_negs=self.n_negs
)
test_loader = torch.utils.data.DataLoader(
dataset=test_set,
batch_size=self.batch_size,
shuffle=False,
collate_fn=neglm_collate_fn
)
        # evaluation mode
        self.model.eval()
        # number of correct predictions and total number of predicted words
total_correct, total_words = 0, 0
with torch.no_grad():
for inputs, poss, negs, lengths, mask in test_loader:
inputs = inputs.to(device)
poss = poss.to(device)
negs = negs.to(device)
lengths = lengths.to(device)
mask = mask.to(device)
                # model loss
                loss, x_lstm = self.model(inputs, poss, negs, lengths, mask)
                # generate predictions and compute accuracy
                preds = generate_prediction(self.model, x_lstm)
                preds = preds.view(-1, poss.shape[1])
                total_correct += ((preds == poss) * mask).sum().item()
                # bookkeeping
sent_words = lengths.sum().item()
total_words += sent_words
return total_correct / total_words, total_words
torch.cuda.empty_cache()
corpus = Corpus(data_path, sort_by_len=False, uniform=True, freq_coef=0.1)
learner = NegSampleLearner(corpus, n_embed=200, dropout=0.5, n_negs=20)
learner.fit(10)
test_acc, test_word = learner.predict()
print('Total words predicted on the test set:', test_word)
print('Test set prediction accuracy:', test_acc)
class ContextCorpus:
def __init__(self, data_path):
self.vocab = Vocab(data_path / 'bobsue.voc.txt')
self.train_data = self.tokenize(data_path / 'bobsue.prevsent.train.tsv')
self.valid_data = self.tokenize(data_path / 'bobsue.prevsent.dev.tsv')
self.test_data = self.tokenize(data_path / 'bobsue.prevsent.test.tsv')
def tokenize(self, text_path):
with open(text_path) as f:
index_data = []
for s in f.readlines():
t = s.split('\t')
index_data.append(
(self.sentence_to_index(t[0]),
self.sentence_to_index(t[1]))
)
return index_data
def sentence_to_index(self, s):
return [self.vocab.stoi[w] for w in s.split()]
def index_to_sentence(self, x):
return ' '.join([self.vocab.itos[i] for i in x])
corpus = ContextCorpus(data_path)
print('Number of training sentences:', len(corpus.train_data))
print('Number of validation sentences:', len(corpus.valid_data))
print('Number of test sentences:', len(corpus.test_data))
print('-' * 60)
print('Data samples:')
for i in range(5):
print(corpus.train_data[i][0])
print(corpus.train_data[i][1])
print(corpus.index_to_sentence(corpus.train_data[i][0]))
print(corpus.index_to_sentence(corpus.train_data[i][1]))
class BobSueContextDataset(torch.utils.data.Dataset):
def __init__(self, index_data):
self.index_data = index_data
def __getitem__(self, i):
contexts = torch.LongTensor(self.index_data[i][0])
inputs = torch.LongTensor(self.index_data[i][1][:-1])
targets = torch.LongTensor(self.index_data[i][1][1:])
return contexts, inputs, targets
def __len__(self):
return len(self.index_data)
train_set = BobSueContextDataset(corpus.train_data)
print('Training set size:', len(train_set))
print('Training set sample:')
contexts, inputs, targets = train_set[10]
print('\tContext:', list(contexts.numpy()))
print('\t', corpus.index_to_sentence(contexts.numpy()))
print('\tInput:', list(inputs.numpy()))
print('\t', corpus.index_to_sentence(inputs.numpy()))
print('\tTarget:', list(targets.numpy()))
print('\t', corpus.index_to_sentence(targets.numpy()))
def ctx_collate_fn(batch):
    # First, transpose the batch from a list of samples into per-field tuples
    # batch[0]: Contexts
# batch[1]: Inputs
# batch[2]: Targets
batch = list(zip(*batch))
ctx_lengths = torch.LongTensor([len(x) for x in batch[0]])
inp_lengths = torch.LongTensor([len(x) for x in batch[1]])
contexts = nn.utils.rnn.pad_sequence(batch[0], batch_first=True)
inputs = nn.utils.rnn.pad_sequence(batch[1], batch_first=True)
targets = nn.utils.rnn.pad_sequence(batch[2], batch_first=True)
mask = (targets != 0).float()
return contexts, inputs, targets, ctx_lengths, inp_lengths, mask
train_loader = torch.utils.data.DataLoader(
dataset=train_set,
batch_size=8,
shuffle=True,
collate_fn=ctx_collate_fn
)
contexts, inputs, targets, ctx_lengths, inp_lengths, mask = next(iter(train_loader))
print('Contexts Shape:', contexts.shape)
print('Inputs Shape:', inputs.shape)
print('Targets Shape:', targets.shape)
print('-' * 60)
print('Contexts Lengths:')
print(ctx_lengths)
print('Inputs Lengths: ')
print(inp_lengths)
print('-' * 60)
print('Mask:')
print(mask)
class ContextLM(nn.Module):
def __init__(self, n_words, n_embed=200, n_hidden=200, dropout=0.5):
super(ContextLM, self).__init__()
self.drop = nn.Dropout(dropout)
self.embed = nn.Embedding(n_words, n_embed)
self.encoder = nn.LSTM(n_embed, n_hidden, batch_first=True)
self.decoder = nn.LSTM(n_embed, n_hidden, batch_first=True)
self.linear = nn.Linear(n_hidden, n_words)
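    # The previous sentence (contexts) is encoded by `encoder`; its final
    # (h_n, c_n) state initialises `decoder`, which then predicts each next
    # word of the current sentence from `inputs`.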
def forward(self, contexts, inputs, ctx_lengths, inp_lengths):
        # encode the previous sentence
ctx_emb = self.drop(self.embed(contexts))
ctx_emb = nn.utils.rnn.pack_padded_sequence(
ctx_emb, ctx_lengths,
batch_first=True,
enforce_sorted=False
)
_, (h_n, c_n) = self.encoder(ctx_emb)
        # decode the current sentence conditioned on the encoder state
inp_emb = self.drop(self.embed(inputs))
inp_emb = nn.utils.rnn.pack_padded_sequence(
inp_emb, inp_lengths,
batch_first=True,
enforce_sorted=False
)
inp_out, _ = self.decoder(inp_emb, (h_n, c_n))
inp_out, _ = nn.utils.rnn.pad_packed_sequence(inp_out, batch_first=True)
return self.linear(self.drop(inp_out))
model = ContextLM(len(corpus.vocab), 200, 200)
outputs = model(contexts, inputs, ctx_lengths, inp_lengths)
print('Model output shape:', outputs.shape)
class MaskCrossEntropyLoss(nn.Module):
def __init__(self):
super(MaskCrossEntropyLoss, self).__init__()
self.celoss = nn.CrossEntropyLoss(reduction='none')
def forward(self, outputs, targets, mask):
# outputs shape: (batch_size * max_len, vocab_size)
outputs = outputs.view(-1, outputs.size(2))
# targets shape: (batch_size * max_len)
targets = targets.view(-1)
# mask shape: (batch_size * max_len)
mask = mask.view(-1)
loss = self.celoss(outputs, targets) * mask
return torch.sum(loss) / torch.sum(mask)
criterion = MaskCrossEntropyLoss()
loss = criterion(outputs, targets, mask)
print('Loss:', loss.item())
class ContextLearner:
def __init__(self, corpus, n_embed=200, n_hidden=200, dropout=0.5,
batch_size=128, early_stopping_round=5):
self.corpus = corpus
self.model = ContextLM(len(corpus.vocab), n_embed, n_hidden, dropout)
self.model.to(device)
self.criterion = MaskCrossEntropyLoss()
self.optimizer = torch.optim.Adam(self.model.parameters())
self.history = defaultdict(list)
self.early_stopping_round = early_stopping_round
self.batch_size = batch_size
def fit(self, num_epoch):
train_set = BobSueContextDataset(
self.corpus.train_data
)
train_loader = torch.utils.data.DataLoader(
dataset=train_set,
batch_size=self.batch_size,
shuffle=True,
collate_fn=ctx_collate_fn
)
valid_set = BobSueContextDataset(
self.corpus.valid_data
)
valid_loader = torch.utils.data.DataLoader(
dataset=valid_set,
batch_size=self.batch_size,
shuffle=False,
collate_fn=ctx_collate_fn
)
no_improve_round = 0
for epoch in range(num_epoch):
train_loss, train_acc, train_words = self._make_train_step(train_loader)
print(f'Epoch {epoch+1}:')
print('Train Step --> Loss: {:.3f}, Acc: {:.3f}, Words: {}'.format(
train_loss, train_acc, train_words))
self.history['train_loss'].append(train_loss)
self.history['train_acc'].append(train_acc)
valid_loss, valid_acc, valid_words = self._make_valid_step(valid_loader)
print('Valid Step --> Loss: {:.3f}, Acc: {:.3f}, Words: {}'.format(
valid_loss, valid_acc, valid_words))
self.history['valid_loss'].append(valid_loss)
self.history['valid_acc'].append(valid_acc)
            # early stopping based on validation accuracy
if self.history['valid_acc'][-1] < max(self.history['valid_acc']):
no_improve_round += 1
else:
no_improve_round = 0
if no_improve_round == self.early_stopping_round:
print(f'Early Stopping at Epoch {epoch+1}')
break
def _make_train_step(self, train_loader):
self.model.train()
total_loss = 0.0
total_correct, total_words = 0, 0
for batch in train_loader:
contexts = batch[0].to(device)
inputs = batch[1].to(device)
targets = batch[2].to(device)
ctx_lengths = batch[3].to(device)
inp_lengths = batch[4].to(device)
mask = batch[5].to(device)
outputs = self.model(contexts, inputs, ctx_lengths, inp_lengths)
loss = self.criterion(outputs, targets, mask)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
            total_correct += ((outputs.argmax(-1) == targets).float() * mask).sum().item()  # ignore padded positions
total_words += torch.sum(inp_lengths).item()
total_loss += loss.item() * torch.sum(mask).item()
return total_loss / total_words, total_correct / total_words, total_words
def _make_valid_step(self, valid_loader):
self.model.eval()
total_loss = 0.0
total_correct, total_words = 0, 0
with torch.no_grad():
for batch in valid_loader:
contexts = batch[0].to(device)
inputs = batch[1].to(device)
targets = batch[2].to(device)
ctx_lengths = batch[3].to(device)
inp_lengths = batch[4].to(device)
mask = batch[5].to(device)
outputs = self.model(contexts, inputs, ctx_lengths, inp_lengths)
loss = self.criterion(outputs, targets, mask)
                total_correct += ((outputs.argmax(-1) == targets).float() * mask).sum().item()  # ignore padded positions
total_words += torch.sum(inp_lengths).item()
total_loss += loss.item() * torch.sum(mask).item()
return total_loss / total_words, total_correct / total_words, total_words
def predict(self):
test_set = BobSueContextDataset(
self.corpus.test_data
)
test_loader = torch.utils.data.DataLoader(
dataset=test_set,
batch_size=1,
shuffle=False,
collate_fn=ctx_collate_fn
)
self.model.eval()
total_loss = 0.0
total_correct, total_words = 0, 0
test_result = defaultdict(list)
with torch.no_grad():
for batch in test_loader:
contexts = batch[0].to(device)
inputs = batch[1].to(device)
targets = batch[2].to(device)
ctx_lengths = batch[3].to(device)
inp_lengths = batch[4].to(device)
mask = batch[5].to(device)
outputs = self.model(contexts, inputs, ctx_lengths, inp_lengths)
loss = self.criterion(outputs, targets, mask)
                total_correct += ((outputs.argmax(-1) == targets).float() * mask).sum().item()  # ignore padded positions
total_words += torch.sum(inp_lengths).item()
total_loss += loss.item() * torch.sum(mask).item()
test_result['preds'].append(outputs.argmax(-1).data.cpu().numpy()[0])
test_result['targets'].append(targets.data.cpu().numpy()[0])
return total_loss / total_words, total_correct / total_words, total_words, test_result
torch.cuda.empty_cache()
corpus = ContextCorpus(data_path)
learner = ContextLearner(corpus, n_embed=200, n_hidden=200, dropout=0.5)
learner.fit(1000)
test_loss, test_acc, test_words, test_result = learner.predict()
print('Test set results --> Loss: {:.3f}, Acc: {:.3f}, Words: {}'.format(
    test_loss, test_acc, test_words))
print('Number of predicted sentences:', len(test_result['preds']))
print('-' * 60)
sample_index = 4
print('Sample result:')
print('Predicted\t', test_result['preds'][sample_index])
print('Target\t', test_result['targets'][sample_index])
print('Predicted sentence\t', corpus.index_to_sentence(test_result['preds'][sample_index]))
print('Target sentence\t', corpus.index_to_sentence(test_result['targets'][sample_index]))
mistake_counter = Counter()
for i in range(len(test_result['targets'])):
for j in range(len(test_result['targets'][i])):
pred, target = test_result['preds'][i][j], test_result['targets'][i][j]
if pred != target:
pred, target = corpus.vocab.itos[pred], corpus.vocab.itos[target]
mistake_counter[(target, pred)] += 1
mistake_counter.most_common(35)
| 0.497803 | 0.82176 |
# **Least squares regression**
Notebook version: 1.1 (Mar 11, 2016)
Author: Jerónimo Arenas García ([email protected])
Changes: v.1.0 - First version
v.1.1 - UTAD version
Pending changes: *
```
# Import some libraries that will be necessary for working with data and displaying plots
# To visualize plots in the notebook
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.io # To read matlab files
import pylab
from test_helper import Test
```
# 2. Least squares regression
This notebook covers the problem of fitting parametric regression models with a minimum least-squares criterion. The material presented here is based on the first lectures of this <a href=http://mlg.eng.cam.ac.uk/teaching/4f13/1415/>Machine Learning course</a>. In particular, you can refer to the following presentation: <a href=http://mlg.eng.cam.ac.uk/teaching/4f13/1415/lect0102.pdf>Probabilistic Regression</a>.
## 2.1. A parametric approach to the regression problem
We have already presented the goal of regression. Given that we have access to a set of training points, $\{{\bf x}^{(l)}, s^{(l)}\}_{l=1}^L$, the goal is to learn a function $f({\bf x})$ that we can use to make good predictions for an arbitrary input vector.
The following plot illustrates a regression example for unidimensional input data. We have also generated three different regression curves corresponding to polynomials of degrees 1, 2, and 3 with random coefficients.
```
n_points = 35
n_grid = 200
frec = 3
std_n = 0.3
#Location of the training points
X_tr = (3 * np.random.random((n_points,1)) - 0.5)
#Labels are obtained from a sinusoidal function, and contaminated by noise
S_tr = np.cos(frec*X_tr) + std_n * np.random.randn(n_points,1)
#Equally spaced points in the X-axis
X_grid = np.linspace(np.min(X_tr),np.max(X_tr),n_grid)
f1 = np.random.random((1,1)) + np.random.random((1,1))*X_grid
f2 = np.random.random((1,1)) + np.random.random((1,1))*X_grid + \
np.random.random((1,1))*(X_grid**2)
f3 = np.random.random((1,1)) + np.random.random((1,1))*X_grid + \
np.random.random((1,1))*(X_grid**2) + np.random.random((1,1))*(X_grid**3)
plt.plot(X_tr,S_tr,'b.')
plt.plot(X_grid,f1.T,'g-',label='Linear function')
plt.plot(X_grid,f2.T,'r-',label='Quadratic function')
plt.plot(X_grid,f3.T,'m-',label='Cubic function')
plt.legend(loc='best')
```
### Parametric model
Unlike in the previous session, today we will assume a parametric expression for the regression curve, adjusting its free parameters according to some criterion that measures the quality of the proposed model.
 - For a unidimensional case like the one in the previous figure, a convenient approach is to resort to polynomial expressions:
$${\hat s}(x) = f(x) = w_0 + w_1 x + w_2 x^2 + \dots + w_M x^M$$
- For multidimensional regression, polynomial expressions can include cross-products of the variables. For instance, for a case with two input variables, the degree 2 polynomial would be given by
$${\hat s}({\bf x}) = f({\bf x}) = w_0 + w_1 x_1 + w_2 x_2 + w_3 x_1^2 + w_4 x_2^2 + w_5 x_1 x_2$$
- A linear model for multidimensional regression can be expressed as
$${\hat s}({\bf x}) = f({\bf x}) = w_0 + {\bf w}^\top {\bf x}$$
When we postulate such models, the regression problem reduces to finding the most appropriate values of the parameters ${\bf w} = [w_i]$.
All the previous models have in common the fact that they are linear in the parameters, even though they can implement highly non-linear functions. All the derivations in this notebook are equally valid for other non-linear transformations of the input variables, as long as we keep linear-in-the-parameters models.
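As a quick illustration of a model that is non-linear in the input but linear in the parameters (a sketch with made-up data, not taken from the original material), we can fit the feature map ${\bf z} = [1, x, \sin(x)]^\top$ with ordinary least squares:
```
import numpy as np
# Illustrative synthetic data (assumed only for this example)
x = np.linspace(0, 3, 40)
s = 0.5 + 2.0 * np.sin(x) + 0.1 * np.random.randn(40)
# Feature matrix Z with columns [1, x, sin(x)]: non-linear in x, linear in the parameters
Z = np.column_stack((np.ones_like(x), x, np.sin(x)))
w, _, _, _ = np.linalg.lstsq(Z, s)
print(w)  # estimated parameters [w_0, w_1, w_2]
```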
```
## Next, we represent some random polynomial functions for degrees between 0 and 14
max_degree = 15
n_points = 200
#Values of X to evaluate the function
X_grid = np.linspace(-1.5, 1.5,n_points)
for idx in range(max_degree):
x1 = plt.subplot(3,5, idx+1)
x1.get_xaxis().set_ticks([])
x1.get_yaxis().set_ticks([])
for kk in range(5):
#Random generation of coefficients for the model
we = np.random.randn(idx+1, 1)
#Evaluate the polynomial with previous coefficients at X_grid values
fout = np.polyval(we, X_grid)
x1.plot(X_grid,fout,'g-')
```
- Should we choose a polynomial?
- What degree should we use for the polynomial?
- For a given degree, how do we choose the weights?
For now, we will find the single "best" polynomial. In a future session, we will see how we can design methods that take into account different polynomials simultaneously.
Next, we will explain how to choose the optimal weights according to the least-squares criterion.
## 2.2 Least squares regression
### 2.2.1 Problem definition
- The goal is to learn a (possibly non-linear) regression model from a set of $L$ labeled points, $\{{\bf x}^{(l)}, s^{(l)}\}_{l=1}^L$.
- We assume a parametric function of the form:
$${\hat s}({\bf x}) = f({\bf x}) = w_0 z_0({\bf x}) + w_1 z_1({\bf x}) + \dots + w_M z_M({\bf x})$$
where $z_i({\bf x})$ are particular transformations of the input vector variables.
Some examples are:
- If ${\bf z} = {\bf x}$, the model is just a linear combination of the input variables
- If ${\bf z} = \left[\begin{array}{c}1\\{\bf x}\end{array}\right]$, we have again a linear combination with the inclusion of a constant term.
   - For unidimensional input $x$, ${\bf z} = [1, x, x^2, \dots,x^{M}]^\top$ would implement a polynomial of degree $M$.
   - Note that the variables of ${\bf z}$ could also be computed combining different variables of ${\bf x}$. E.g., if ${\bf x} = [x_1,x_2]^\top$, a degree-two polynomial would be implemented with
$${\bf z} = \left[\begin{array}{c}1\\x_1\\x_2\\x_1^2\\x_2^2\\x_1 x_2\end{array}\right]$$
- The above expression does not assume a polynomial model. For instance, we could consider ${\bf z} = [\log(x_1),\log(x_2)]$
Least squares (LS) regression finds the coefficients of the model with the aim of minimizing the sum of the squared residuals. If we define ${\bf w} = [w_0,w_1,\dots,w_M]^\top$, the LS solution would be defined as
\begin{equation}{\bf w}_{LS} = \arg \min_{\bf w} \sum_{l=1}^L [e^{(l)}]^2 = \arg \min_{\bf w} \sum_{l=1}^L \left[s^{(l)} - {\hat s}^{(l)} \right]^2 \end{equation}
### 2.2.2 Vector Notation
In order to solve the LS problem it is convenient to define the following vectors and matrices:
- We can group together all available target values to form the following vector
$${\bf s} = \left[s^{(1)}, s^{(2)}, \dots, s^{(L)} \right]^\top$$
- The estimation of the model for a single input vector ${\bf z}^{(l)}$ (which would be computed from ${\bf x}^{(l)}$), can be expressed as the following inner product
$${\hat s}^{(l)} = {{\bf z}^{(l)}}^\top {\bf w}$$
- If we now group all input vectors into a matrix ${\bf Z}$, so that each row of ${\bf Z}$ contains the transpose of the corresponding ${\bf z}^{(l)}$, we can express
$$\hat{{\bf s}} = \left[{\hat s}^{(1)}, {\hat s}^{(2)}, \dots, {\hat s}^{(L)} \right]^\top = {\bf Z} {\bf w}, \;\;\;\; \text{with} \;\; {\bf Z} = \left[\begin{array}{cccc}z_0^{(1)}&z_1^{(1)}&\cdots&z_M^{(1)} \\ z_0^{(2)}&z_1^{(2)}&\cdots&z_M^{(2)} \\ \vdots & \vdots & \ddots & \vdots \\ z_0^{(L)}&z_1^{(L)}&\cdots&z_M^{(L)}\end{array}\right]$$
### 2.2.3 Least-squares solution
- Using the previous notation, the cost minimized by the LS model can be expressed as
$$C({\bf w}) = \sum_{l=1}^L \left[s^{(l)} - {\hat s}^{(l)} \right]^2 = \|{\bf s} - {\hat{\bf s}}\|^2 = \|{\bf s} - {\bf Z}{\bf w}\|^2$$
- Since the above expression depends quadratically on ${\bf w}$ and is non-negative, any point where the gradient of $C({\bf w})$ vanishes is necessarily a minimum of the cost (and, provided ${\bf Z}^\top{\bf Z}$ is invertible, that minimum is unique)
$$\nabla_{\bf w} \|{\bf s} - {\bf Z}{\bf w}\|^2\Bigg|_{{\bf w} = {\bf w}_{LS}} = {\bf 0}$$
<b>Exercise:</b>
Solve the previous problem to show that
$${\bf w}_{LS} = \left( {\bf Z}^\top{\bf Z} \right)^{-1} {\bf Z}^\top{\bf s}$$
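As a quick numerical sanity check of this closed-form expression (a sketch on random data, independent of the derivation itself and not part of the original material), the normal-equations solution can be compared with the output of `np.linalg.lstsq`:
```
# Hedged check on random data: both solutions should coincide up to numerical precision
Z_check = np.random.randn(50, 4)   # 50 samples, 4 features (illustrative sizes)
s_check = np.random.randn(50, 1)
w_normal_eq = np.linalg.inv(Z_check.T.dot(Z_check)).dot(Z_check.T).dot(s_check)
w_lstsq = np.linalg.lstsq(Z_check, s_check)[0]
print(np.allclose(w_normal_eq, w_lstsq))
```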
The next fragment of code fits polynomials of increasing order to randomly generated training data. To illustrate the composition of matrix ${\bf Z}$, we will avoid using the functions $\mbox{np.polyfit}$ and $\mbox{np.polyval}$.
```
n_points = 12
n_grid = 200
frec = 3
std_n = 0.2
max_degree = 10
colors = 'brgcmyk'
#Location of the training points
X_tr = (3 * np.random.random((n_points,1)) - 0.5)
#Labels are obtained from a sinusoidal function, and contaminated by noise
S_tr = np.cos(frec*X_tr) + std_n * np.random.randn(n_points,1)
#Equally spaced points in the X-axis
X_grid = np.linspace(np.min(X_tr),np.max(X_tr),n_grid)
#We start by building the Z matrix
Z = []
for el in X_tr.tolist():
Z.append([el[0]**k for k in range(max_degree+1)])
Z = np.matrix(Z)
Z_grid = []
for el in X_grid.tolist():
Z_grid.append([el**k for k in range(max_degree+1)])
Z_grid = np.matrix(Z_grid)
plt.plot(X_tr,S_tr,'b.')
for k in [1, 2, n_points]: # range(max_degree+1):
Z_iter = Z[:,:k+1]
# Least square solution
#w_LS = (np.linalg.inv(Z_iter.T.dot(Z_iter))).dot(Z_iter.T).dot(S_tr)
    # Least squares solution, with fewer numerical errors
w_LS, resid, rank, s = np.linalg.lstsq(Z_iter, S_tr)
#estimates at all grid points
fout = Z_grid[:,:k+1].dot(w_LS)
fout = np.array(fout).flatten()
plt.plot(X_grid,fout,colors[k%len(colors)]+'-',label='Degree '+str(k))
plt.legend(loc='best')
plt.ylim(1.2*np.min(S_tr), 1.2*np.max(S_tr))
```
### 2.2.4 Overfitting the training data
It may seem that increasing the degree of the polynomial is always beneficial, since we can implement a more expressive function. A polynomial of degree $M$ includes all polynomials of lower degree as particular cases. However, if we increase the number of parameters without control, the polynomial eventually becomes expressive enough to fit any given set of training points to arbitrary precision, which does not necessarily mean that we obtain a model that extrapolates well to new data, as we show in the following example:
```
n_points = 35
n_test = 200
n_grid = 200
frec = 3
std_n = 0.7
max_degree = 15
colors = 'brgcmyk'
#Location of the training points
X_tr = (3 * np.random.random((n_points,1)) - 0.5)
#Labels are obtained from a sinusoidal function, and contaminated by noise
S_tr = np.cos(frec*X_tr) + std_n * np.random.randn(n_points,1)
#Test points to validate the generalization of the solution
X_tst = (3 * np.random.random((n_test,1)) - 0.5)
S_tst = np.cos(frec*X_tst) + std_n * np.random.randn(n_test,1)
#Equally spaced points in the X-axis
X_grid = np.linspace(np.min(X_tr),np.max(X_tr),n_grid)
#We start by building the Z matrix
def extend_matrix(X,max_degree):
Z = []
X = X.reshape((X.shape[0],1))
for el in X.tolist():
Z.append([el[0]**k for k in range(max_degree+1)])
return np.matrix(Z)
Z = extend_matrix(X_tr,max_degree)
Z_grid = extend_matrix(X_grid,max_degree)
Z_test = extend_matrix(X_tst,max_degree)
#Variables to store the train and test errors
tr_error = []
tst_error = []
for k in range(max_degree):
Z_iter = Z[:,:k+1]
#Least square solution
#w_LS = (np.linalg.inv(Z_iter.T.dot(Z_iter))).dot(Z_iter.T).dot(S_tr)
    # Least squares solution, with fewer numerical errors
w_LS, resid, rank, s = np.linalg.lstsq(Z_iter, S_tr)
    # estimates at train and test points
f_tr = Z_iter.dot(w_LS)
f_tst = Z_test[:,:k+1].dot(w_LS)
tr_error.append(np.array((S_tr-f_tr).T.dot(S_tr-f_tr)/len(S_tr))[0,0])
tst_error.append(np.array((S_tst-f_tst).T.dot(S_tst-f_tst)/len(S_tst))[0,0])
plt.stem(range(max_degree),tr_error,'b-',label='Train error')
plt.stem(range(max_degree),tst_error,'r-o',label='Test error')
plt.legend(loc='best')
```
### Exercise
Analyze the performance of LS regression on the `Advertising` dataset. A possible starting sketch is given after the list below. You can analyze:
- The performance of linear regression when using just one variable, or using all of them together
- The performance of different non-linear methods (e.g., polynomial or logarithmic transformations)
- Model selection using CV strategies
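The following sketch (an assumption-laden starting point, not a full solution) uses scikit-learn to cross-validate polynomial models on the dataset; the file name `Advertising.csv` and the column names `TV`, `Radio`, `Newspaper` and `Sales` are assumptions that may need to be adapted to your copy of the data:
```
import pandas as pd
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression

# Assumed file and column names; adjust them to your copy of the dataset
data = pd.read_csv('Advertising.csv')
X = data[['TV', 'Radio', 'Newspaper']].values
s = data['Sales'].values

# Compare polynomial degrees with 5-fold cross-validation (mean squared error)
for degree in [1, 2, 3]:
    model = make_pipeline(PolynomialFeatures(degree), LinearRegression())
    scores = cross_val_score(model, X, s, cv=5, scoring='neg_mean_squared_error')
    print('Degree', degree, '--> CV MSE:', -scores.mean())
```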
|
github_jupyter
|
# Import some libraries that will be necessary for working with data and displaying plots
# To visualize plots in the notebook
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.io # To read matlab files
import pylab
from test_helper import Test
n_points = 35
n_grid = 200
frec = 3
std_n = 0.3
#Location of the training points
X_tr = (3 * np.random.random((n_points,1)) - 0.5)
#Labels are obtained from a sinusoidal function, and contaminated by noise
S_tr = np.cos(frec*X_tr) + std_n * np.random.randn(n_points,1)
#Equally spaced points in the X-axis
X_grid = np.linspace(np.min(X_tr),np.max(X_tr),n_grid)
f1 = np.random.random((1,1)) + np.random.random((1,1))*X_grid
f2 = np.random.random((1,1)) + np.random.random((1,1))*X_grid + \
np.random.random((1,1))*(X_grid**2)
f3 = np.random.random((1,1)) + np.random.random((1,1))*X_grid + \
np.random.random((1,1))*(X_grid**2) + np.random.random((1,1))*(X_grid**3)
plt.plot(X_tr,S_tr,'b.')
plt.plot(X_grid,f1.T,'g-',label='Linear function')
plt.plot(X_grid,f2.T,'r-',label='Quadratic function')
plt.plot(X_grid,f3.T,'m-',label='Cubic function')
plt.legend(loc='best')
## Next, we represent some random polynomial functions for degrees between 0 and 14
max_degree = 15
n_points = 200
#Values of X to evaluate the function
X_grid = np.linspace(-1.5, 1.5,n_points)
for idx in range(max_degree):
x1 = plt.subplot(3,5, idx+1)
x1.get_xaxis().set_ticks([])
x1.get_yaxis().set_ticks([])
for kk in range(5):
#Random generation of coefficients for the model
we = np.random.randn(idx+1, 1)
#Evaluate the polynomial with previous coefficients at X_grid values
fout = np.polyval(we, X_grid)
x1.plot(X_grid,fout,'g-')
n_points = 12
n_grid = 200
frec = 3
std_n = 0.2
max_degree = 10
colors = 'brgcmyk'
#Location of the training points
X_tr = (3 * np.random.random((n_points,1)) - 0.5)
#Labels are obtained from a sinusoidal function, and contaminated by noise
S_tr = np.cos(frec*X_tr) + std_n * np.random.randn(n_points,1)
#Equally spaced points in the X-axis
X_grid = np.linspace(np.min(X_tr),np.max(X_tr),n_grid)
#We start by building the Z matrix
Z = []
for el in X_tr.tolist():
Z.append([el[0]**k for k in range(max_degree+1)])
Z = np.matrix(Z)
Z_grid = []
for el in X_grid.tolist():
Z_grid.append([el**k for k in range(max_degree+1)])
Z_grid = np.matrix(Z_grid)
plt.plot(X_tr,S_tr,'b.')
for k in [1, 2, n_points]: # range(max_degree+1):
Z_iter = Z[:,:k+1]
# Least square solution
#w_LS = (np.linalg.inv(Z_iter.T.dot(Z_iter))).dot(Z_iter.T).dot(S_tr)
    # Least squares solution, with fewer numerical errors
w_LS, resid, rank, s = np.linalg.lstsq(Z_iter, S_tr)
#estimates at all grid points
fout = Z_grid[:,:k+1].dot(w_LS)
fout = np.array(fout).flatten()
plt.plot(X_grid,fout,colors[k%len(colors)]+'-',label='Degree '+str(k))
plt.legend(loc='best')
plt.ylim(1.2*np.min(S_tr), 1.2*np.max(S_tr))
n_points = 35
n_test = 200
n_grid = 200
frec = 3
std_n = 0.7
max_degree = 15
colors = 'brgcmyk'
#Location of the training points
X_tr = (3 * np.random.random((n_points,1)) - 0.5)
#Labels are obtained from a sinusoidal function, and contaminated by noise
S_tr = np.cos(frec*X_tr) + std_n * np.random.randn(n_points,1)
#Test points to validate the generalization of the solution
X_tst = (3 * np.random.random((n_test,1)) - 0.5)
S_tst = np.cos(frec*X_tst) + std_n * np.random.randn(n_test,1)
#Equally spaced points in the X-axis
X_grid = np.linspace(np.min(X_tr),np.max(X_tr),n_grid)
#We start by building the Z matrix
def extend_matrix(X,max_degree):
Z = []
X = X.reshape((X.shape[0],1))
for el in X.tolist():
Z.append([el[0]**k for k in range(max_degree+1)])
return np.matrix(Z)
Z = extend_matrix(X_tr,max_degree)
Z_grid = extend_matrix(X_grid,max_degree)
Z_test = extend_matrix(X_tst,max_degree)
#Variables to store the train and test errors
tr_error = []
tst_error = []
for k in range(max_degree):
Z_iter = Z[:,:k+1]
#Least square solution
#w_LS = (np.linalg.inv(Z_iter.T.dot(Z_iter))).dot(Z_iter.T).dot(S_tr)
    # Least squares solution, with fewer numerical errors
w_LS, resid, rank, s = np.linalg.lstsq(Z_iter, S_tr)
    # estimates at train and test points
f_tr = Z_iter.dot(w_LS)
f_tst = Z_test[:,:k+1].dot(w_LS)
tr_error.append(np.array((S_tr-f_tr).T.dot(S_tr-f_tr)/len(S_tr))[0,0])
tst_error.append(np.array((S_tst-f_tst).T.dot(S_tst-f_tst)/len(S_tst))[0,0])
plt.stem(range(max_degree),tr_error,'b-',label='Train error')
plt.stem(range(max_degree),tst_error,'r-o',label='Test error')
plt.legend(loc='best')
| 0.620047 | 0.991023 |
# Scaling XGBoost with Dask and Coiled
This notebook walks through training an [XGBoost](https://xgboost.readthedocs.io/en/latest/) model locally on a small dataset and then using [Dask](https://dask.org/) and [Coiled](https://coiled.io/) to scale out to the cloud and run XGBoost on a larger-than-memory dataset.
# Local XGBoost
[XGBoost](https://xgboost.readthedocs.io/en/latest/) is a popular library for training gradient boosted supervised machine learning models.
## Load our dataset
The first step towards training our model is to load our dataset. We'll use the [Higgs dataset](https://archive.ics.uci.edu/ml/datasets/HIGGS), which is available on Amazon S3. The dataset is composed of 11 million simulated particle collisions, each of which is described by 28 real-valued features and a binary label indicating which class the sample belongs to (i.e. whether the sample represents a signal or a background event). To start, we'll load only a sample of the dataset (just over 175 thousand samples) and process the full dataset in the next section.
```
import pandas as pd
# Load a single CSV file
df = pd.read_csv("s3://coiled-data/higgs/higgs-00.csv")
df
```
Next, we can separate our classification label and training features and then use Scikit-learn's `sklearn.model_selection.train_test_split` function to partition the dataset into training and testing samples.
```
X, y = df.iloc[:, 1:], df["labels"]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
```
To use XGBoost, we'll need to construct `xgboost.DMatrix` objects for both our training and testing datasets -- these are the internal data structures XGBoost uses to manage dataset features and targets. However, since XGBoost plays well with libraries like NumPy and Pandas, we can simply pass our training and testing datasets directly to `xgboost.DMatrix(...)`.
```
import xgboost
dtrain = xgboost.DMatrix(X_train, y_train)
dtest = xgboost.DMatrix(X_test, y_test)
```
Next we'll define the set of hyperparameters we want to use for our XGBoost model and train the model!
```
params = {
'objective': 'binary:logistic',
'max_depth': 3,
'min_child_weight': 0.5,
}
bst = xgboost.train(params, dtrain, num_boost_round=3)
```
Now that our model has been trained, we can use it to make predictions on the testing dataset which was _not_ used to train the model.
```
y_pred = bst.predict(dtest)
y_pred
```
To get a sense of the quality of these predictions we can compute and plot a [receiver operating characteristic (ROC) curve](https://en.wikipedia.org/wiki/Receiver_operating_characteristic) of our model's predictions, which compares the predicted output from our model with the known labels to calculate the true positive rate vs. false positive rate.
```
from sklearn.metrics import roc_curve
fpr, tpr, _ = roc_curve(y_test, y_pred)
from sklearn.metrics import auc
import matplotlib.pyplot as plt
%matplotlib inline
fig, ax = plt.subplots(figsize=(8, 8))
ax.plot(fpr, tpr, lw=3,
label='ROC Curve (area = {:.2f})'.format(auc(fpr, tpr)))
ax.plot([0, 1], [0, 1], "k--", lw=2)
ax.set(
xlim=(0, 1),
ylim=(0, 1),
title="ROC Curve",
xlabel="False Positive Rate",
ylabel="True Positive Rate",
)
ax.legend()
plt.show()
```
# Scaling with Dask and Coiled
In the previous section, we trained a model with a subset of the full Higgs dataset. In this section, we will use the full dataset with 11 million samples! With this increased number of samples, the dataset may not fit comfortably into memory on a personal laptop. So we'll use Dask and Coiled to expand our compute resources to the cloud, enabling us to work with this larger dataset.
### Create a Dask cluster on AWS with Coiled
Let's create a Coiled cluster using the `examples/scaling-xgboost` software environment, which has Dask, XGBoost, Scikit-learn, and other relevant packages installed, and then connect a `dask.distributed.Client` to our cluster so we can begin to submit tasks to the cluster.
```
import coiled
cluster = coiled.Cluster(
n_workers=10,
software="examples/scaling-xgboost",
)
import dask.distributed
client = dask.distributed.Client(cluster)
client
```
#### ☝️ Don’t forget to click the "Dashboard" link above to view the cluster dashboard!
### Load full dataset
Dask's `read_csv` function makes it easy to read in all the CSV files in the dataset.
```
import dask.dataframe as dd
# Load the entire dataset using Dask
ddf = dd.read_csv("s3://coiled-data/higgs/higgs-*.csv", storage_options={"anon": True})
ddf
```
Dask's machine learning library, [Dask-ML](https://ml.dask.org/), mimics Scikit-learn's API, providing scalable versions of functions such as `sklearn.datasets.make_classification` and `sklearn.model_selection.train_test_split` that are designed to work with Dask Arrays and DataFrames in larger-than-memory settings.
Let's use Dask-ML's `train_test_split` to partition the full 11-million-sample Higgs dataset into training and testing sets.
```
from dask_ml.model_selection import train_test_split
X, y = ddf.iloc[:, 1:], ddf["labels"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=True, random_state=2)
```
Next we'll [persist our training and testing datasets](https://distributed.dask.org/en/latest/memory.html#persisting-collections) into distributed memory to avoid any unnecessary re-computations.
```
import dask
X_train, X_test, y_train, y_test = dask.persist(X_train, X_test, y_train, y_test)
X_train
```
To do distributed training of an XGBoost model, we'll use the [Dask-XGBoost](https://github.com/dask/dask-xgboost) package which mirrors XGBoost's interface but works with Dask Arrays and DataFrames.
```
import dask_xgboost
bst = dask_xgboost.train(client, params, X_train, y_train, num_boost_round=3)
```
Finally, we can again compute and plot the ROC curve for this model's predictions.
```
y_pred = dask_xgboost.predict(client, bst, X_test)
y_test, y_pred = dask.compute(y_test, y_pred)
fpr, tpr, _ = roc_curve(y_test, y_pred)
fig, ax = plt.subplots(figsize=(8, 8))
ax.plot(fpr, tpr, lw=3,
label='ROC Curve (area = {:.2f})'.format(auc(fpr, tpr)))
ax.plot([0, 1], [0, 1], "k--", lw=2)
ax.set(
xlim=(0, 1),
ylim=(0, 1),
title="ROC Curve",
xlabel="False Positive Rate",
ylabel="True Positive Rate",
)
ax.legend()
plt.show()
```
Voilà! Congratulations on training a boosted decision tree in the cloud.
|
github_jupyter
|
import pandas as pd
# Load a single CSV file
df = pd.read_csv("s3://coiled-data/higgs/higgs-00.csv")
df
X, y = df.iloc[:, 1:], df["labels"]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
import xgboost
dtrain = xgboost.DMatrix(X_train, y_train)
dtest = xgboost.DMatrix(X_test, y_test)
params = {
'objective': 'binary:logistic',
'max_depth': 3,
'min_child_weight': 0.5,
}
bst = xgboost.train(params, dtrain, num_boost_round=3)
y_pred = bst.predict(dtest)
y_pred
from sklearn.metrics import roc_curve
fpr, tpr, _ = roc_curve(y_test, y_pred)
from sklearn.metrics import auc
import matplotlib.pyplot as plt
%matplotlib inline
fig, ax = plt.subplots(figsize=(8, 8))
ax.plot(fpr, tpr, lw=3,
label='ROC Curve (area = {:.2f})'.format(auc(fpr, tpr)))
ax.plot([0, 1], [0, 1], "k--", lw=2)
ax.set(
xlim=(0, 1),
ylim=(0, 1),
title="ROC Curve",
xlabel="False Positive Rate",
ylabel="True Positive Rate",
)
ax.legend()
plt.show()
import coiled
cluster = coiled.Cluster(
n_workers=10,
software="examples/scaling-xgboost",
)
import dask.distributed
client = dask.distributed.Client(cluster)
client
import dask.dataframe as dd
# Load the entire dataset using Dask
ddf = dd.read_csv("s3://coiled-data/higgs/higgs-*.csv", storage_options={"anon": True})
ddf
from dask_ml.model_selection import train_test_split
X, y = ddf.iloc[:, 1:], ddf["labels"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=True, random_state=2)
import dask
X_train, X_test, y_train, y_test = dask.persist(X_train, X_test, y_train, y_test)
X_train
import dask_xgboost
bst = dask_xgboost.train(client, params, X_train, y_train, num_boost_round=3)
y_pred = dask_xgboost.predict(client, bst, X_test)
y_test, y_pred = dask.compute(y_test, y_pred)
fpr, tpr, _ = roc_curve(y_test, y_pred)
fig, ax = plt.subplots(figsize=(8, 8))
ax.plot(fpr, tpr, lw=3,
label='ROC Curve (area = {:.2f})'.format(auc(fpr, tpr)))
ax.plot([0, 1], [0, 1], "k--", lw=2)
ax.set(
xlim=(0, 1),
ylim=(0, 1),
title="ROC Curve",
xlabel="False Positive Rate",
ylabel="True Positive Rate",
)
ax.legend()
plt.show()
| 0.858318 | 0.991644 |
<table width="100%">
<tr style="border-bottom:solid 2pt #009EE3">
<td style="text-align:left" width="10%">
<a href="biosignalsnotebooks.dwipynb" download><img src="../../images/icons/download.png"></a>
</td>
<td style="text-align:left" width="10%">
<a href="https://mybinder.org/v2/gh/biosignalsnotebooks/biosignalsnotebooks/biosignalsnotebooks_binder?filepath=biosignalsnotebooks_environment%2Fcategories%2FMainFiles%2Fbiosignalsnotebooks.dwipynb" target="_blank"><img src="../../images/icons/program.png" title="Be creative and test your solutions !"></a>
</td>
<td></td>
<td style="text-align:left" width="5%">
<a href="../MainFiles/biosignalsnotebooks.ipynb"><img src="../../images/icons/home.png"></a>
</td>
<td style="text-align:left" width="5%">
<a href="../MainFiles/contacts.ipynb"><img src="../../images/icons/contacts.png"></a>
</td>
<td style="text-align:left" width="5%">
<a href="https://github.com/biosignalsnotebooks/biosignalsnotebooks" target="_blank"><img src="../../images/icons/github.png"></a>
</td>
<td style="border-left:solid 2pt #009EE3" width="15%">
<img src="../../images/ost_logo.png">
</td>
</tr>
</table>
<link rel="stylesheet" href="../../styles/theme_style.css">
<img src="../../images/OS_logo_title_slim.png">
<div class="title"><h2 class="color11"> Available Notebooks </h2></div>
<table id="notebook_list" width="100%">
<tr>
<td width="20%" class="center_cell group_by_header_grey"> Category </td>
<td width="60%" class="center_cell group_by_header"></td>
<td width="20%" class="center_cell"></td>
</tr>
<tr>
<td rowspan='5' class='center_cell open_cell_border_1'><span style='float:center'><img src='../../images/icons/Load.png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color1'>Load</span></td>
<td class='center_cell color1_cell color1_top'><span style='float:center'>Load</span></td>
<td class='center_cell gradient_color1'></td>
</tr>
<tr >
<td class='center_cell open_cell_light'> <a href='../Load/open_h5.ipynb'>Load acquired data from .h5 file</a> </td>
<td class='center_cell'> <a href='../Load/open_h5.ipynb'><div class='file_icon'></div></a> </td>
</tr>
<tr >
<td class='center_cell open_cell_light'> <a href='../Load/open_signals_after_acquisition.ipynb'> Load Signals after Acquisition [OpenSignals] </a> </td>
<td class='center_cell'> <a href='../Load/open_signals_after_acquisition.ipynb'><div class='file_icon'></div></a> </td>
</tr>
<tr >
<td class='center_cell open_cell_light'> <a href='../Load/open_txt.ipynb'>Load acquired data from .txt file</a> </td>
<td class='center_cell'> <a href='../Load/open_txt.ipynb'><div class='file_icon'></div></a> </td>
</tr>
<tr class='border_cell_bottom_white'>
<td class='center_cell open_cell_light'> <a href='../Load/signal_loading_preparatory_steps.ipynb'>Signal Loading - Working with File Header </a> </td>
<td class='center_cell'> <a href='../Load/signal_loading_preparatory_steps.ipynb'><div class='file_icon'></div></a> </td>
</tr>
<tr>
<td rowspan='4' class='center_cell open_cell_border_2'><span style='float:center'><img src='../../images/icons/Record.png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color2'>Record</span></td>
<td class='center_cell color2_cell '><span style='float:center'>Record</span></td>
<td class='center_cell gradient_color2'></td>
</tr>
<tr >
<td class='center_cell open_cell_light'> <a href='../Record/record_data.ipynb'> Signal Acquisition [OpenSignals] </a> </td>
<td class='center_cell'> <a href='../Record/record_data.ipynb'><div class='file_icon'></div></a> </td>
</tr>
<tr >
<td class='center_cell open_cell_light'> <a href='../Record/sampling_rate_and_aliasing.ipynb'>Problems of low sampling rate (aliasing)</a> </td>
<td class='center_cell'> <a href='../Record/sampling_rate_and_aliasing.ipynb'><div class='file_icon'></div></a> </td>
</tr>
<tr class='border_cell_bottom_white'>
<td class='center_cell open_cell_light'> <a href='../Record/store_signals_after_acquisition.ipynb'> Store Files after Acquisition [OpenSignals] </a> </td>
<td class='center_cell'> <a href='../Record/store_signals_after_acquisition.ipynb'><div class='file_icon'></div></a> </td>
</tr>
<tr>
<td rowspan='4' class='center_cell open_cell_border_4'><span style='float:center'><img src='../../images/icons/Pre-Process.png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color4'>Pre-Process</span></td>
<td class='center_cell color4_cell '><span style='float:center'>Pre-Process</span></td>
<td class='center_cell gradient_color4'></td>
</tr>
<tr >
<td class='center_cell open_cell_light'> <a href='../Pre-Process/digital_filtering.ipynb'> Digital Filtering - A Fundamental Pre-Processing Step </a> </td>
<td class='center_cell'> <a href='../Pre-Process/digital_filtering.ipynb'><div class='file_icon'></div></a> </td>
</tr>
<tr >
<td class='center_cell open_cell_light'> <a href='../Pre-Process/emg_fatigue_evaluation_median_freq.ipynb'>Fatigue Evaluation - Evolution of Median Power Frequency</a> </td>
<td class='center_cell'> <a href='../Pre-Process/emg_fatigue_evaluation_median_freq.ipynb'><div class='file_icon'></div></a> </td>
</tr>
<tr class='border_cell_bottom_white'>
<td class='center_cell open_cell_light'> <a href='../Pre-Process/unit_conversion_ecg.ipynb'>ECG Sensor - Unit Conversion </a> </td>
<td class='center_cell'> <a href='../Pre-Process/unit_conversion_ecg.ipynb'><div class='file_icon'></div></a> </td>
</tr>
<tr>
<td rowspan='3' class='center_cell open_cell_border_5'><span style='float:center'><img src='../../images/icons/Detect.png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color5'>Detect</span></td>
<td class='center_cell color5_cell '><span style='float:center'>Detect</span></td>
<td class='center_cell gradient_color5'></td>
</tr>
<tr >
<td class='center_cell open_cell_light'> <a href='../Detect/detect_bursts.ipynb'> Event Detection - Muscular Activations (EMG) </a> </td>
<td class='center_cell'> <a href='../Detect/detect_bursts.ipynb'><div class='file_icon'></div></a> </td>
</tr>
<tr class='border_cell_bottom_white'>
<td class='center_cell open_cell_light'> <a href='../Detect/r_peaks.ipynb'> Event Detection - R Peaks (ECG) </a> </td>
<td class='center_cell'> <a href='../Detect/r_peaks.ipynb'><div class='file_icon'></div></a> </td>
</tr>
<tr>
<td rowspan='2' class='center_cell open_cell_border_6'><span style='float:center'><img src='../../images/icons/Extract.png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color6'>Extract</span></td>
<td class='center_cell color6_cell '><span style='float:center'>Extract</span></td>
<td class='center_cell gradient_color6'></td>
</tr>
<tr class='border_cell_bottom_white'>
<td class='center_cell open_cell_light'> <a href='../Extract/emg_parameters.ipynb'> EMG Analysis - Time and Frequency Parameters </a> </td>
<td class='center_cell'> <a href='../Extract/emg_parameters.ipynb'><div class='file_icon'></div></a> </td>
</tr>
<tr>
<td rowspan='5' class='center_cell open_cell_border_7'><span style='float:center'><img src='../../images/icons/Train_and_Classify.png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color7'>Train_and_Classify</span></td>
<td class='center_cell color7_cell '><span style='float:center'>Train_and_Classify</span></td>
<td class='center_cell gradient_color7'></td>
</tr>
<tr >
<td class='center_cell open_cell_light'> <a href='../Train_and_Classify/classification_game_volume_1.ipynb'> Stone, Paper or Scissor Game - Train and Classify [Volume 1] </a> </td>
<td class='center_cell'> <a href='../Train_and_Classify/classification_game_volume_1.ipynb'><div class='file_icon'></div></a> </td>
</tr>
<tr >
<td class='center_cell open_cell_light'> <a href='../Train_and_Classify/classification_game_volume_2.ipynb'> Stone, Paper or Scissor Game - Train and Classify [Volume 2] </a> </td>
<td class='center_cell'> <a href='../Train_and_Classify/classification_game_volume_2.ipynb'><div class='file_icon'></div></a> </td>
</tr>
<tr >
<td class='center_cell open_cell_light'> <a href='../Train_and_Classify/classification_game_volume_3.ipynb'> Stone, Paper or Scissor Game - Train and Classify [Volume 3] </a> </td>
<td class='center_cell'> <a href='../Train_and_Classify/classification_game_volume_3.ipynb'><div class='file_icon'></div></a> </td>
</tr>
<tr class='border_cell_bottom_white'>
<td class='center_cell open_cell_light'> <a href='../Train_and_Classify/classification_game_volume_4.ipynb'> Stone, Paper or Scissor Game - Train and Classify [Volume 4] </a> </td>
<td class='center_cell'> <a href='../Train_and_Classify/classification_game_volume_4.ipynb'><div class='file_icon'></div></a> </td>
</tr>
<tr>
<td rowspan='2' class='center_cell open_cell_border_12'><span style='float:center'><img src='../../images/icons/Evaluate.png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color12'>Evaluate</span></td>
<td class='center_cell color12_cell '><span style='float:center'>Evaluate</span></td>
<td class='center_cell gradient_color12'></td>
</tr>
<tr class='border_cell_bottom_white'>
<td class='center_cell open_cell_light'> <a href='../Evaluate/classification_game_volume_5.ipynb'> Stone, Paper or Scissor Game - Train and Classify [Volume 5] </a> </td>
<td class='center_cell'> <a href='../Evaluate/classification_game_volume_5.ipynb'><div class='file_icon'></div></a> </td>
</tr>
<tr>
<td rowspan='2' class='center_cell open_cell_border_13'><span style='float:center'><img src='../../images/icons/Other.png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color13'>Other</span></td>
<td class='center_cell color13_cell '><span style='float:center'>Other</span></td>
<td class='center_cell gradient_color13'></td>
</tr>
<tr class='border_cell_bottom_white'>
<td class='center_cell open_cell_light'> <a href='../Other/pairing_device.ipynb'> Pairing a Device [biosignalsplux] </a> </td>
<td class='center_cell'> <a href='../Other/pairing_device.ipynb'><div class='file_icon'></div></a> </td>
</tr>
</table><br>**<span style="color:#009EE3">biosignalsnotebooks</span>** (<a href="../MainFiles/aux_files/biosignalsnotebooks_presentation.pdf">see project presentation<img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a>) is a set of documents and a **<span class="color1">Python</span>** library to provide programming examples in the form of **<span class="color5">Jupyter Notebooks</span>**, as companion to the **<span style="color:#009EE3">OpenSignals</span>** biosignals acquisition tools.
This collection of code samples has the purpose of helping users of PLUX Wireless Biosignals systems, such as **bitalino** or **biosignalsplux**, and the researcher or student interested in recording, processing and classifying biosignals. The examples are set at a level of complexity intended to inspire users and programmers, showing how easy some tasks are and that more complex ones can also be achieved by reusing and recreating some of the examples presented here.
A **<span class="color1">Python</span>** library (entitled **<span style="color:#009EE3">biosignalsnotebooks</span>** ) is the base toolbox to support the notebooks and to provide some useful functionalities. It can be installed through the pip command, as demonstrated on a dedicated <a href="https://pypi.org/project/biosignalsnotebooks/" target="_blank">PyPI <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a> page.
In many cases we also point and illustrate with code the usage of other python toolboxes dedicated to biosignal processing.
The notebooks cover the full pipeline of topics for working with biosignals, such as: **<span class="color1">Load</span>** a file; **<span class="color3">Visualise</span>** the data online and offline; **<span class="color4">Pre-Process</span>** a one-channel signal or a multi-channel acquisition; **<span class="color5">Detect</span>** relevant events in the signals; **<span class="color6">Extract</span>** features from many different types of sensors and domains; **<span class="color7">Train and Classify</span>** among a set of classes with several machine learning approaches; **<span class="color8">Understand</span>** the obtained results with metrics and validation techniques.
These examples are carried out on a multitude of biosignals, from ECG, EDA, EMG, accelerometer and respiration, among many others.
The notebooks have a set of labels to help navigate among topics <a href="../MainFiles/by_tag.ipynb"><img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a>, types of signals <a href="../MainFiles/by_signal_type.ipynb"><img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a>, application area <a href="../MainFiles/biosignalsnotebooks.ipynb"><img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a> and complexity <a href="../MainFiles/by_diff.ipynb"><img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a> level to support the search for particular solutions.
We encourage you to share new example ideas, to pose questions ([email protected]), and to make improvements or suggestions to this set of notebooks.
**Be inspired on how to make the most of your biosignals!**
<br>
<hr>
<table width="100%">
<tr>
<td style="border-right:solid 3px #009EE3" width="20%">
<img src="../../images/ost_logo.png">
</td>
<td width="40%" style="text-align:left">
<a href="../MainFiles/aux_files/biosignalsnotebooks_presentation.pdf" target="_blank">☌ Project Presentation</a>
<br>
<a href="https://github.com/biosignalsnotebooks/biosignalsnotebooks" target="_blank">☌ GitHub Repository</a>
<br>
<a href="https://pypi.org/project/biosignalsnotebooks/" target="_blank">☌ How to install biosignalsnotebooks Python package ?</a>
<br>
<a href="../MainFiles/signal_samples.ipynb">☌ Signal Library</a>
</td>
<td width="40%" style="text-align:left">
<a href="../MainFiles/biosignalsnotebooks.ipynb">☌ Notebook Categories</a>
<br>
<a href="../MainFiles/by_diff.ipynb">☌ Notebooks by Difficulty</a>
<br>
<a href="../MainFiles/by_signal_type.ipynb">☌ Notebooks by Signal Type</a>
<br>
<a href="../MainFiles/by_tag.ipynb">☌ Notebooks by Tag</a>
</td>
</tr>
</table>
<span class="color6">**Auxiliary Code Segment (should not be replicated by the user)**</span>
```
from biosignalsnotebooks.__notebook_support__ import css_style_apply
css_style_apply()
%%html
<script>
// AUTORUN ALL CELLS ON NOTEBOOK-LOAD!
require(
['base/js/namespace', 'jquery'],
function(jupyter, $) {
$(jupyter.events).on("kernel_ready.Kernel", function () {
console.log("Auto-running all cells-below...");
jupyter.actions.call('jupyter-notebook:run-all-cells-below');
jupyter.actions.call('jupyter-notebook:save-notebook');
});
}
);
</script>
```
# 7.4 Implementing Vector Representations of Japanese Words with word2vec and fastText
- This file explains how to convert Japanese words into vector representations using word2vec or fastText.
Note: all files in this chapter assume they are run on Ubuntu. Be careful when running them in environments with a different character encoding, such as Windows.
# 7.4 Learning Goals
1. Become able to implement converting words into vector representations with a pretrained Japanese word2vec model
2. Become able to implement converting words into vector representations with a pretrained Japanese fastText model
# Preparation
Following the book's instructions, prepare the data used in this chapter.
pip install gensim
# 1. From loading the documents to tokenization and dataset creation (same as 8.2)
We implement the preprocessing and word segmentation, and finally the part that creates the dataset.
```
# Use MeCab + NEologd for word segmentation
import MeCab

m_t = MeCab.Tagger('-Owakati -d /usr/local/lib/mecab/dic/mecab-ipadic-neologd')

def tokenizer_mecab(text):
    text = m_t.parse(text)  # words are now separated by spaces
    ret = text.strip().split()  # split on the spaces into a list
    return ret

# Define a normalization function used for preprocessing
import re

def preprocessing_text(text):
    # remove line breaks, half-width spaces and full-width spaces
    text = re.sub('\r', '', text)
    text = re.sub('\n', '', text)
    text = re.sub(' ', '', text)   # half-width space
    text = re.sub('　', '', text)  # full-width space
    # replace every digit character (half- and full-width) with "0"
    text = re.sub(r'[0-9０-９]', '0', text)  # digits
    return text

# Define a function that combines the preprocessing and MeCab word segmentation
def tokenizer_with_preprocessing(text):
    text = preprocessing_text(text)  # normalization preprocessing
    ret = tokenizer_mecab(text)  # MeCab word segmentation
    return ret

import torchtext

# Define the processing applied to the contents read from tsv/csv files
# One Field is prepared for the text and one for the labels
max_length = 25
TEXT = torchtext.data.Field(sequential=True, tokenize=tokenizer_with_preprocessing,
                            use_vocab=True, lower=True, include_lengths=True, batch_first=True, fix_length=max_length)
LABEL = torchtext.data.Field(sequential=False, use_vocab=False)

# Read each tsv file from the folder "data"
train_ds, val_ds, test_ds = torchtext.data.TabularDataset.splits(
    path='./data/', train='text_train.tsv',
    validation='text_val.tsv', test='text_test.tsv', format='tsv',
    fields=[('Text', TEXT), ('Label', LABEL)])
```
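To sanity-check the pipeline, here is a short usage sketch of the combined tokenizer. This is only an illustration: it assumes MeCab and the NEologd dictionary are installed at the path configured above, and the sample sentence is arbitrary.
```
# Quick check of the preprocessing + word segmentation pipeline (sample sentence is arbitrary)
sample = '昨日は 渋谷で 1,000円のランチを食べました。'
print(tokenizer_with_preprocessing(sample))
```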
# 2. Vectorizing Words
## 2.1 word2vec
We convert words into vector representations.
TorchText is not entirely without pretrained Japanese data, but its quality is questionable, so
we use the pretrained Word2Vec vectors published by the Inui-Okazaki Laboratory at Tohoku University.
```
# Download the pretrained Japanese word vectors (Japanese Wikipedia entity vectors) from the links below
# Tohoku University, Inui-Okazaki Laboratory: Japanese Wikipedia entity vectors
# http://www.cl.ecei.tohoku.ac.jp/~m-suzuki/jawiki_vector/
# http://www.cl.ecei.tohoku.ac.jp/~m-suzuki/jawiki_vector/data/20170201.tar.bz2

# They cannot be read by torchtext as-is, so we use the gensim library
# to re-save them in the Word2Vec format

# Prior installation:
# pip install gensim

from gensim.models import KeyedVectors

# Load them once with the gensim library and save them in the word2vec format
model = KeyedVectors.load_word2vec_format(
    './data/entity_vector/entity_vector.model.bin', binary=True)

# Save (this takes a while, a little under 10 minutes)
model.wv.save_word2vec_format('./data/japanese_word2vec_vectors.vec')

# Load them as word vectors with torchtext
from torchtext.vocab import Vectors

japanese_word2vec_vectors = Vectors(
    name='./data/japanese_word2vec_vectors.vec')

# Check the contents of the word vectors
print("1単語を表現する次元数:", japanese_word2vec_vectors.dim)  # dimensions per word
print("単語数:", len(japanese_word2vec_vectors.itos))           # number of words

# Build the vocabulary with these vectors attached
TEXT.build_vocab(train_ds, vectors=japanese_word2vec_vectors, min_freq=1)

# Check the vocabulary vectors
print(TEXT.vocab.vectors.shape)  # 49 words are represented by 200-dimensional vectors
TEXT.vocab.vectors

# Check the order of the words in the vocabulary
TEXT.vocab.stoi

# Check which word the vector 姫 (princess) - 女性 (woman) + 男性 (man) is most similar to
import torch.nn.functional as F

# 姫 - 女性 + 男性
tensor_calc = TEXT.vocab.vectors[41] - \
    TEXT.vocab.vectors[38] + TEXT.vocab.vectors[46]

# Compute the cosine similarity
# dim=0 means compute along dimension 0
print("女王", F.cosine_similarity(tensor_calc, TEXT.vocab.vectors[39], dim=0))      # queen
print("王", F.cosine_similarity(tensor_calc, TEXT.vocab.vectors[44], dim=0))        # king
print("王子", F.cosine_similarity(tensor_calc, TEXT.vocab.vectors[45], dim=0))      # prince
print("機械学習", F.cosine_similarity(tensor_calc, TEXT.vocab.vectors[43], dim=0))  # machine learning
```
Computing 姫 (princess) - 女性 (woman) + 男性 (man) gives, as intended, 王子 (prince) as the closest result.
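As a cross-check, one could also query the gensim KeyedVectors object loaded above directly. This is only a sketch: it assumes the `model` variable is still in memory and that these surface forms exist in the entity-vector vocabulary.
```
# Analogy query on the original gensim model (sketch; the words are assumed to be in its vocabulary)
print(model.most_similar(positive=['姫', '男性'], negative=['女性'], topn=5))
```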
## 2.2 fastText
We now use word vector representations produced by fastText, a vectorization method that improves on word2vec.
A pretrained Japanese model has kindly been published in the article below, so we use it here.
```
# Qiita: "List of word embedding vectors you can use right now"
# https://qiita.com/Hironsan/items/8f7d35f0a36e0f99752c

# Download Word Vectors
# https://drive.google.com/open?id=0ByFQ96A4DgSPNFdleG1GaHcxQzA

# Load them as word vectors with torchtext
# Unlike the word2vec case, these can be read directly
from torchtext.vocab import Vectors

japanese_fasttext_vectors = Vectors(name='./data/vector_neologd/model.vec')

# Check the contents of the word vectors
print("1単語を表現する次元数:", japanese_fasttext_vectors.dim)  # dimensions per word
print("単語数:", len(japanese_fasttext_vectors.itos))           # number of words

# Build the vocabulary with these vectors attached
TEXT.build_vocab(train_ds, vectors=japanese_fasttext_vectors, min_freq=1)

# Check the vocabulary vectors
print(TEXT.vocab.vectors.shape)  # 52 words are represented by 300-dimensional vectors
TEXT.vocab.vectors

# Check the order of the words in the vocabulary
TEXT.vocab.stoi

# Check which word the vector 姫 (princess) - 女性 (woman) + 男性 (man) is most similar to
import torch.nn.functional as F

# 姫 - 女性 + 男性
tensor_calc = TEXT.vocab.vectors[41] - \
    TEXT.vocab.vectors[38] + TEXT.vocab.vectors[46]

# Compute the cosine similarity
# dim=0 means compute along dimension 0
print("女王", F.cosine_similarity(tensor_calc, TEXT.vocab.vectors[39], dim=0))      # queen
print("王", F.cosine_similarity(tensor_calc, TEXT.vocab.vectors[44], dim=0))        # king
print("王子", F.cosine_similarity(tensor_calc, TEXT.vocab.vectors[45], dim=0))      # prince
print("機械学習", F.cosine_similarity(tensor_calc, TEXT.vocab.vectors[43], dim=0))  # machine learning
```
Computing 姫 (princess) - 女性 (woman) + 男性 (man) again gives, as intended, 王子 (prince) as the closest result.
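Since the hard-coded vocabulary indices (41, 38, 46, ...) are easy to get wrong, a small helper that looks words up through `TEXT.vocab.stoi` makes the same check more readable. A minimal sketch, assuming all four words are present in the vocabulary built above:
```
# Hypothetical helper: run the analogy check by word instead of by vocabulary index
def analogy_similarity(a, b, c, candidate):
    # cosine similarity between vec(a) - vec(b) + vec(c) and vec(candidate)
    vec = (TEXT.vocab.vectors[TEXT.vocab.stoi[a]]
           - TEXT.vocab.vectors[TEXT.vocab.stoi[b]]
           + TEXT.vocab.vectors[TEXT.vocab.stoi[c]])
    return F.cosine_similarity(vec, TEXT.vocab.vectors[TEXT.vocab.stoi[candidate]], dim=0)

print("王子", analogy_similarity("姫", "女性", "男性", "王子"))  # prince
```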
This concludes the notebook.
# Predicting Boston Housing Prices
## Using XGBoost in SageMaker (Deploy)
_Deep Learning Nanodegree Program | Deployment_
---
As an introduction to using SageMaker's High Level Python API we will look at a relatively simple problem. Namely, we will use the [Boston Housing Dataset](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html) to predict the median value of a home in the area of Boston Mass.
The documentation for the high level API can be found on the [ReadTheDocs page](http://sagemaker.readthedocs.io/en/latest/)
## General Outline
Typically, when using a notebook instance with SageMaker, you will proceed through the following steps. Of course, not every step will need to be done with each project. Also, there is quite a lot of room for variation in many of the steps, as you will see throughout these lessons.
1. Download or otherwise retrieve the data.
2. Process / Prepare the data.
3. Upload the processed data to S3.
4. Train a chosen model.
5. Test the trained model (typically using a batch transform job).
6. Deploy the trained model.
7. Use the deployed model.
In this notebook we will be skipping step 5, testing the model. We will still test the model but we will do so by first deploying the model and then sending the test data to the deployed model.
## Step 0: Setting up the notebook
We begin by setting up all of the necessary bits required to run our notebook. To start that means loading all of the Python modules we will need.
```
%matplotlib inline
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston
import sklearn.model_selection
```
In addition to the modules above, we need to import the various bits of SageMaker that we will be using.
```
import sagemaker
from sagemaker import get_execution_role
from sagemaker.amazon.amazon_estimator import get_image_uri
from sagemaker.predictor import csv_serializer
# This is an object that represents the SageMaker session that we are currently operating in. This
# object contains some useful information that we will need to access later such as our region.
session = sagemaker.Session()
# This is an object that represents the IAM role that we are currently assigned. When we construct
# and launch the training job later we will need to tell it what IAM role it should have. Since our
# use case is relatively simple we will simply assign the training job the role we currently have.
role = get_execution_role()
```
## Step 1: Downloading the data
Fortunately, this dataset can be retrieved using sklearn and so this step is relatively straightforward.
```
boston = load_boston()
```
## Step 2: Preparing and splitting the data
Given that this is clean tabular data, we don't need to do any processing. However, we do need to split the rows in the dataset up into train, test and validation sets.
```
# First we package up the input data and the target variable (the median value) as pandas dataframes. This
# will make saving the data to a file a little easier later on.
X_bos_pd = pd.DataFrame(boston.data, columns=boston.feature_names)
Y_bos_pd = pd.DataFrame(boston.target)
# We split the dataset into 2/3 training and 1/3 testing sets.
X_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(X_bos_pd, Y_bos_pd, test_size=0.33)
# Then we split the training set further into 2/3 training and 1/3 validation sets.
X_train, X_val, Y_train, Y_val = sklearn.model_selection.train_test_split(X_train, Y_train, test_size=0.33)
```
## Step 3: Uploading the training and validation files to S3
When a training job is constructed using SageMaker, a container is executed which performs the training operation. This container is given access to data that is stored in S3. This means that we need to upload the data we want to use for training to S3. We can use the SageMaker API to do this and hide some of the details.
### Save the data locally
First we need to create the train and validation csv files which we will then upload to S3.
```
# This is our local data directory. We need to make sure that it exists.
data_dir = '../data/boston'
if not os.path.exists(data_dir):
os.makedirs(data_dir)
# We use pandas to save our train and validation data to csv files. Note that we make sure not to include header
# information or an index as this is required by the built in algorithms provided by Amazon. Also, it is assumed
# that the first entry in each row is the target variable.
pd.concat([Y_val, X_val], axis=1).to_csv(os.path.join(data_dir, 'validation.csv'), header=False, index=False)
pd.concat([Y_train, X_train], axis=1).to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False)
```
### Upload to S3
Since we are currently running inside of a SageMaker session, we can use the object which represents this session to upload our data to the 'default' S3 bucket. Note that it is good practice to provide a custom prefix (essentially an S3 folder) to make sure that you don't accidentally interfere with data uploaded from some other notebook or project.
```
prefix = 'boston-xgboost-deploy-hl'
val_location = session.upload_data(os.path.join(data_dir, 'validation.csv'), key_prefix=prefix)
train_location = session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix)
```
## Step 4: Train the XGBoost model
Now that we have the training and validation data uploaded to S3, we can construct our XGBoost model and train it. We will be making use of the high level SageMaker API to do this which will make the resulting code a little easier to read at the cost of some flexibility.
To construct an estimator, the object which we wish to train, we need to provide the location of a container which contains the training code. Since we are using a built in algorithm this container is provided by Amazon. However, the full name of the container is a bit lengthy and depends on the region that we are operating in. Fortunately, SageMaker provides a useful utility method called `get_image_uri` that constructs the image name for us.
To use the `get_image_uri` method we need to provide it with our current region, which can be obtained from the session object, and the name of the algorithm we wish to use. In this notebook we will be using XGBoost however you could try another algorithm if you wish. The list of built in algorithms can be found in the list of [Common Parameters](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-algo-docker-registry-paths.html).
```
# As stated above, we use this utility method to construct the image name for the training container.
container = get_image_uri(session.boto_region_name, 'xgboost',repo_version='0.90-2')
# Now that we know which container to use, we can construct the estimator object.
xgb = sagemaker.estimator.Estimator(container, # The name of the training container
role, # The IAM role to use (our current role in this case)
train_instance_count=1, # The number of instances to use for training
train_instance_type='ml.m4.xlarge', # The type of instance to use for training
output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix),
# Where to save the output (the model artifacts)
sagemaker_session=session) # The current SageMaker session
```
Before asking SageMaker to begin the training job, we should probably set any model specific hyperparameters. There are quite a few that can be set when using the XGBoost algorithm, below are just a few of them. If you would like to change the hyperparameters below or modify additional ones you can find additional information on the [XGBoost hyperparameter page](https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost_hyperparameters.html)
```
xgb.set_hyperparameters(max_depth=5,
eta=0.2,
gamma=4,
min_child_weight=6,
subsample=0.8,
objective='reg:linear',
early_stopping_rounds=10,
num_round=200)
```
Now that we have our estimator object completely set up, it is time to train it. To do this we make sure that SageMaker knows our input data is in csv format and then execute the `fit` method.
```
# This is a wrapper around the location of our train and validation data, to make sure that SageMaker
# knows our data is in csv format.
s3_input_train = sagemaker.s3_input(s3_data=train_location, content_type='csv')
s3_input_validation = sagemaker.s3_input(s3_data=val_location, content_type='csv')
xgb.fit({'train': s3_input_train, 'validation': s3_input_validation})
```
## Step 5: Test the trained model
We will be skipping this step for now. We will still test our trained model but we are going to do it by using the deployed model, rather than setting up a batch transform job.
## Step 6: Deploy the trained model
Now that we have fit our model to the training data, using the validation data to avoid overfitting, we can deploy our model and test it. Deploying is very simple when we use the high level API, we need only call the `deploy` method of our trained estimator.
**NOTE:** When deploying a model you are asking SageMaker to launch a compute instance that will wait for data to be sent to it. As a result, this compute instance will continue to run until *you* shut it down. This is important to know since the cost of a deployed endpoint depends on how long it has been running for.
In other words **If you are no longer using a deployed endpoint, shut it down!**
```
xgb_predictor = xgb.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')
```
## Step 7: Use the model
Now that our model is trained and deployed we can send the test data to it and evaluate the results. Here, because our test data is so small, we can send it all using a single call to our endpoint. If our test dataset was larger we would need to split it up and send the data in chunks, making sure to accumulate the results.
```
# We need to tell the endpoint what format the data we are sending is in
xgb_predictor.content_type = 'text/csv'
xgb_predictor.serializer = csv_serializer
Y_pred = xgb_predictor.predict(X_test.values).decode('utf-8')
# predictions is currently a comma delimited string and so we would like to break it up
# as a numpy array.
Y_pred = np.fromstring(Y_pred, sep=',')
```
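As noted above, if the test set were larger we would need to send it to the endpoint in chunks and accumulate the results. A minimal sketch of that pattern is shown below; the helper name and `chunk_size` are arbitrary choices, not part of the SageMaker API.
```
# Sketch: send a large test set to the endpoint in chunks and accumulate the predictions
def predict_in_chunks(predictor, X, chunk_size=500):
    predictions = []
    for start in range(0, len(X), chunk_size):
        batch = X.iloc[start:start + chunk_size]               # a slice of the test DataFrame
        raw = predictor.predict(batch.values).decode('utf-8')  # same call as above, per chunk
        predictions.append(np.fromstring(raw, sep=','))
    return np.concatenate(predictions)

# Y_pred = predict_in_chunks(xgb_predictor, X_test)
```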
To see how well our model works we can create a simple scatter plot between the predicted and actual values. If the model was completely accurate the resulting scatter plot would look like the line $x=y$. As we can see, our model seems to have done okay but there is room for improvement.
```
plt.scatter(Y_test, Y_pred)
plt.xlabel("Median Price")
plt.ylabel("Predicted Price")
plt.title("Median Price vs Predicted Price")
```
## Delete the endpoint
Since we are no longer using the deployed model we need to make sure to shut it down. Remember that you have to pay for the length of time that your endpoint is deployed so the longer it is left running, the more it costs.
```
xgb_predictor.delete_endpoint()
```
## Optional: Clean up
The default notebook instance on SageMaker doesn't have a lot of excess disk space available. As you continue to complete and execute notebooks you will eventually fill up this disk space, leading to errors which can be difficult to diagnose. Once you are completely finished using a notebook it is a good idea to remove the files that you created along the way. Of course, you can do this from the terminal or from the notebook hub if you would like. The cell below contains some commands to clean up the created files from within the notebook.
```
# First we will remove all of the files contained in the data_dir directory
!rm $data_dir/*
# And then we delete the directory itself
!rmdir $data_dir
```
```
## tensorflow-gpu==2.3.0rc1 has a bug when loading weights after calling inference, so install 2.2.0 instead
!pip install tensorflow-gpu==2.2.0
import yaml
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow_tts.inference import AutoConfig
from tensorflow_tts.inference import TFAutoModel
from tensorflow_tts.inference import AutoProcessor
import IPython.display as ipd
processor = AutoProcessor.from_pretrained("../tensorflow_tts/processor/pretrained/ljspeech_mapper.json")
input_text = "i love you so much."
input_ids = processor.text_to_sequence(input_text)
config = AutoConfig.from_pretrained("../examples_tts/tacotron2/conf/tacotron2.v1.yaml")
tacotron2 = TFAutoModel.from_pretrained(
config=config,
pretrained_path=None,
is_build=False, # don't build model if you want to save it to pb. (TF related bug)
name="tacotron2"
)
tacotron2.setup_window(win_front=6, win_back=6)
tacotron2.setup_maximum_iterations(3000)
```
# Save to Pb
```
decoder_output, mel_outputs, stop_token_prediction, alignment_history = tacotron2.inference(
input_ids=tf.expand_dims(tf.convert_to_tensor(input_ids, dtype=tf.int32), 0),
input_lengths=tf.convert_to_tensor([len(input_ids)], tf.int32),
speaker_ids=tf.convert_to_tensor([0], dtype=tf.int32),
)
tacotron2.load_weights("../examples_tts/tacotron2/checkpoints/tacotron2.v1-120K.h5")
# save model into pb and do inference. Note that signatures should be a tf.function with input_signatures.
tf.saved_model.save(tacotron2, "./test_saved", signatures=tacotron2.inference)
```
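Before reloading the exported model, one way to confirm what was saved is to inspect its signatures with TensorFlow's `saved_model_cli` tool. A sketch of how this might be run from a notebook cell (the exact output depends on the TensorFlow version):
```
# Inspect the exported signatures of the SavedModel
!saved_model_cli show --dir ./test_saved --all
```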
# Load and Inference
```
tacotron2 = tf.saved_model.load("./test_saved")
input_text = "Unless you work on a ship, it's unlikely that you use the word boatswain in everyday conversation, so it's understandably a tricky one. The word - which refers to a petty officer in charge of hull maintenance is not pronounced boats-wain Rather, it's bo-sun to reflect the salty pronunciation of sailors, as The Free Dictionary explains."
input_ids = processor.text_to_sequence(input_text)
decoder_output, mel_outputs, stop_token_prediction, alignment_history = tacotron2.inference(
tf.expand_dims(tf.convert_to_tensor(input_ids, dtype=tf.int32), 0),
tf.convert_to_tensor([len(input_ids)], tf.int32),
tf.convert_to_tensor([0], dtype=tf.int32)
)
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
ax.set_title(f'Alignment steps')
im = ax.imshow(
alignment_history[0].numpy(),
aspect='auto',
origin='lower',
interpolation='none')
fig.colorbar(im, ax=ax)
xlabel = 'Decoder timestep'
plt.xlabel(xlabel)
plt.ylabel('Encoder timestep')
plt.tight_layout()
plt.show()
plt.close()
mel_outputs = tf.reshape(mel_outputs, [-1, 80]).numpy()
fig = plt.figure(figsize=(10, 8))
ax1 = fig.add_subplot(311)
ax1.set_title(f'Predicted Mel-after-Spectrogram')
im = ax1.imshow(np.rot90(mel_outputs), aspect='auto', interpolation='none')
fig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax1)
plt.show()
plt.close()
```
# Run inference on another input to check dynamic shapes
```
input_text = "The Commission further recommends that the Secret Service coordinate its planning as closely as possible with all of the Federal agencies from which it receives information."
input_ids = processor.text_to_sequence(input_text)
decoder_output, mel_outputs, stop_token_prediction, alignment_history = tacotron2.inference(
tf.expand_dims(tf.convert_to_tensor(input_ids, dtype=tf.int32), 0),
tf.convert_to_tensor([len(input_ids)], tf.int32),
tf.convert_to_tensor([0], dtype=tf.int32),
)
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
ax.set_title(f'Alignment steps')
im = ax.imshow(
alignment_history[0].numpy(),
aspect='auto',
origin='lower',
interpolation='none')
fig.colorbar(im, ax=ax)
xlabel = 'Decoder timestep'
plt.xlabel(xlabel)
plt.ylabel('Encoder timestep')
plt.tight_layout()
plt.show()
plt.close()
mel_outputs = tf.reshape(mel_outputs, [-1, 80]).numpy()
fig = plt.figure(figsize=(10, 8))
ax1 = fig.add_subplot(311)
ax1.set_title(f'Predicted Mel-after-Spectrogram')
im = ax1.imshow(np.rot90(mel_outputs), aspect='auto', interpolation='none')
fig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax1)
plt.show()
plt.close()
```
<h2>CS142 - Computability and Complexity </h2>
<h3>Using the Python Automata Simulation Library</h3>
<b>Automata</b> is a Python library, Copyright 2016-2019 Caleb Evans.
Released under the MIT license.<br><br>
This notebook is meant to introduce you to simulating <b>DFAs</b> and <b>NFAs</b> using the Python library <b>Automata</b></br>
The library was selected because it accurately defines and simulates the behavior of automata. Here we focus on these two features of the library:
<UL>
<li> NFAs and DFAs are created using the exact formal definition (make sure you understand these definitions and document them in your programs),
<li> The library respects the limited capability of DFAs and NFAs (e.g., read an input string, accept or reject)
</UL>
You don't need to go to great lengths to learn concepts you haven't encountered yet; independently of your Python coding level, you should be able to explain and exercise the concepts covered in the first 9 lessons of CS142.
To begin using automata-lib, you need to install the Python package on your computer.
<p align="center"><b> > pip install automata-lib</b></p>
Make sure you are using the correct Python on your computer. If you want to use this package within Anaconda, make sure you are using the pip that ships with Anaconda (e.g., >/anaconda3/bin/pip install . . )<br>
```
# First we import the base automaton
from automata.base.automaton import Automaton #Begin by importing the following
from automata.fa.fa import FA # FA is the class of Finite Automata
from automata.fa.dfa import DFA # DFA is the class of Deterministic Finite Automata, which depends on FA
from automata.fa.nfa import NFA # NFA is the class of Nondeterministic Finite Automata, which depends on FA
```
<H2> Deterministic Finite Automata (DFA)</H2>
```
"""
The following code uses Automata-lib to create a DFA.
Recall the formal definition of a DFA (Q, Sigma, delta, q0, F)
The formal definition requires the 5 tuple <Q, Sigma, delta, q0, F>, where:
Q = Set of possible states
Sigma = Input alphabet
delta = Transition function d: Q X Sigma ---> Q
q0: Start state
F : A set of accept states
This is an example that follows exercise 1.6 (a) from Sipser
L(dfa1) = {w| w begins with a 1 and ends with a 0}
"""
dfa1 = DFA(
states={'q0', 'q1', 'q2', 'q3'}, #Enumerate the states of the automaton
input_symbols={'0', '1'}, #The alphabet
transitions={
'q0': {'0': 'q3', '1': 'q1'}, #The transition function (delta)
'q1': {'0': 'q2', '1': 'q1'},
'q2': {'0': 'q2', '1': 'q1'},
'q3': {'0': 'q3', '1': 'q3'}
},
initial_state='q0', # A single initial state
final_states={'q2'} # A set of states
)
"""
Recall that DFAs recognize regular languages; this is the only computation they perform.
All this computation involves is determining whether a string is in a language.
Therefore, a DFA can only accept a string (reach an accept state) or reject it (fail to reach
an accept state by the time it finishes reading the input string).
In Automata-lib there are two main methods associated with DFAs:
Method 1: read_input("input-string").
This method returns the final state of the DFA after it reads the whole input string.
If the DFA accepts, it reaches one of the accept states; otherwise it rejects by never getting
to one of the accept states. If it rejects, this implementation raises an error (it ends in a non-accept state).
Consider the following input string examples of the first method: "10", "100011101010", "100111001"
"""
s = ["10","100011101010","100111001"]
print(':---------------------------------------------:')
print('computing input string 1:',s[0])
print('result of computation: ',dfa1.read_input(s[0]))
print(':---------------------------------------------:')
print('computing string 2:',s[1])
print('result of computation: ',dfa1.read_input(s[1]))
print(':---------------------------------------------:')
print('computing string 3:',s[2])
print('result of computation: ',dfa1.read_input(s[2]))
print(':---------------------------------------------:')
```
<h2>[EXERCISE 1]</h2>
<b><i>Submit the answer to this exercise, and all other exercises in this Python Notebook, with your pre-class work for session 3.2.</i></b><br>
<h3>Explain the output from the three <b>dfa1</b> computations in the previous cell.</h3>
-> The third string, "100111001", ends up in state q1, which is not the accept state. The DFA goes from q0 to q1 on the first 1, then moves back and forth between q1 and q2 on the remaining symbols, and the final 1 leaves it in q1, so the string is rejected.
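To verify this trace programmatically, one can walk the transition table by hand. This small sketch only uses the `initial_state`, `transitions` and `final_states` attributes of the DFA object defined above:
```
# Manually trace "100111001" through dfa1's transition table
state = dfa1.initial_state
for symbol in "100111001":
    state = dfa1.transitions[state][symbol]
    print(symbol, "->", state)
print("accepted" if state in dfa1.final_states else "rejected")
```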
```
"""
Method 2: the second method in Automata-lib is:
accepts_input(input-string).
This method returns True if the DFA accepted the string, otherwise returns False.
Thus, it tells us whether the DFA accepts the string or not.
Consider the following input string examples of the second method: "10", "100011101010", "100111001"
"""
s = ["10","100011101010","100111001"]
print(':---------------------------------------------:')
print('computing input string 1:',s[0])
print('result of computation: ',dfa1.accepts_input(s[0]))
print(':---------------------------------------------:')
print('computing string 2:',s[1])
print('result of computation: ',dfa1.accepts_input(s[1]))
print(':---------------------------------------------:')
print('computing string 3:',s[2])
print('result of computation: ',dfa1.accepts_input(s[2]))
print(':---------------------------------------------:')
"""
Automata-lib has a method that helps its users step through DFA computations.
Unfortunately, it doesn't work properly, so here is a function, DFAIncremental,
that provides a fix.
DFAIncremental steps through the DFA computation
one input character at a time. Notice that DFAIncremental is called after one has defined
the DFA.
"""
def DFAIncremental(DFA, Input):
StorageList = []
current_state = DFA.initial_state
print(current_state)
for i in Input: # Reading the input string, Input, one character a time
current_state = DFA._get_next_current_state(
current_state, i)
print(current_state)
StorageList.append(current_state)
if StorageList[(len(StorageList))-1] in DFA.final_states:
print("The DFA Accepts the Input String")
else:
print("The DFA Rejects the Input String")
DFAIncremental(dfa1,"101010101010")
```
<H2> Nondeterministic Finite Automata (NFA)</H2>
```
"""
The following code uses Automata-lib to create an NFA.
Recall the formal definition of an NFA (Q, Sigma, delta, q0, F)
The formal definition requires the 5 tuple <Q, Sigma, delta, q0, F>, where:
Q = Set of possible states
Sigma = Input alphabet
delta = Transition function d: Q X Sigma ---> P(Q), P(Q) is the power set of Q
q0: Start state
F : A set of accept states
"""
nfa1 = NFA(
states={'q0', 'q1', 'q2', 'q3'}, # Define set of states (Q)
input_symbols={'0', '1'}, # Define the alphabet (Sigma)
transitions={ # Define the set of transition rules or transition function (delta)
'q0': {'0': {'q0'}, '1': {'q0','q1'}},
'q1': { '': {'q2'}, '0': {'q2'}, '1':{'q1'}},
'q2': {'0': {'q3'}},
'q3': {},
},
initial_state='q0', # Initial state
final_states={'q3'} # Set of accept states
)
```
<h2>[EXERCISE 2]</h2>
<h3>Draw the corresponding state diagram for <b>nfa1</b> base on its formal definition. Include the image of your state diagram in your pre-class work answers for session 3.2.</h3>
```
from IPython.display import Image
Image(filename = 'C:/Users/green/Desktop/1.jpg') #DFA image
Image(filename = 'C:/Users/green/Desktop/nfa.jpg')
"""
Like DFAs, NFA computations only involve recognizing whether a string is in a language.
Therefore, an NFA can only accept or reject a string. An NFA can nondeterministically follow
multiple parallel computational branches.
In Automata-lib there are two main methods associated with NFAs:
Method 1: read_input("input-string").
This method returns the set of states the automaton stops at (across the multiple computation branches).
If an accept state is in that set, the NFA accepts. Otherwise, it raises an error
(if the NFA rejects the input).
"""
# Testing nfa1 with three different input strings
lst = ["10100","000100",'10100', "1010000"] #? "1010" and "10100" work, but not "101000" or strings with more than two trailing 0s (because there is nowhere to go from q3 on extra input)
for istr in lst:
print(':---------------------------------------------:')
print(f"computing input string: {istr}")
print(nfa1.read_input(istr))
```
<h2>[EXERCISE 3]</h2>
<b><i>Submit the answer to this exercise with your pre-class work for session 3.2.</i></b><br>
<h3>Explain the output from the three <b>nfa1</b> computations in the previous cell.<h3>
-> The first three strings are accepted because at least one computational branch ends in the accept state q3. The last string, "1010000", has extra input after q3 is reached; since q3 has no outgoing transitions, that branch dies, no surviving branch ends in an accept state, and read_input signals rejection by raising an exception instead of simply stopping at q3.
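Because `read_input` raises an exception on rejection instead of returning a value, wrapping the call in `try/except` makes it easier to test several strings in one loop. A minimal sketch (a generic `Exception` is caught here; the library raises its own rejection exception type on reject):
```
# Check several strings without the loop stopping at the first rejection
for s in ["10100", "000100", "1010000"]:
    try:
        print(s, "->", nfa1.read_input(s), "(accepted)")
    except Exception as err:
        print(s, "-> rejected:", err)
```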
```
"""
Method 2 is accepts_input("input-string").
This method returns True if the NFA accepted the string, otherwise returns False.
Thus, it tells us whether the NFA accepts or rejects an input string rather than generating an exception.
"""
print(nfa1.accepts_input("10"))
print(nfa1.accepts_input("101"))
```
<h2> OTHER DFA AND NFA OPERATIONS</h2>
```
"""
Two more DFA methods:
- DFA.minify() : This method attempts to create a DFA that accepts the same language
as the source DFA but with fewer state (optimize the number of DFA states)
- DFA.from_nfa : Converts an NFA into a corresponding DFA (since for every NFA, there exists an
equivalent DFA that accepts the same language)
"""
minimized_dfa = dfa1.minify()
print('MINIMIZED DFA:')
print('DFA States:',minimized_dfa.states) #Inspecting the DFA states to see whether the DFA has been optimized
#The output is a DFA with the smallest possible set of states that recognize
#the same language
print('DFA Transitions:',minimized_dfa.transitions) #Inspecting the transition rules
print('DFA Accept States:',minimized_dfa.final_states)
"""
Similarly, the NFA object has a method NFA.from_dfa() which converts a DFA into a corresponding NFA that accepts the same
language.
"""
nfa2 = NFA.from_dfa(dfa1)
nfa2.read_input("1100110")
```
# General Imports and Downloads
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import nltk
from nltk.corpus import twitter_samples
nltk.download('twitter_samples')
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.tokenize import TweetTokenizer
from nltk.stem import PorterStemmer
import re
```
# Load the data
```
def load_tweet():
'''
Load the positive and negative tweets
'''
positive_tweets = twitter_samples.strings('positive_tweets.json')
negative_tweets = twitter_samples.strings('negative_tweets.json')
return positive_tweets, negative_tweets
positive_tweets, negative_tweets = load_tweet()
print(f'Positive Tweets length: {len(positive_tweets)}')
print(f'Negative Tweets length: {len(negative_tweets)}')
## splitting the positive and negative tweets in 80:20 split
def split_pos_neg_tweets(pos_tweets, neg_tweets, split=0.8):
'''
Splits the positive and negative tweets and returns the training and validation datasets
'''
max_train_rows = int(len(pos_tweets) * split)
print(f'Splitting the dataset in the ratio: {split}')
train_pos = pos_tweets[:max_train_rows]
val_pos = pos_tweets[max_train_rows:]
train_neg = neg_tweets[:max_train_rows]
val_neg = neg_tweets[max_train_rows:]
train_label = np.append(np.ones(len(train_pos)), np.zeros(len(train_neg)))
val_label = np.append(np.ones(len(val_pos)), np.zeros(len(val_neg)))
print(f'Total Training Rows (pos+neg) : {len(train_pos + train_neg)}')
print(f'Total Validation Rows (pos+neg): {len(val_pos + val_neg)}')
return train_pos + train_neg , val_pos + val_neg, train_label, val_label
train_data, val_data, train_label, val_label = split_pos_neg_tweets(positive_tweets, negative_tweets)
print(f'Sample training data : {train_data[6000]}')
print(f'Sample training label : {train_label[6000]}')
print(f'Sample training data : {train_data[0]}')
print(f'Sample training label : {train_label[0]}')
```
# Tweet Transformation
```
stemmer = PorterStemmer()
def tweet_transform(tweets):
'''
Tokenize the tweets, remove stopwords, usernames, hyperlinks, retweet markers and '#' symbols, and stem the remaining words
'''
stop_words = stopwords.words('english')
tweet = re.sub(r'#','',tweets)
tweet = re.sub(r'\$\w*', '', tweet)
tweet = re.sub(r'https?:\/\/.*[\r\n]*','',tweet) ## remove any hyperlinks
tweet = re.sub(r'^RT[\s]+','',tweet) ## remove any Retweets (RT)
tokenizer = TweetTokenizer(preserve_case=True, reduce_len=False)
tweet_tokenise = tokenizer.tokenize(tweet)
cleaned_tweets = []
for t in tweet_tokenise:
if t not in stop_words and t[0] != '@': ## ignore stopwords and usernames
stemmed_word = stemmer.stem(t) ## stem the words
cleaned_tweets.append(stemmed_word)
return cleaned_tweets
print(f'Original Tweet: \n {train_data[0]} \n')
print(f'Transformed Tweet: \n {tweet_transform(train_data[0])}')
print(f'Original Tweet: \n {train_data[6000]} \n')
print(f'Transformed Tweet: \n {tweet_transform(train_data[6000])}')
```
# Creating word vocabulary
```
def tweet_vocab(tweets):
'''
The vocabulary of the tweet.
'''
vocab = {'__PAD__':0, '__</e>__':1, '__UNK__':2}
for tweet in tweets:
processed_tweet = tweet_transform(tweet)
for word in processed_tweet:
if word not in vocab:
vocab[word] = len(vocab)
return vocab
vocab = tweet_vocab(train_data)
print(f'Total vocabulary : {len(vocab)}')
train_df = pd.DataFrame({"data":train_data,"label":train_label})
val_df = pd.DataFrame({"data":val_data, "label":val_label})
train_df['transformed_data'] = train_df['data'].apply(lambda x: tweet_transform(x))
val_df['transformed_data'] = val_df['data'].apply(lambda x: tweet_transform(x))
train_df['transformed_data'].head()
```
# Convert to Tensor + Generator
```
def tweet_to_tensor(tweet, vocab, unknown_token = '__UNK__', verbose=False):
'''
Converts a tweet to tensors
'''
tensor = []
processed_tweet = tweet_transform(tweet)
UNK_id = vocab.get(unknown_token)
if verbose:
print(f'List of Processed Tweets')
print(processed_tweet)
for word in processed_tweet:
tensor.append(vocab.get(word,UNK_id))
return tensor
print(f'Actual Tweet : \n {val_data[0]}')
print(f'Tensor: {tweet_to_tensor(val_data[0],vocab,verbose=False)}')
train_df['transformed_data_tensor'] = train_df['data'].apply(lambda x: tweet_to_tensor(x,vocab))
val_df['transformed_data_tensor'] = val_df['data'].apply(lambda x: tweet_to_tensor(x,vocab))
train_df['transformed_data_tensor'][0]
train_df['label'][0]
val_df['transformed_data_tensor'][3]
class TweetDataset(Dataset):
'''
Dataset to process tweet
'''
def __init__(self, data, train=True):
self.data = data
self.train = train
def __len__(self):
return self.data.shape[0]
def __getitem__(self, idx):
X = self.data['transformed_data_tensor'][idx]
if self.train:
y = self.data['label'][idx]
X = torch.tensor(X)
if self.train:
return X,y
else:
return X
train_dataset = TweetDataset(train_df, train=True)
val_dataset = TweetDataset(val_df, train=True)
def collate_fn_tweet(batch):
'''
Lookup the batch length and pads it with zeros. Essential for DataLoader in order to be of same size.
Applicable for Train and Validation Tweets.
Input:
Batch: sequence of data from the dataloader
Output:
transformed_data: list of tensors of equal lengths of padded zero's.
'''
max_batch_length = max([ len(b[0]) for b in batch ])
data = torch.LongTensor([list(data.numpy()) + [0] * (max_batch_length - data.shape[0]) for data, label in batch])
label = torch.LongTensor([label for data, label in batch]).view(len(data),-1)
#label = torch.Tensor([label for data, label in batch])
return data, label
train_loader = DataLoader(train_dataset, batch_size=10, shuffle=True, collate_fn=collate_fn_tweet)
val_loader = DataLoader(val_dataset, batch_size=10, shuffle=True, collate_fn=collate_fn_tweet)
for b, tl in enumerate(train_loader):
break
X_train, y_train = tl
print('Data :',X_train, '\nLabel:', y_train)
for b, p in enumerate(train_loader):
break
p[1].shape
for b, val_tl in enumerate(val_loader):
break
X_val, y_val = val_tl
print('Data :',X_val,'\nLabel:',y_val)
```
# Define Model
```
class TweetDNNModel(nn.Module):
    def __init__(self, input_dim=len(vocab), embedding_dim=256, output_dim=2):
        super().__init__()
        self.embeddings = nn.Embedding(input_dim, embedding_dim)
        self.fc1 = nn.Linear(embedding_dim, 120)
        self.fc2 = nn.Linear(120, 80)
        self.fc3 = nn.Linear(80, output_dim)
    def forward(self, x):
        # x: (batch, seq_len) token ids -> (batch, seq_len, embedding_dim)
        x = self.embeddings(x)
        # average the token embeddings so every tweet becomes one fixed-size vector
        x = x.mean(dim=1)
        x = nn.functional.relu(self.fc1(x))
        x = nn.functional.relu(self.fc2(x))
        # return raw logits for the two classes; nn.CrossEntropyLoss applies log-softmax internally
        return self.fc3(x)
model = TweetDNNModel()
model
criterions = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
```
# Training the model
```
epochs = 10
train_losses = []
test_losses = []
train_corrects = []
test_corrects = []
for i in range(epochs):
    train_corr = 0
    test_corr = 0
    model.train()
    for b, (X_train, y_train) in enumerate(train_loader):
        b += 1
        y_predict = model(X_train)
        # CrossEntropyLoss expects (batch, num_classes) logits and 1-D integer targets
        loss = criterions(y_predict, y_train.view(-1))
        actual_predicted = torch.argmax(y_predict, 1).numpy()
        train_corr += (actual_predicted == y_train.view(-1).numpy()).sum()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if b%100 == 0:
            print(f'Epoch:{i} \t Batch:{b} [{10*b}/8000] \t Loss:{loss.item():10.8f} \t Accuracy:{10*(train_corr/b):10.3f}')
    train_losses.append(loss.item())
    train_corrects.append(train_corr)
    ## on the validation dataset
    model.eval()
    with torch.no_grad():
        for b, (X_val, y_val) in enumerate(val_loader):
            val_y_predict = model(X_val)
            val_predicted = torch.argmax(val_y_predict, 1).numpy()
            test_corr += (val_predicted == y_val.view(-1).numpy()).sum()
            val_loss = criterions(val_y_predict, y_val.view(-1))
    test_losses.append(val_loss.item())
    test_corrects.append(test_corr)
plt.plot(range(epochs), train_losses, label='Train Loss')
plt.plot(range(epochs), test_losses, label='Test Loss')
plt.legend()
plt.title('Loss Chart')
plt.show()
plt.plot([(c/8000)*100 for c in train_corrects],label="train accuracy")
plt.plot([(c/2000)*100 for c in test_corrects],label="test accuracy")
plt.legend()
plt.show()
## saving the model
torch.save(model.state_dict(),'tweet_sentiment_model.pt')
```
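A quick usage sketch (not part of the original notebook): reload the saved weights and score a single made-up tweet, reusing the `vocab` and `tweet_to_tensor` helpers defined above. Label `1` corresponds to the positive class, matching the training labels.
```
# Reload the trained weights and classify one new tweet (illustrative example).
model = TweetDNNModel()
model.load_state_dict(torch.load('tweet_sentiment_model.pt'))
model.eval()

new_tweet = "I love this sunny weather :)"   # made-up example input
tensor = torch.tensor(tweet_to_tensor(new_tweet, vocab)).unsqueeze(0)  # add a batch dimension
with torch.no_grad():
    logits = model(tensor)
print('positive' if torch.argmax(logits, dim=1).item() == 1 else 'negative')
```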
```
#build dataframe for data
#imports used later in this notebook (pandas/numpy for the dataframes, Keras layers for the model below)
import numpy as np
import pandas as pd
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
DATA = 'prefixed_train.xlsx'
df = pd.read_excel(DATA)
print(df)
#shuffle data
df = df.sample(frac=1).reset_index(drop=True)
df
cols=[0,5,6,9]
X = df.drop(df.columns[cols], axis=1)
X
cnt_df = pd.read_excel('train_depth.xlsx')
def decide_parents_by_depth(cnt_df, X):
chemical_to_depth={}
for idx, row in cnt_df.iterrows():
if row['Chemical'] not in chemical_to_depth:
chemical_to_depth[row['Chemical']]=[]
chemical_to_depth[row['Chemical']] .append(row['Depth'])
chemical_to_depth[row['Chemical']]=list(set(chemical_to_depth[row['Chemical']]))
#add depth as a feature
depth=[]
for idx,row in X.iterrows():
d = chemical_to_depth[row['Chemical']]
string_d = [str(elem) for elem in d]
depth.append(string_d)
X['Depth']=depth
depth_X= X[['Depth']].join(X.Depth.str.join('|').str.get_dummies().add_prefix('depth_'))
X = pd.concat([X,depth_X],axis=1) #merge them together
X = X.drop(['Depth'], axis=1)
X
return X
def drop_parents_by_depth(cnt_df, not_remove_cols,X):
#drop the chemicals by the depth
toRemove=[]
#not_remove_col=['depth_4','depth_5','depth_6','depth_7','depth_8','depth_9','depth_10']
not_remove_col = not_remove_cols
for idx, row in X.iterrows():
if row['Appeared']==0 and row['Indexing(T/F)']==0: #added as a parent but is not an indexing chemical
if row['depth_0']==1 or row['depth_1']==1 or row['depth_2']==1 or row['depth_3']==1:
remove=True
for col in not_remove_col: #check if it is not contained to another column
if row[col]==1:
remove=False
break
if remove is True:
toRemove.append(idx)
#drop
X= X.drop(X.index[toRemove])
X = X.loc[:, ~X.columns.str.startswith('depth')]
return X
X=decide_parents_by_depth(cnt_df,X)
X
#TO CHANGE:
#not_remove_col=['depth_4','depth_5','depth_6','depth_7','depth_8','depth_9'] NLMCHEM
not_remove_col=['depth_4','depth_5','depth_6','depth_7','depth_8','depth_9','depth_10'] #BC5CDR
X = drop_parents_by_depth(cnt_df,not_remove_col,X )
y = X['Indexing(T/F)']
X = X.drop(['Chemical','Appeared','Indexing(T/F)'], axis=1) #use the hierarchy prefix instead of the chemical identifier
X
#to handle class imbalance
#equally penalize under- or over-represented classes in the training set.
from sklearn.utils.class_weight import compute_class_weight
class_weights = compute_class_weight(class_weight='balanced', classes=np.unique(y), y=y)
print("Class Weights:", class_weights)
class_weight = { 0: class_weights[0] , 1: class_weights[1]}
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X, y,
                                                  stratify=y, #keep the class ratio equal to the original training set
                                                  test_size=0.20)
#Target encoding - applied after the split: fit on the training rows only, so no target
#information from the validation split leaks into the encoding
!pip install --upgrade category_encoders
from category_encoders import TargetEncoder
encoder = TargetEncoder()
X_train = encoder.fit_transform(X_train, y_train)
X_val = encoder.transform(X_val)
from tensorflow.keras.callbacks import TensorBoard
import tensorflow as tf
from keras.models import load_model
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
learning_rate_reduction = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', patience=5, verbose=1, factor=0.5, min_lr=0.0001)  # monitor a metric that is actually reported (no f-score metric is defined in this notebook)
from sklearn.model_selection import RandomizedSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.optimizers import SGD,Adam,RMSprop
from keras.regularizers import l2
#creating model
def create_model(optimizer='adam', activation = 'relu',units=1, hidden_layers = 1, regularizer=0.0, learning_rate = 0.1, dropout=False):
# Initialize the constructor
model = Sequential()
# Add an input layer
model.add(Dense(units,input_dim = X.shape[1], activation=activation,kernel_initializer='he_uniform'))
# Add one hidden layer
for i in range(hidden_layers):
model.add(Dense(units*(i+2),activation=activation, kernel_regularizer=l2(regularizer)))
if dropout is True:
model.add(Dropout(0.2))
# Add an output layer
model.add(Dense(1, activation='sigmoid'))
    #add an optimizer (use == for string comparison, not 'is')
    if optimizer == 'adam':
        optimizer = Adam(learning_rate=learning_rate)
    elif optimizer == 'sgd':
        optimizer = SGD(learning_rate=learning_rate)
    elif optimizer == 'rmsprop':
        optimizer = RMSprop(learning_rate=learning_rate)
    #compile model
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
return model
model = create_model(units=20, hidden_layers = 3, dropout=False, learning_rate=0.0003)
history = model.fit(X_train, y_train, batch_size=64, epochs=15, validation_data=(X_val,y_val), shuffle=True, callbacks=[callback,learning_rate_reduction], class_weight = class_weight)
model.save('nlm+cdr_trained_model.h5')
#Evaluation on the dev set
from sklearn.metrics import classification_report
from sklearn.metrics import f1_score, precision_score, recall_score, confusion_matrix
y_pred1 = model.predict(X_val)  #score the held-out validation split (no separate test set is loaded in this notebook)
y_pred=[]
for elem in y_pred1:
    if elem > 0.50:
        y_pred.append([1])
    else:
        y_pred.append([0])
def get_test_performance(answer, pred):
    #print precision, recall and F1
    print("Precision:", precision_score(answer, pred))
    print("Recall:", recall_score(answer, pred))
    print("F1:", f1_score(answer, pred))
    #printing out confusion matrix
    print(confusion_matrix(answer, pred))
    print(classification_report(answer, pred))
get_test_performance(y_val, y_pred)
```
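`load_model` is imported above but never used. As a minimal follow-up sketch (not in the original notebook), the saved `.h5` file can be reloaded and re-scored on the validation split; the file name is taken from the `model.save` call above.
```
# Reload the saved network and score the validation split again (illustrative sketch).
reloaded = tf.keras.models.load_model('nlm+cdr_trained_model.h5')
reloaded_probs = reloaded.predict(X_val)
reloaded_pred = (reloaded_probs > 0.50).astype(int).ravel()
get_test_performance(y_val, reloaded_pred)
```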
```
import seaborn as sns
import matplotlib.pyplot as plt
%run utils/mlflow_query.py
%run utils/loading.py
%run utils/comparison.py
%run utils/ranks.py
mlflow_helper = MlflowHelper(pkl_file=Path("mlflow_run_df.pkl"))
mlflow_helper.query_all_runs(pkl_file=Path("mlflow_run_df.pkl"))
relevant_mimic_run_df = mlflow_helper.mimic_run_df(include_noise=False, include_refinements=False)
mimic_simple_false_run_id = relevant_mimic_run_df[
(relevant_mimic_run_df['data_tags_model_type'] == 'simple') &
(relevant_mimic_run_df['data_params_ModelConfigbase_hidden_embeddings_trainable'] == 'False')
].iloc[0].get('info_run_id')
mimic_gram_false_run_id = relevant_mimic_run_df[
(relevant_mimic_run_df['data_tags_model_type'] == 'gram') &
(relevant_mimic_run_df['data_params_ModelConfigbase_hidden_embeddings_trainable'] == 'False')
].iloc[0].get('info_run_id')
mimic_text_false_run_id = relevant_mimic_run_df[
(relevant_mimic_run_df['data_tags_model_type'] == 'text') &
(relevant_mimic_run_df['data_params_ModelConfigbase_hidden_embeddings_trainable'] == 'False')
].iloc[0].get('info_run_id')
mimic_causal_false_run_id = relevant_mimic_run_df[
(relevant_mimic_run_df['data_tags_model_type'] == 'causal') &
(relevant_mimic_run_df['data_params_ModelConfigbase_hidden_embeddings_trainable'] == 'False')
].iloc[0].get('info_run_id')
print('Simple', mimic_simple_false_run_id, 'Gram', mimic_gram_false_run_id, 'Text', mimic_text_false_run_id, 'Causal', mimic_causal_false_run_id)
len(relevant_mimic_run_df)
relevant_huawei_run_df = mlflow_helper.huawei_run_df(include_noise=False, include_refinements=False)
huawei_simple_false_run_id = relevant_huawei_run_df[
(relevant_huawei_run_df['data_tags_model_type'] == 'simple') &
(relevant_huawei_run_df['data_params_ModelConfigbase_hidden_embeddings_trainable'] == 'False')
].iloc[0].get('info_run_id')
huawei_gram_false_run_id = relevant_huawei_run_df[
(relevant_huawei_run_df['data_tags_model_type'] == 'gram') &
(relevant_huawei_run_df['data_params_ModelConfigbase_hidden_embeddings_trainable'] == 'False')
].iloc[0].get('info_run_id')
huawei_text_false_run_id = relevant_huawei_run_df[
(relevant_huawei_run_df['data_tags_model_type'] == 'text') &
(relevant_huawei_run_df['data_params_ModelConfigbase_hidden_embeddings_trainable'] == 'False')
].iloc[0].get('info_run_id')
huawei_causal_false_run_id = relevant_huawei_run_df[
(relevant_huawei_run_df['data_tags_model_type'] == 'causal') &
(relevant_huawei_run_df['data_params_ModelConfigbase_hidden_embeddings_trainable'] == 'False')
].iloc[0].get('info_run_id')
print('Simple', huawei_simple_false_run_id, 'Gram', huawei_gram_false_run_id, 'Text', huawei_text_false_run_id, 'Causal', huawei_causal_false_run_id)
len(relevant_huawei_run_df)
```
# Comparing Baseline vs GRAM
```
suffix_1 = '_simple_false'
suffix_2 = '_gram_false'
comparison = Comparison(
    run_id_1=mimic_simple_false_run_id,
    suffix_1=suffix_1,
    run_id_2=mimic_gram_false_run_id,
    suffix_2=suffix_2,
    local_mlflow_dir=mlflow_helper.local_mlflow_dir,
    num_percentiles=10)
comparison.comparison_df.columns
plot_rank_comparison(comparison, color="avg_input_frequencies_percentile")
plot_outlier_distances(comparison)
analyse_best_worst_sequences(comparison, num_best_sequences=1, num_worst_sequences=1, descriptions=load_icd9_text())
suffix_1 = '_simple_false'
suffix_2='_gram_false'
comparison = Comparison(
run_id_1=huawei_simple_false_run_id,
suffix_1=suffix_1,
run_id_2=huawei_gram_false_run_id,
suffix_2=suffix_2,
local_mlflow_dir=mlflow_helper.local_mlflow_dir,
num_percentiles=10)
plot_rank_comparison(comparison, color="avg_input_frequencies_percentile")
plot_outlier_distances(comparison)
analyse_best_worst_sequences(comparison, num_best_sequences=1, num_worst_sequences=1)
```
# Comparing Baseline vs TEXT
```
suffix_1 = '_simple_false'
suffix_2='_text_false'
comparison = Comparison(
run_id_1=mimic_simple_false_run_id,
suffix_1=suffix_1,
run_id_2=mimic_text_false_run_id,
suffix_2=suffix_2,
local_mlflow_dir=mlflow_helper.local_mlflow_dir,
num_percentiles=10)
plot_rank_comparison(comparison)
plot_outlier_distances(comparison)
analyse_best_worst_sequences(comparison, num_best_sequences=1, num_worst_sequences=1, descriptions=load_icd9_text())
suffix_1 = '_simple_false'
suffix_2='_text_false'
comparison = Comparison(
run_id_1=huawei_simple_false_run_id,
suffix_1=suffix_1,
run_id_2=huawei_text_false_run_id,
suffix_2=suffix_2,
local_mlflow_dir=mlflow_helper.local_mlflow_dir,
num_percentiles=10)
plot_rank_comparison(comparison)
plot_outlier_distances(comparison)
analyse_best_worst_sequences(comparison, num_best_sequences=1, num_worst_sequences=1)
```
# Comparing Baseline vs CAUSAL
```
suffix_1 = '_simple_false'
suffix_2='_causal_false'
comparison = Comparison(
run_id_1=mimic_simple_false_run_id,
suffix_1=suffix_1,
run_id_2=mimic_causal_false_run_id,
suffix_2=suffix_2,
local_mlflow_dir=mlflow_helper.local_mlflow_dir,
num_percentiles=10)
plot_rank_comparison(comparison, color="avg_input_frequencies_percentile")
plot_outlier_distances(comparison)
analyse_best_worst_sequences(comparison, num_best_sequences=1, num_worst_sequences=1, descriptions=load_icd9_text())
comparison.comparison_df["output"]
comparison.comparison_df["output"].iloc[0]
print([
(input, comparison.attention_weights_for("_causal_false").get(input.strip()))
for input in comparison.comparison_df["inputs"].iloc[0].split(",")
])
suffix_1 = '_simple_false'
suffix_2='_causal_false'
comparison = Comparison(
run_id_1=huawei_simple_false_run_id,
suffix_1=suffix_1,
run_id_2=huawei_causal_false_run_id,
suffix_2=suffix_2,
local_mlflow_dir=mlflow_helper.local_mlflow_dir,
num_percentiles=10)
plot_rank_comparison(comparison)
plot_outlier_distances(comparison)
analyse_best_worst_sequences(comparison, num_best_sequences=1, num_worst_sequences=1)
```
# Rank comparison per Input/Output Frequency Percentile
```
full_comparison_df = calculate_rank_comparisons(
relevant_dfs = [relevant_mimic_run_df, relevant_huawei_run_df],
local_mlflow_dir=mlflow_helper.local_mlflow_dir,
num_percentiles=10,
)
g = sns.relplot(data=full_comparison_df[
(full_comparison_df["aggregation"] == "mean") &
(full_comparison_df["metric"] != "output_frequency_percentile") &
(full_comparison_df["data_tags_sequence_type"] == "mimic") &
(full_comparison_df["data_tags_sequence_type_2"] == "mimic") &
(full_comparison_df["data_tags_model_type_2"] == "simple") &
(full_comparison_df["data_tags_model_type"] != "simple") &
(full_comparison_df["data_params_ModelConfigbase_feature_embeddings_trainable_2"] == full_comparison_df["data_params_ModelConfigbase_feature_embeddings_trainable"])
],
x="percentile",
y="value",
row="data_tags_model_type",
col='metric',
hue="data_params_ModelConfigbase_feature_embeddings_trainable",
style="data_params_ModelConfigbase_feature_embeddings_trainable_2",
kind="line",
)
g.map(plt.axhline, y=0, color=".7", dashes=(2, 1), zorder=0)
plt.show()
g = sns.relplot(data=full_comparison_df[
(full_comparison_df["aggregation"] == "mean") &
(full_comparison_df["metric"] == "output_frequency_percentile") &
(full_comparison_df["data_tags_sequence_type"] == full_comparison_df["data_tags_sequence_type_2"]) &
(full_comparison_df["data_tags_model_type_2"] == "simple") &
(full_comparison_df["data_tags_model_type"] != "simple") &
(full_comparison_df["data_params_ModelConfigbase_feature_embeddings_trainable_2"] == full_comparison_df["data_params_ModelConfigbase_feature_embeddings_trainable"])
],
x="percentile",
y="value",
row="data_tags_model_type",
col='data_tags_sequence_type',
hue="data_params_ModelConfigbase_feature_embeddings_trainable",
kind="line",
)
g.map(plt.axhline, y=0, color=".7", dashes=(2, 1), zorder=0)
plt.show()
g = sns.relplot(data=full_comparison_df[
(full_comparison_df["aggregation"] == "mean") &
(full_comparison_df["metric"] != "output_frequency_percentile") &
(full_comparison_df["data_tags_sequence_type"] == "huawei_logs") &
(full_comparison_df["data_tags_sequence_type_2"] == "huawei_logs") &
(full_comparison_df["data_tags_model_type_2"] == "simple") &
(full_comparison_df["data_tags_model_type"] != "simple") &
(full_comparison_df["data_params_ModelConfigbase_feature_embeddings_trainable_2"] == full_comparison_df["data_params_ModelConfigbase_feature_embeddings_trainable"])
],
x="percentile",
y="value",
row="data_tags_model_type",
col='metric',
hue="data_params_ModelConfigbase_feature_embeddings_trainable",
kind="line",
)
g.map(plt.axhline, y=0, color=".7", dashes=(2, 1), zorder=0)
plt.show()
```
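The three `relplot` calls above repeat the same filtering pattern. An optional refactoring sketch is shown below; the helper name and the lambda-based metric filter are made up here, and the column names are the ones already used above. It reproduces the per-dataset plots that fix `data_tags_sequence_type`.
```
def plot_percentile_comparison(df, sequence_type, metric_mask, col):
    # keep mean-aggregated rows comparing each model type against the 'simple' baseline
    subset = df[
        (df["aggregation"] == "mean")
        & metric_mask(df)
        & (df["data_tags_sequence_type"] == sequence_type)
        & (df["data_tags_sequence_type_2"] == sequence_type)
        & (df["data_tags_model_type_2"] == "simple")
        & (df["data_tags_model_type"] != "simple")
        & (df["data_params_ModelConfigbase_feature_embeddings_trainable_2"]
           == df["data_params_ModelConfigbase_feature_embeddings_trainable"])
    ]
    g = sns.relplot(data=subset, x="percentile", y="value",
                    row="data_tags_model_type", col=col,
                    hue="data_params_ModelConfigbase_feature_embeddings_trainable",
                    kind="line")
    g.map(plt.axhline, y=0, color=".7", dashes=(2, 1), zorder=0)
    plt.show()

# e.g. the huawei_logs plot from above:
plot_percentile_comparison(full_comparison_df, "huawei_logs",
                           lambda df: df["metric"] != "output_frequency_percentile",
                           col="metric")
```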
# Demystifying Neural Network
## Pytorch Tutorial 2
- Learn about `torch.nn`, `torch.optim`, `Dataset`, `DataLoader` features of Pytorch
### Intro
- In the last PyTorch tutorial (part 3 of the series) I wrote about basic tensor operations. In this tutorial I construct a neural network on the MNIST digit dataset to understand its architecture, following this [excellent source](https://pytorch.org/tutorials/beginner/nn_tutorial.html) from PyTorch. That tutorial helped me the most in understanding neural network architecture: it starts with only basic tensor operations and then gradually adds features such as `torch.nn`, `torch.optim`, `Dataset`, and `DataLoader`, so you can see what each feature does. Since the source already explains everything very well, I restate it here in my own words rather than simply copying it; do check out the original!
```
import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision
from torch import nn, optim
from torch.autograd import Variable
from pathlib import Path
import requests
DATA_PATH = Path("data")
PATH = DATA_PATH / "mnist"
PATH.mkdir(parents=True, exist_ok=True)
URL = "http://deeplearning.net/data/mnist/"
FILENAME = "mnist.pkl.gz"
if not (PATH / FILENAME).exists():
content = requests.get(URL + FILENAME).content
(PATH / FILENAME).open("wb").write(content)
import pickle
import gzip
with gzip.open((PATH/FILENAME).as_posix(), "rb") as f:
((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding="latin-1")
```
- Convert the loaded data to PyTorch tensors using the `map` function.
```
import torch
x_train, y_train, x_valid, y_valid = map(torch.tensor, (x_train, y_train, x_valid, y_valid))
```
- There are 50,000 hand-written digit images; each 28 x 28 image is stored as a flattened 1-D tensor of length 784 (one row per image).
```
# target value has 10 classes 0 to 9
x_train.shape, y_train.min(), y_train.max()
```
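- Optionally, one row can be reshaped back to 28 x 28 and displayed to confirm what the flattened vectors contain (this visualisation is an extra step added here, not part of the original flow):
```
# show the first training example as an image
from matplotlib import pyplot as plt
plt.imshow(x_train[0].reshape((28, 28)), cmap="gray")
plt.show()
```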
## Create a model using only PyTorch tensor operations
- Let's build a neural network using nothing but basic tensor operations. First we create the weight and bias tensors with `torch.randn`, initializing the weights with Xavier initialization.
- Then we use the log softmax function to obtain predictions, and its natural partner, the negative log likelihood, as the loss function.
```
import math
weights = torch.randn(784, 10) / math.sqrt(784) # Xavier initialization multiplies 1/sqrt(n)
```
- **After** initializing the weights, we call `requires_grad_()`. This function tells PyTorch to calculate the gradients of the tensor automatically during back propagation.
- A trailing `_` in PyTorch signifies that the operation is performed in-place. Alternatively, the `requires_grad` switch can be turned on at creation time by passing `requires_grad=True`, as done for the bias tensor below.
```
weights.requires_grad_()
bias = torch.zeros(10, requires_grad=True)
```
- I had not seen `@` before reading this tutorial; it stands for the matrix-multiplication (dot product) operation.
```
def log_softmax(x):
return x - x.exp().sum(-1).log().unsqueeze(-1)
# sum(-1) represents row sum and unsqueeze(-1) adds an additional dimension to the last index
def model(xb):
return log_softmax(xb @ weights + bias) # @ represents dot product operation
```
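- A tiny check, added here only for illustration: `@` gives the same result as `torch.matmul`.
```
a = torch.randn(2, 3)
b = torch.randn(3, 4)
print(torch.allclose(a @ b, torch.matmul(a, b)))  # True: '@' is matrix multiplication
```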
- Set the batch size to 64, i.e. one mini-batch of 64 images. Batch sizes are usually powers of 2 (64, 128, and so on) and can be increased depending on GPU capacity. Calling our `model` function on one batch of data performs a single forward pass and gives us predictions.
```
bs = 64 # batch size is normally power of 2 (64, 128, etc)
xb = x_train[0:bs] # one mini batch of 64 images
preds = model(xb)
print(preds[0], preds.shape)
```
#### negative log likelihood function
- Looking at the return value of `nll` below, `-input[range(target.shape[0]), target].mean()`: here `input` is the log-softmax prediction, `range(target.shape[0])` runs over the batch of 64, and `target` holds the true labels. Indexing the predictions with the target classes picks out the predicted log-probability of the correct class for each example, and averaging the negatives of these values gives the negative log likelihood.
```
def nll(input, target):
    return -input[range(target.shape[0]), target].mean() # pick the predicted log-probability at each target index and average
loss_func = nll # set log likelihood as a loss function
yb = y_train[0:bs]
print(loss_func(preds, yb))
print(loss_func(preds, yb).dtype, preds.shape, yb.shape)
```
#### accuracy
- For each prediction, if the index of the largest value matches the target value, the prediction is correct. To calculate the accuracy of our network we take the mean of these matches, using `torch.argmax` to find the index of the largest predicted value.
```
a = torch.Tensor([1,2,3,4,5,6,1,2])
torch.argmax(a) # gives you the index
def accuracy(out, yb):
preds = torch.argmax(out, dim=1)
return (preds==yb).float().mean()
print(accuracy(preds, yb)) # accuracy is low after this first batch because the initial weights are random
```
### Training loop. For each iteration:
Since we loop over epochs and, within each epoch, over mini-batches, each iteration must:
- select a mini-batch of data (the slice to feed forward)
- use the model to make predictions (feed forward)
- calculate the loss
- call `loss.backward()` to compute the gradients, in this case the gradients of `weights` and `bias`
The weight and bias updates must happen inside **`torch.no_grad()`**, because these update steps themselves should not influence the next gradient computation; we only want to update `weights` and `bias`, and the gradient calculation already finished in `loss.backward()`:
```
weights -= weights.grad * lr
bias -= bias.grad * lr
weights.grad.zero_()
bias.grad.zero_()
```
Afterwards, as the last step, we reset the gradients to zero with **`grad.zero_()`** so that accumulated gradients do not carry over into the next batch.
```
n, c = x_train.shape
from IPython.core.debugger import set_trace #set_trace is useful for debugging in python
lr = 0.5
epochs = 2
for epoch in range(epochs):
for i in range((n-1)//bs+1):
# set_trace()
start_i = i * bs
end_i = start_i + bs
xb = x_train[start_i:end_i]
yb = y_train[start_i: end_i]
pred = model(xb)
loss = loss_func(pred, yb)
loss.backward()
with torch.no_grad():
# because we do not want below actions to be recorded for our next calculation of the gradient
weights -= weights.grad * lr
bias -= bias.grad * lr
weights.grad.zero_()
bias.grad.zero_()
```
- Without any hidden layer, we have trained our first neural network. Let's compare the loss and accuracy after training with the values we got after only one batch of training.
```
print(loss_func(model(xb), yb), accuracy(model(xb), yb))
```
### Refactor using torch.nn.functional
- Above we built the network with basic tensor operations only; now we replace the hand-written softmax and negative log likelihood functions with the ones in `torch.nn.functional`, conventionally imported as `F`.
- PyTorch provides `F.cross_entropy`, which combines the log softmax activation and the negative log likelihood used above into a single function, so the `log_softmax` function defined earlier is no longer needed.
```
import torch.nn.functional as F
loss_func = F.cross_entropy
def model(xb):
return xb @ weights + bias
print(loss_func(model(xb), yb), accuracy(model(xb), yb))
```
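- As an optional sanity check (not in the original tutorial), `F.cross_entropy` on the raw logits matches the hand-written `log_softmax` + `nll` pipeline defined earlier, up to floating-point error:
```
raw = model(xb)                      # raw logits, no activation applied
manual = nll(log_softmax(raw), yb)   # the implementation written by hand above
builtin = F.cross_entropy(raw, yb)
print(torch.allclose(manual, builtin))
```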
### Refactor using nn.Module and nn.Parameter
- Using `nn.Module` and `nn.Parameter` makes the training loop cleaner and shorter. (Note that this is not the Python notion of a module; `nn.Module` is a class.)
- The most effective way to manage the network is to subclass `nn.Module` and put the weights, bias, and feed-forward function we need inside that class.
```
from torch import nn
class Mnist_Logistic(nn.Module): # with only a single layer this is the same as logistic regression
def __init__(self):
super().__init__()
self.weights = nn.Parameter(torch.randn(784, 10)/math.sqrt(784))
self.bias = nn.Parameter(torch.zeros(10))
def forward(self, xb):
return xb @ self.weights + self.bias
model = Mnist_Logistic() # instantiate the neural network object
print(loss_func(model(xb), yb))
```
- Unlike the previous model, where we updated `weights` and `bias` one by one, once the network is wrapped in an object we can loop over `model.parameters()` to update every parameter, and `model.zero_grad()` resets all of their gradients at once.
```
def fit():
for epoch in range(epochs):
for i in range((n-1)//bs+1):
start_i = i * bs
            end_i = start_i + bs
xb = x_train[start_i:end_i]
yb = y_train[start_i:end_i]
pred = model(xb)
loss = loss_func(pred, yb)
loss.backward()
with torch.no_grad():
for p in model.parameters():
p -= p.grad * lr
model.zero_grad()
fit()
```
- After training, let's check that the loss has gone down.
```
print(loss_func(model(xb), yb))
```
### Refactor using nn.Linear
- So far we have computed predictions with the hand-written linear expression `xb @ weights + bias`. PyTorch's `nn.Linear` lets us replace that manual linear function with a built-in linear layer.
```
class Mnist_Logistic(nn.Module):
def __init__(self):
super().__init__()
        self.lin = nn.Linear(784, 10) # takes the input and output sizes as parameters
def forward(self, xb):
return self.lin(xb)
model = Mnist_Logistic()
print(loss_func(model(xb), yb))
fit()
print(loss_func(model(xb), yb))
```
### Refactor using torch.optim
- PyTorch provides various optimization algorithms through `torch.optim`. The optimizer's `step` method neatly performs one update step, which means that this block:
```
with torch.no_grad():
for p in model.parameters(): p -= p.grad * lr
model.zero_grad()
```
can be replaced with
```
opt.step()
opt.zero_grad()
```
so the whole manual parameter update is handled in one go.
`opt.zero_grad()` resets the gradients to zero, just like `model.zero_grad()`, and must be called before computing the gradients for the next batch.
Let's also write a `get_model` function that creates the model and the optimizer together:
```
from torch import optim
def get_model():
model = Mnist_Logistic()
return model, optim.SGD(model.parameters(), lr = lr)
model, opt = get_model()
print(loss_func(model(xb), yb))
for epoch in range(epochs):
for i in range((n-1)//bs+1):
start_i = i * bs
end_i = start_i + bs
xb = x_train[start_i:end_i]
yb = y_train[start_i:end_i]
pred = model(xb)
loss = loss_func(pred, yb)
loss.backward()
opt.step()
opt.zero_grad()
print(loss_func(model(xb),yb))
```
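- Other optimizers in `torch.optim` expose the same `step()`/`zero_grad()` interface, so they can be swapped in without touching the loop; for example (illustrative, not part of the original tutorial):
```
opt = optim.Adam(model.parameters(), lr=1e-3)  # the same training loop works unchanged
```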
### Refactor using Dataset
- PyTorch's `Dataset` class lets us access each example easily by its index.
- Until now we fetched each mini-batch during training by slicing the data and the targets separately:
```
xb = x_train[start_i:end_i]
yb = y_train[start_i:end_i]
```
Instead, after bundling the training data and targets with `train_ds = TensorDataset(x_train, y_train)`,
a whole mini-batch can be fetched in one step with `xb, yb = train_ds[i*bs : i*bs+bs]`.
```
from torch.utils.data import TensorDataset
train_ds = TensorDataset(x_train, y_train)
train_ds[0] # a tuple containing the data and the target for the first example
model, opt = get_model()
for epoch in range(epochs):
for i in range((n-1)//bs+1):
        xb, yb = train_ds[i * bs : i * bs + bs] # slice the Dataset to get one mini-batch at once
pred = model(xb)
loss = loss_func(pred, yb)
loss.backward()
opt.step()
opt.zero_grad()
print(loss_func(model(xb), yb))
```
### Refactor using DataLoader
- A `DataLoader` is in charge of handing out batches. It is created from a `Dataset`, like the one built above.
- A `DataLoader` is an iterator, which makes it convenient for iterating over batches during training.
```
from torch.utils.data import DataLoader
# pass the Dataset train_ds to DataLoader, using the batch size of 64 chosen above
train_dl = DataLoader(train_ds, batch_size = bs)
model, opt = get_model()
for epoch in range(epochs):
    for xb, yb in train_dl: # iterate over the DataLoader to get each mini-batch
pred = model(xb)
loss = loss_func(pred, yb)
loss.backward()
opt.step()
opt.zero_grad()
print(loss_func(model(xb), yb))
```
### Add a validation set
- So far we have only monitored the training loss, but in practice you should **always** evaluate a validation set inside the same loop; tracking the validation loss tells you whether training is overfitting.
- Let's build a `Dataset` and a `DataLoader` from the validation data loaded at the top.
```
train_ds = TensorDataset(x_train, y_train)
train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)
valid_ds = TensorDataset(x_valid, y_valid)
valid_dl = DataLoader(valid_ds, batch_size = bs*2)
```
- Compute and print the validation loss at the end of every epoch.
- Now that there is a validation set, remember that `model.train()` must be switched on before training and `model.eval()` before making predictions, i.e. before computing the validation loss. The reason is that layers used to fight overfitting, such as batch normalization (`nn.BatchNorm2d`) and dropout (`nn.Dropout`), should be active during training but turned off during validation.
```
model, opt = get_model()
for epoch in range(epochs):
model.train()
for xb, yb in train_dl:
pred = model(xb)
loss = loss_func(pred, yb)
loss.backward()
opt.step()
opt.zero_grad()
model.eval()
with torch.no_grad():
valid_loss = sum(loss_func(model(xb), yb) for xb, yb in valid_dl)
    print(epoch, valid_loss / len(valid_dl)) # print the average validation loss
```
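- A small illustration (added here) of why the two switches matter: a dropout layer behaves differently in train and eval mode.
```
drop = nn.Dropout(p=0.5)
x = torch.ones(1, 10)
drop.train()
print(drop(x))  # about half the entries zeroed, the survivors scaled by 2
drop.eval()
print(drop(x))  # identity: all ones
```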
### Create `fit()` and `get_data()`
- Let's write `fit()` and `get_data()`. With these two functions, the training and validation-loss steps above shrink to three lines.
- First we define `loss_batch`, used inside `fit()`, which computes the loss of a single batch. The validation set needs no gradients, so no back propagation is performed for it; the optimizer `opt` is therefore an optional parameter that we pass during training and omit when computing the validation loss.
- `get_data()` takes the Datasets and returns the corresponding DataLoaders.
```
# compute the loss for one batch
import numpy as np  # np is used below to average the per-batch validation losses
def loss_batch(model, loss_func, xb, yb, opt=None):
loss = loss_func(model(xb), yb)
if opt is not None:
loss.backward()
opt.step()
opt.zero_grad()
return loss.item(), len(xb)
def fit(epochs, model, loss_func, opt, train_dl, valid_dl):
for epoch in range(epochs):
model.train()
for xb, yb in train_dl:
            loss_batch(model, loss_func, xb, yb, opt) # pass the optimizer during training
model.eval()
        with torch.no_grad(): # no gradients are computed for the validation set
losses, nums = zip(*[loss_batch(model, loss_func, xb, yb) for xb, yb in valid_dl])
        val_loss = np.sum(np.multiply(losses, nums)) / np.sum(nums) # weighted average of the batch losses
print(epoch, val_loss)
def get_data(train_ds, valid_ds, bs):
return (
DataLoader(train_ds, batch_size=bs, shuffle=True),
DataLoader(valid_ds, batch_size=bs * 2),
)
```
- In just three lines we can now go from creating the DataLoaders to building the model and training it:
```
epochs = 6
train_dl, valid_dl = get_data(train_ds, valid_ds, bs)
model, opt = get_model()
fit(epochs, model, loss_func, opt, train_dl, valid_dl)
```
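- To close the loop, the `accuracy` helper from the beginning can be reused on the validation set (an extra check added here, not in the original tutorial):
```
model.eval()
with torch.no_grad():
    accs = [accuracy(model(xb), yb) for xb, yb in valid_dl]
print(sum(accs) / len(accs))
```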
- In the next PyTorch tutorial I plan to work through a specific architecture such as a CNN.
|
github_jupyter
|
import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision
from torch import nn, optim
from torch.autograd import Variable
from pathlib import Path
import requests
DATA_PATH = Path("data")
PATH = DATA_PATH / "mnist"
PATH.mkdir(parents=True, exist_ok=True)
URL = "http://deeplearning.net/data/mnist/"
FILENAME = "mnist.pkl.gz"
if not (PATH / FILENAME).exists():
content = requests.get(URL + FILENAME).content
(PATH / FILENAME).open("wb").write(content)
import pickle
import gzip
with gzip.open((PATH/FILENAME).as_posix(), "rb") as f:
((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding="latin-1")
import torch
x_train, y_train, x_valid, y_valid = map(torch.tensor, (x_train, y_train, x_valid, y_valid))
# target value has 10 classes 0 to 9
x_train.shape, y_train.min(), y_train.max()
import math
weights = torch.randn(784, 10) / math.sqrt(784) # Xavier initialization multiplies 1/sqrt(n)
weights.requires_grad_()
bias = torch.zeros(10, requires_grad=True)
def log_softmax(x):
return x - x.exp().sum(-1).log().unsqueeze(-1)
# sum(-1) represents row sum and unsqueeze(-1) adds an additional dimension to the last index
def model(xb):
return log_softmax(xb @ weights + bias) # @ represents dot product operation
bs = 64 # batch size is normally power of 2 (64, 128, etc)
xb = x_train[0:bs] # one mini batch of 64 images
preds = model(xb)
print(preds[0], preds.shape)
def nll(input, target):
return -input[range(target.shape[0]), target].mean() #target이 yb이고 각 index에 있는 것 mean
loss_func = nll # set log likelihood as a loss function
yb = y_train[0:bs]
print(loss_func(preds, yb))
print(loss_func(preds, yb).dtype, preds.shape, yb.shape)
a = torch.Tensor([1,2,3,4,5,6,1,2])
torch.argmax(a) # gives you the index
def accuracy(out, yb):
preds = torch.argmax(out, dim=1)
return (preds==yb).float().mean()
print(accuracy(preds, yb)) # 처음에 설정한 weight 값이 random이므로 첫 batch를 돌린 후 accuracy가 낮음을 확인할 수 있다
weights -= weights.grad * lr
bias -= bias.grad * lr
weights.grad.zero_()
bias.grad.zero_()
n, c = x_train.shape
from IPython.core.debugger import set_trace #set_trace is useful for debugging in python
lr = 0.5
epochs = 2
for epoch in range(epochs):
for i in range((n-1)//bs+1):
# set_trace()
start_i = i * bs
end_i = start_i + bs
xb = x_train[start_i:end_i]
yb = y_train[start_i: end_i]
pred = model(xb)
loss = loss_func(pred, yb)
loss.backward()
with torch.no_grad():
# because we do not want below actions to be recorded for our next calculation of the gradient
weights -= weights.grad * lr
bias -= bias.grad * lr
weights.grad.zero_()
bias.grad.zero_()
print(loss_func(model(xb), yb), accuracy(model(xb), yb))
import torch.nn.functional as F
loss_func = F.cross_entropy
def model(xb):
return xb @ weights + bias
print(loss_func(model(xb), yb), accuracy(model(xb), yb))
from torch import nn
class Mnist_Logistic(nn.Module): # layer가 한개 밖에 없으므로 logistic regression과 같다
def __init__(self):
super().__init__()
self.weights = nn.Parameter(torch.randn(784, 10)/math.sqrt(784))
self.bias = nn.Parameter(torch.zeros(10))
def forward(self, xb):
return xb @ self.weights + self.bias
model = Mnist_Logistic() # create the neural network object
print(loss_func(model(xb), yb))
def fit():
for epoch in range(epochs):
for i in range((n-1)//bs+1):
start_i = i * bs
            end_i = start_i + bs
xb = x_train[start_i:end_i]
yb = y_train[start_i:end_i]
pred = model(xb)
loss = loss_func(pred, yb)
loss.backward()
with torch.no_grad():
for p in model.parameters():
p -= p.grad * lr
model.zero_grad()
fit()
print(loss_func(model(xb), yb))
class Mnist_Logistic(nn.Module):
def __init__(self):
super().__init__()
        self.lin = nn.Linear(784, 10) # takes the input and output dimensions as parameters
def forward(self, xb):
return self.lin(xb)
model = Mnist_Logistic()
print(loss_func(model(xb), yb))
fit()
print(loss_func(model(xb), yb))
with torch.no_grad():
for p in model.parameters(): p -= p.grad * lr
model.zero_grad()
opt.step()
opt.zero_grad()
from torch import optim
def get_model():
model = Mnist_Logistic()
return model, optim.SGD(model.parameters(), lr = lr)
model, opt = get_model()
print(loss_func(model(xb), yb))
for epoch in range(epochs):
for i in range((n-1)//bs+1):
start_i = i * bs
end_i = start_i + bs
xb = x_train[start_i:end_i]
yb = y_train[start_i:end_i]
pred = model(xb)
loss = loss_func(pred, yb)
loss.backward()
opt.step()
opt.zero_grad()
print(loss_func(model(xb),yb))
If we previously fetched mini-batches during training by indexing the data and the target values separately,
we can instead bundle them with `train_ds = TensorDataset(x_train, y_train)`
and then fetch a mini-batch in a single step with `xb,yb = train_ds[i*bs : i*bs+bs]`.
### Refactor using DataLoader
- A `DataLoader` is in charge of handling batches. We can create a `DataLoader` from a `Dataset` built from the data as above.
- A `DataLoader` is an iterator, which makes it convenient for iterating over batches during training; see the sketch below.
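A minimal sketch of this refactor (assuming `x_train`, `y_train`, `bs`, `model` and `loss_func` from earlier):
```
from torch.utils.data import TensorDataset, DataLoader

train_ds = TensorDataset(x_train, y_train)      # bundle features and targets
train_dl = DataLoader(train_ds, batch_size=bs)  # yields one mini-batch per iteration

for xb, yb in train_dl:                         # replaces the manual start_i/end_i slicing
    pred = model(xb)
    loss = loss_func(pred, yb)
```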
### Add validation set
- Above we only wrote code that checks the loss on the training data, but in practice you should **always** include a validation set in the loop and check the validation loss as well, so that you can tell whether the model is overfitting.
- Let's build a `Dataset` and a `DataLoader` from the validation data loaded at the top.
- Let's compute and print the validation loss after every epoch.
- Now that we have a validation set, remember that `model.train()` is the switch that must always be turned on before training, and `model.eval()` is the switch that must be turned on before prediction, i.e. before computing the validation loss. The reason is that features that fight overfitting, such as batch normalization (`nn.BatchNorm2d`) and dropout (`nn.Dropout`), are needed during training but must be switched off during validation.
### Create `fit()` and `get_data()`
- Let's write the `fit()` and `get_data()` functions! With these two functions, the training and validation-loss steps above shrink to three lines.
- First we write `loss_batch`, a helper used inside `fit()` that computes the loss of a single batch. Gradients are not needed for the validation set, so no backpropagation is performed there; we therefore make the optimizer `opt` an optional parameter that is passed during training and omitted when computing the validation loss.
- `get_data()` takes the Datasets and returns the corresponding DataLoaders.
- With just three lines we can go from creating the DataLoaders to building the model and training it; a sketch of `loss_batch` and `fit` follows below.
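A minimal sketch of `loss_batch` and `fit`, consistent with the weighted-average validation loss computed at the top of this excerpt (the exact definitions are assumed rather than shown here):
```
import numpy as np
import torch

def loss_batch(model, loss_func, xb, yb, opt=None):
    # compute the loss for one batch; only backpropagate and step when an optimizer is given
    loss = loss_func(model(xb), yb)
    if opt is not None:
        loss.backward()
        opt.step()
        opt.zero_grad()
    return loss.item(), len(xb)

def fit(epochs, model, loss_func, opt, train_dl, valid_dl):
    for epoch in range(epochs):
        model.train()                      # enable dropout / batch-norm training behaviour
        for xb, yb in train_dl:
            loss_batch(model, loss_func, xb, yb, opt)
        model.eval()                       # switch them off before computing the validation loss
        with torch.no_grad():
            losses, nums = zip(*[loss_batch(model, loss_func, xb, yb)
                                 for xb, yb in valid_dl])
        val_loss = np.sum(np.multiply(losses, nums)) / np.sum(nums)
        print(epoch, val_loss)
```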
| 0.811863 | 0.987363 |
# Supervised learning
Supervised learning is the machine learning task of learning a function that maps an input to an output based on example input-output pairs. It infers a function from labeled training data consisting of a set of training examples, and its performance is calculated using a set of testing examples.

We can separate supervised learning into two types of problems: **classification** and **regression**.
- **Classification** asks us to assign data into specific categories. E.g.: given a set of labeled images of [chairs](gloss:chair) or tables, try to identify new photos of chairs or tables.
- **Regression** asks us to understand the relationship between dependent and independent variables. It's commonly used to make predictions, e.g. given a series of historical stock prices, predict the future stock price.
The focus of much recent research in near term quantum supervised learning has been in classification, and with two methods in particular:
## Quantum variational classification

Given an [input set](gloss:input-set), $\mathcal{X}$, and quantum [Hilbert space](gloss:hilbert-space), $\mathcal{H}$, we encode datapoints $\vec{x}_i \in \mathcal{X}$ into quantum states using the quantum [feature map](gloss:feature-map), i.e. $ \class{_feature-map-eq}{U_\Phi ∶ \mathcal{X} \rightarrow \mathcal{H}}$, then process this state with a parameterized quantum circuit $W(\theta)$. The resultant states become $ \class{_state}{|\Psi(x_i,\theta)\rangle = W(\theta)| \Phi(\vec{x}_i)\rangle}$ where parameters are estimated by training to match the target states $|y_i\rangle $ that represent the $y_i$ labels of the training points.
This is covered in the [next page](./variational-classification).
## Quantum kernel estimation

Given an input set, $\mathcal{X}$, and quantum Hilbert space, $\mathcal{H}$, datapoints $\vec{x}_i \in \mathcal{X}$ are encoded into a quantum state by means of the quantum feature map, i.e. $U_\Phi ∶ \mathcal{X} \rightarrow \mathcal{H}$. The inner product of two quantum-encoded states defines a kernel:
$$ \class{_kernel-function}{K(\vec{x}_i,\vec{x}_j)} \equiv \langle \Phi(\vec{x}_i) | \Phi(\vec{x}_j)\rangle_{\mathcal{H}}$$
which is analogous to a kernel in classical machine learning.
This is covered in the [Quantum kernel estimation page](./quantum-feature-maps-kernels).
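As a purely classical illustration of this analogy (not part of the original text), a precomputed kernel matrix can be handed directly to a standard kernel method such as a support vector classifier; a quantum kernel simply replaces the entries of this matrix with inner products estimated on quantum hardware:
```
import numpy as np
from sklearn.svm import SVC

# Toy data (illustrative values only)
X = np.array([[0.1, 0.9], [0.2, 0.8], [0.9, 0.1], [0.8, 0.2]])
y = np.array([0, 0, 1, 1])

def kernel_matrix(A, B, gamma=1.0):
    # classical stand-in for K(x_i, x_j): pairwise similarities between datapoints
    sq_dists = ((A[:, None, :] - B[None, :, :]) ** 2).sum(-1)
    return np.exp(-gamma * sq_dists)

clf = SVC(kernel="precomputed").fit(kernel_matrix(X, X), y)
print(clf.predict(kernel_matrix(X, X)))   # predict takes the kernel between test and training points
```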
Both methods require a way to encode the data into a quantum state. There are several strategies to define the quantum feature map, or encoding, as discussed in a previous [section](./data-encoding). It is a key step in the success of the classification task, and to eventually obtain any [quantum advantage](gloss:quantum-advantage), we need the feature map to be classically intractable.
## References
1. Maria Schuld and Francesco Petruccione, *Supervised Learning with Quantum Computers*, Springer 2018, [doi:10.1007/978-3-319-96424-9](https://www.springer.com/gp/book/9783319964232).
|
github_jupyter
|
# Supervised learning
Supervised learning is the machine learning task of learning a function that maps an input to an output based on example input-output pairs. It infers a function from labeled training data consisting of a set of training examples, and its performance is calculated using a set of testing examples.

We can separate supervised learning into two types of problems: **classification** and **regression**.
- **Classification** asks us to assign data into specific categories. E.g.: given a set of labeled images of [chairs](gloss:chair) or tables, try to identify new photos of chairs or tables.
- **Regression** asks us to understand the relationship between dependent and independent variables. It's commonly used to make predictions, e.g. given a series of historical stock prices, predict the future stock price.
The focus of much recent research in near term quantum supervised learning has been in classification, and with two methods in particular:
## Quantum variational classification

Given an [input set](gloss:input-set), $\mathcal{X}$, and quantum [Hilbert space](gloss:hilbert-space), $\mathcal{H}$, we encode datapoints $\vec{x}_i \in \mathcal{X}$ into quantum states using the quantum [feature map](gloss:feature-map), i.e. $ \class{_feature-map-eq}{U_\Phi ∶ \mathcal{X} \rightarrow \mathcal{H}}$, then process this state with a parameterized quantum circuit $W(\theta)$. The resultant states become $ \class{_state}{|\Psi(x_i,\theta)\rangle = W(\theta)| \Phi(\vec{x}_i)\rangle}$ where parameters are estimated by training to match the target states $|y_i\rangle $ that represent the $y_i$ labels of the training points.
This is covered in the [next page](./variational-classification).
## Quantum kernel estimation

Given an input set, $\mathcal{X}$, and quantum Hilbert space, $\mathcal{H}$, datapoints $\vec{x}_i \in \mathcal{X}$ are encoded into a quantum state by means of the quantum feature map, i.e. $U_\Phi ∶ \mathcal{X} \rightarrow \mathcal{H}$. The inner product of two quantum-encoded states defines a kernel:
$$ \class{_kernel-function}{K(\vec{x}_i,\vec{x}_j)} \equiv \langle \Phi(\vec{x}_i) | \Phi(\vec{x}_j)\rangle_{\mathcal{H}}$$
which is analogous to a kernel in classical machine learning.
This is covered in the [Quantum kernel estimation page](./quantum-feature-maps-kernels).
Both methods require a way to encode the data into a quantum state. There are several strategies to define the quantum feature map, or encoding, as discussed in a previous [section](./data-encoding). It is a key step in the success of the classification task, and to eventually obtain any [quantum advantage](gloss:quantum-advantage), we need the feature map to be classically intractable.
## References
1. Maria Schuld and Francesco Petruccione, *Supervised Learning with Quantum Computers*, Springer 2018, [doi:10.1007/978-3-319-96424-9](https://www.springer.com/gp/book/9783319964232).
| 0.900351 | 0.994795 |
# Adagrad
:label:`sec_adagrad`
Let us begin by considering learning problems with features that occur infrequently.
## Sparse Features and Learning Rates
Imagine that we are training a language model. To get good accuracy we typically want to decrease the learning rate as we keep on training, usually at a rate of $\mathcal{O}(t^{-\frac{1}{2}})$ or slower. Now consider a model training on sparse features, i.e., features that occur only infrequently. This is common for natural language, e.g., it is a lot less likely that we will see the word *preconditioning* than *learning*. However, it is also common in other areas such as computational advertising and personalized collaborative filtering. After all, there are many things that are of interest only for a small number of people.
Parameters associated with infrequent features only receive meaningful updates whenever these features occur. Given a decreasing learning rate we might end up in a situation where the parameters for common features converge rather quickly to their optimal values, whereas for infrequent features we are still short of observing them sufficiently frequently before their optimal values can be determined. In other words, the learning rate either decreases too slowly for frequent features or too quickly for infrequent ones.
A possible hack to redress this issue would be to count the number of times we see a particular feature and to use this as a clock for adjusting learning rates. That is, rather than choosing a learning rate of the form $\eta = \frac{\eta_0}{\sqrt{t + c}}$ we could use $\eta_i = \frac{\eta_0}{\sqrt{s(i, t) + c}}$. Here $s(i, t)$ counts the number of nonzeros for feature $i$ that we have observed up to time $t$. This is actually quite easy to implement at no meaningful overhead. However, it fails whenever we do not quite have sparsity but rather just data where the gradients are often very small and only rarely large. After all, it is unclear where one would draw the line between something that qualifies as an observed feature or not.
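A minimal sketch of that counter-based schedule (hypothetical names, plain NumPy, not part of the original text):
```
import numpy as np

num_features = 5
eta0, c = 0.1, 1.0
counts = np.zeros(num_features)   # s(i, t): how often feature i has been non-zero so far

def sparse_step(w, x, grad):
    # one SGD update with per-feature learning rate eta_i = eta0 / sqrt(s(i, t) + c)
    counts[x != 0] += 1           # only observed features advance their clock
    lr = eta0 / np.sqrt(counts + c)
    return w - lr * grad
```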
Adagrad by :cite:`Duchi.Hazan.Singer.2011` addresses this by replacing the rather crude counter $s(i, t)$ by an aggregate of the squares of previously observed gradients. In particular, it uses $s(i, t+1) = s(i, t) + \left(\partial_i f(\mathbf{x})\right)^2$ as a means to adjust the learning rate. This has two benefits: first, we no longer need to decide just when a gradient is large enough. Second, it scales automatically with the magnitude of the gradients. Coordinates that routinely correspond to large gradients are scaled down significantly, whereas others with small gradients receive a much more gentle treatment. In practice this leads to a very effective optimization procedure for computational advertising and related problems. But this hides some of the additional benefits inherent in Adagrad that are best understood in the context of preconditioning.
## Preconditioning
Convex optimization problems are good for analyzing the characteristics of algorithms. After all, for most nonconvex problems it is difficult to derive meaningful theoretical guarantees, but *intuition* and *insight* often carry over. Let us look at the problem of minimizing $f(\mathbf{x}) = \frac{1}{2} \mathbf{x}^\top \mathbf{Q} \mathbf{x} + \mathbf{c}^\top \mathbf{x} + b$.
As we saw in :numref:`sec_momentum`, it is possible to rewrite this problem in terms of its eigendecomposition $\mathbf{Q} = \mathbf{U}^\top \boldsymbol{\Lambda} \mathbf{U}$ to arrive at a much simplified problem where each coordinate can be solved individually:
$$f(\mathbf{x}) = \bar{f}(\bar{\mathbf{x}}) = \frac{1}{2} \bar{\mathbf{x}}^\top \boldsymbol{\Lambda} \bar{\mathbf{x}} + \bar{\mathbf{c}}^\top \bar{\mathbf{x}} + b.$$
Here we used $\bar{\mathbf{x}} = \mathbf{U} \mathbf{x}$ and consequently $\bar{\mathbf{c}} = \mathbf{U} \mathbf{c}$. The modified problem has as its minimizer $\bar{\mathbf{x}} = -\boldsymbol{\Lambda}^{-1} \bar{\mathbf{c}}$ and minimum value $-\frac{1}{2} \bar{\mathbf{c}}^\top \boldsymbol{\Lambda}^{-1} \bar{\mathbf{c}} + b$. This is much easier to compute since $\boldsymbol{\Lambda}$ is a diagonal matrix containing the eigenvalues of $\mathbf{Q}$.
If we perturb $\mathbf{c}$ slightly we would hope to find only slight changes in the minimizer of $f$. Unfortunately this is not the case. While slight changes in $\mathbf{c}$ lead to equally slight changes in $\bar{\mathbf{c}}$, this is not the case for the minimizer of $f$ (and of $\bar{f}$ respectively). Whenever the eigenvalues $\boldsymbol{\Lambda}_i$ are large we will see only small changes in $\bar{x}_i$ and in the minimum of $\bar{f}$. Conversely, for small $\boldsymbol{\Lambda}_i$ changes in $\bar{x}_i$ can be dramatic. The ratio between the largest and the smallest eigenvalue is called the condition number of an optimization problem.
$$\kappa = \frac{\boldsymbol{\Lambda}_1}{\boldsymbol{\Lambda}_d}.$$
If the condition number $\kappa$ is large, it is difficult to solve the optimization problem accurately. We need to ensure that we are careful in getting a large dynamic range of values right. Our analysis leads to an obvious, albeit somewhat naive question: couldn't we simply "fix" the problem by distorting the space such that all eigenvalues are $1$. In theory this is quite easy: we only need the eigenvalues and eigenvectors of $\mathbf{Q}$ to rescale the problem from $\mathbf{x}$ to one in $\mathbf{z} := \boldsymbol{\Lambda}^{\frac{1}{2}} \mathbf{U} \mathbf{x}$. In the new coordinate system $\mathbf{x}^\top \mathbf{Q} \mathbf{x}$ could be simplified to $\|\mathbf{z}\|^2$. Alas, this is a rather impractical suggestion. Computing eigenvalues and eigenvectors is in general *much more* expensive than solving the actual problem.
While computing eigenvalues exactly might be expensive, guessing them and computing them even somewhat approximately may already be a lot better than not doing anything at all. In particular, we could use the diagonal entries of $\mathbf{Q}$ and rescale it accordingly. This is *much* cheaper than computing eigenvalues.
$$\tilde{\mathbf{Q}} = \mathrm{diag}^{-\frac{1}{2}}(\mathbf{Q}) \mathbf{Q} \mathrm{diag}^{-\frac{1}{2}}(\mathbf{Q}).$$
In this case we have $\tilde{\mathbf{Q}}_{ij} = \mathbf{Q}_{ij} / \sqrt{\mathbf{Q}_{ii} \mathbf{Q}_{jj}}$ and specifically $\tilde{\mathbf{Q}}_{ii} = 1$ for all $i$. In most cases this simplifies the condition number considerably. For instance, in the cases we discussed previously, this would entirely eliminate the problem at hand since the problem is axis-aligned.
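As a quick numerical illustration (with arbitrarily chosen values), this diagonal rescaling can shrink the condition number by orders of magnitude:
```
import numpy as np

Q = np.array([[100.0, 1.0],
              [1.0, 0.02]])               # poorly conditioned quadratic
D = np.diag(1.0 / np.sqrt(np.diag(Q)))    # diag^{-1/2}(Q)
Q_tilde = D @ Q @ D                       # unit diagonal after rescaling

print(np.linalg.cond(Q))                  # roughly 1e4
print(np.linalg.cond(Q_tilde))            # roughly 6
```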
Unfortunately we face yet another problem: in deep learning we typically do not even have access to the second derivative of the objective function: for $\mathbf{x} \in \mathbb{R}^d$ the second derivative even on a minibatch may require $\mathcal{O}(d^2)$ space and work to compute, thus making it practically infeasible. The ingenious idea of Adagrad is to use a proxy for that elusive diagonal of the Hessian that is both relatively cheap to compute and effective---the magnitude of the gradient itself.
In order to see why this works, let us look at $\bar{f}(\bar{\mathbf{x}})$. We have that
$$\partial_{\bar{\mathbf{x}}} \bar{f}(\bar{\mathbf{x}}) = \boldsymbol{\Lambda} \bar{\mathbf{x}} + \bar{\mathbf{c}} = \boldsymbol{\Lambda} \left(\bar{\mathbf{x}} - \bar{\mathbf{x}}_0\right),$$
where $\bar{\mathbf{x}}_0$ is the minimizer of $\bar{f}$. Hence the magnitude of the gradient depends both on $\boldsymbol{\Lambda}$ and the distance from optimality. If $\bar{\mathbf{x}} - \bar{\mathbf{x}}_0$ didn't change, this would be all that's needed. After all, in this case the magnitude of the gradient $\partial_{\bar{\mathbf{x}}} \bar{f}(\bar{\mathbf{x}})$ suffices. Since AdaGrad is a stochastic gradient descent algorithm, we will see gradients with nonzero variance even at optimality. As a result we can safely use the variance of the gradients as a cheap proxy for the scale of the Hessian. A thorough analysis is beyond the scope of this section (it would be several pages). We refer the reader to :cite:`Duchi.Hazan.Singer.2011` for details.
## The Algorithm
Let us formalize the discussion from above. We use the variable $\mathbf{s}_t$ to accumulate past gradient variance as follows.
$$\begin{aligned}
\mathbf{g}_t & = \partial_{\mathbf{w}} l(y_t, f(\mathbf{x}_t, \mathbf{w})), \\
\mathbf{s}_t & = \mathbf{s}_{t-1} + \mathbf{g}_t^2, \\
\mathbf{w}_t & = \mathbf{w}_{t-1} - \frac{\eta}{\sqrt{\mathbf{s}_t + \epsilon}} \cdot \mathbf{g}_t.
\end{aligned}$$
Here the operations are applied coordinate-wise. That is, $\mathbf{v}^2$ has entries $v_i^2$. Likewise $\frac{1}{\sqrt{v}}$ has entries $\frac{1}{\sqrt{v_i}}$ and $\mathbf{u} \cdot \mathbf{v}$ has entries $u_i v_i$. As before $\eta$ is the learning rate and $\epsilon$ is an additive constant that ensures that we do not divide by $0$. Last, we initialize $\mathbf{s}_0 = \mathbf{0}$.
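To make the coordinate-wise arithmetic concrete, here is a single Adagrad step in plain NumPy for the quadratic used below (an illustration, not part of the original text). Note that on the very first step every coordinate moves by roughly $\eta$, since $\mathbf{s}_t$ then contains only that step's squared gradient:
```
import numpy as np

eta, eps = 0.4, 1e-6
w = np.array([1.0, 1.0])
s = np.zeros(2)

g = np.array([0.2 * w[0], 4.0 * w[1]])  # gradient of f(x) = 0.1*x1^2 + 2*x2^2
s = s + g ** 2                          # elementwise square, accumulated per coordinate
w = w - eta / np.sqrt(s + eps) * g      # each coordinate gets its own effective step size
print(w)                                # both coordinates move by about eta on this first step
```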
Just like in the case of momentum we need to keep track of an auxiliary variable, in this case to allow for an individual learning rate per coordinate. This does not increase the cost of Adagrad significantly relative to SGD, simply since the main cost is typically to compute $l(y_t, f(\mathbf{x}_t, \mathbf{w}))$ and its derivative.
Note that accumulating squared gradients in $\mathbf{s}_t$ means that $\mathbf{s}_t$ grows essentially at linear rate (somewhat slower than linearly in practice, since the gradients initially diminish). This leads to an $\mathcal{O}(t^{-\frac{1}{2}})$ learning rate, albeit adjusted on a per coordinate basis. For convex problems this is perfectly adequate. In deep learning, though, we might want to decrease the learning rate rather more slowly. This led to a number of Adagrad variants that we will discuss in the subsequent chapters. For now let us see how it behaves in a quadratic convex problem. We use the same problem as before:
$$f(\mathbf{x}) = 0.1 x_1^2 + 2 x_2^2.$$
We are going to implement Adagrad using the same learning rate as previously, i.e., $\eta = 0.4$. As we can see, the iterative trajectory of the independent variable is smoother. However, due to the cumulative effect of $\boldsymbol{s}_t$, the learning rate continuously decays, so the independent variable does not move as much during later stages of iteration.
```
%matplotlib inline
import math
import tensorflow as tf
from d2l import tensorflow as d2l
def adagrad_2d(x1, x2, s1, s2):
eps = 1e-6
g1, g2 = 0.2 * x1, 4 * x2
s1 += g1 ** 2
s2 += g2 ** 2
x1 -= eta / math.sqrt(s1 + eps) * g1
x2 -= eta / math.sqrt(s2 + eps) * g2
return x1, x2, s1, s2
def f_2d(x1, x2):
return 0.1 * x1 ** 2 + 2 * x2 ** 2
eta = 0.4
d2l.show_trace_2d(f_2d, d2l.train_2d(adagrad_2d))
```
As we increase the learning rate to $2$ we see much better behavior. This already indicates that the decrease in learning rate might be rather aggressive, even in the noise-free case and we need to ensure that parameters converge appropriately.
```
eta = 2
d2l.show_trace_2d(f_2d, d2l.train_2d(adagrad_2d))
```
## Implementation from Scratch
Just like the momentum method, Adagrad needs to maintain a state variable of the same shape as the parameters.
```
def init_adagrad_states(feature_dim):
s_w = tf.Variable(tf.zeros((feature_dim, 1)))
s_b = tf.Variable(tf.zeros(1))
return (s_w, s_b)
def adagrad(params, grads, states, hyperparams):
eps = 1e-6
for p, s, g in zip(params, states, grads):
s[:].assign(s + tf.math.square(g))
p[:].assign(p - hyperparams['lr'] * g / tf.math.sqrt(s + eps))
```
Compared to the experiment in :numref:`sec_minibatch_sgd` we use a
larger learning rate to train the model.
```
data_iter, feature_dim = d2l.get_data_ch11(batch_size=10)
d2l.train_ch11(adagrad, init_adagrad_states(feature_dim),
{'lr': 0.1}, data_iter, feature_dim);
```
## Concise Implementation
Using the built-in `Adagrad` optimizer from `tf.keras.optimizers`, we can invoke the Adagrad algorithm concisely.
```
trainer = tf.keras.optimizers.Adagrad
d2l.train_concise_ch11(trainer, {'learning_rate' : 0.1}, data_iter)
```
## Summary
* Adagrad decreases the learning rate dynamically on a per-coordinate basis.
* It uses the magnitude of the gradient as a means of adjusting how quickly progress is achieved - coordinates with large gradients are compensated with a smaller learning rate.
* Computing the exact second derivative is typically infeasible in deep learning problems due to memory and computational constraints. The gradient can be a useful proxy.
* If the optimization problem has a rather uneven structure Adagrad can help mitigate the distortion.
* Adagrad is particularly effective for sparse features where the learning rate needs to decrease more slowly for infrequently occurring terms.
* On deep learning problems Adagrad can sometimes be too aggressive in reducing learning rates. We will discuss strategies for mitigating this in the context of :numref:`sec_adam`.
## Exercises
1. Prove that for an orthogonal matrix $\mathbf{U}$ and a vector $\mathbf{c}$ the following holds: $\|\mathbf{c} - \mathbf{\delta}\|_2 = \|\mathbf{U} \mathbf{c} - \mathbf{U} \mathbf{\delta}\|_2$. Why does this mean that the magnitude of perturbations does not change after an orthogonal change of variables?
1. Try out Adagrad for $f(\mathbf{x}) = 0.1 x_1^2 + 2 x_2^2$ and also for an objective function rotated by 45 degrees, i.e., $f(\mathbf{x}) = 0.1 (x_1 + x_2)^2 + 2 (x_1 - x_2)^2$. Does it behave differently?
1. Prove [Gerschgorin's circle theorem](https://en.wikipedia.org/wiki/Gershgorin_circle_theorem) which states that eigenvalues $\lambda_i$ of a matrix $\mathbf{M}$ satisfy $|\lambda_i - \mathbf{M}_{jj}| \leq \sum_{k \neq j} |\mathbf{M}_{jk}|$ for at least one choice of $j$.
1. What does Gerschgorin's theorem tell us about the eigenvalues of the diagonally preconditioned matrix $\mathrm{diag}^{-\frac{1}{2}}(\mathbf{M}) \mathbf{M} \mathrm{diag}^{-\frac{1}{2}}(\mathbf{M})$?
1. Try out Adagrad for a proper deep network, such as :numref:`sec_lenet` when applied to Fashion MNIST.
1. How would you need to modify Adagrad to achieve a less aggressive decay in learning rate?
[Discussions](https://discuss.d2l.ai/t/1073)
|
github_jupyter
|
%matplotlib inline
import math
import tensorflow as tf
from d2l import tensorflow as d2l
def adagrad_2d(x1, x2, s1, s2):
eps = 1e-6
g1, g2 = 0.2 * x1, 4 * x2
s1 += g1 ** 2
s2 += g2 ** 2
x1 -= eta / math.sqrt(s1 + eps) * g1
x2 -= eta / math.sqrt(s2 + eps) * g2
return x1, x2, s1, s2
def f_2d(x1, x2):
return 0.1 * x1 ** 2 + 2 * x2 ** 2
eta = 0.4
d2l.show_trace_2d(f_2d, d2l.train_2d(adagrad_2d))
eta = 2
d2l.show_trace_2d(f_2d, d2l.train_2d(adagrad_2d))
def init_adagrad_states(feature_dim):
s_w = tf.Variable(tf.zeros((feature_dim, 1)))
s_b = tf.Variable(tf.zeros(1))
return (s_w, s_b)
def adagrad(params, grads, states, hyperparams):
eps = 1e-6
for p, s, g in zip(params, states, grads):
s[:].assign(s + tf.math.square(g))
p[:].assign(p - hyperparams['lr'] * g / tf.math.sqrt(s + eps))
data_iter, feature_dim = d2l.get_data_ch11(batch_size=10)
d2l.train_ch11(adagrad, init_adagrad_states(feature_dim),
{'lr': 0.1}, data_iter, feature_dim);
trainer = tf.keras.optimizers.Adagrad
d2l.train_concise_ch11(trainer, {'learning_rate' : 0.1}, data_iter)
| 0.532425 | 0.989561 |
```
from IPython.core.display import HTML
with open ('style.css', 'r') as file:
css = file.read()
HTML(css)
```
# Japanese Intelligence Test
On the internet, at the address
<a href="https://www.pedagonet.com/Fun/flashgame185.htm">https://www.pedagonet.com/Fun/flashgame185.htm</a>,
you will find an animation of a brain-teaser that you are supposed to solve with the help of a program.
The task is once again about a group of people who have to cross a river
and only have one boat, which holds at most two people.
The people are a mother with two daughters, a father with two
sons, a police officer, and a criminal.
The following constraints have to be observed during the crossing:
<ol>
<li> The father must not be on a bank with one of the daughters unless the mother is there too. </li>
<li> The mother must not be on a bank with one of the sons unless the father is there too. </li>
<li> If the criminal is not alone, the police officer has to watch him.
     The criminal may be alone, however, because his ankle shackles prevent
     him from running away. </li>
<li> Only the father, the mother, and the police officer can steer the boat. </li>
</ol>
## Helper Functions
The function $\texttt{power}(M)$ computes the power set of $M$; it satisfies:
$$ \texttt{power}(M) = 2^M = \bigl\{A \mid A \subseteq M \} $$
```
def power(M):
"This function computes the power set of the set M."
if M == set():
return { frozenset() }
else:
C = set(M) # C is a copy of M as we don't want to change the set M
x = C.pop() # pop removes the element x from the set C
P1 = power(C)
P2 = { A | {x} for A in P1 }
return P1 | P2
```
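For example, calling it on a two-element set returns the four subsets as `frozenset` objects (order may vary):
```
power({1, 2})
# {frozenset(), frozenset({1}), frozenset({2}), frozenset({1, 2})}
```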
We will run *breadth-first search* to solve this problem.
```
%run Breadth-First-Search.ipynb
```
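The `search` function used below is defined in `Breadth-First-Search.ipynb` and is not shown here; a minimal breadth-first search with the same interface might look like the following sketch (an assumption, not the notebook's actual code):
```
from collections import deque

def search(R, start, goal):
    "Breadth-first search for a shortest path from start to goal in the relation R."
    frontier = deque([[start]])
    visited = {start}
    while frontier:
        path = frontier.popleft()
        state = path[-1]
        if state == goal:
            return path
        for (s1, s2) in R:
            if s1 == state and s2 not in visited:
                visited.add(s2)
                frontier.append(path + [s2])
    return None
```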
## Specification of the Puzzle
```
All = { "Polizist", "Ganove", "Mutter", "Vater",
"Anton", "Bruno", "Cindy", "Doris", "Floß"
}
All = frozenset(All)
```
There is a problem in the following cases:
<ol>
<li> The father is on a bank with one of the daughters and the mother is on the other bank. </li>
<li> The mother is on a bank with one of the sons and the father is on the other bank. </li>
<li> The criminal is on a bank with the father, the mother, or one of the children, and the police officer is on the other bank. </li>
</ol>
```
def problem(S):
return( 'Vater' in S and 'Cindy' in S and not 'Mutter' in S or
'Vater' in S and 'Doris' in S and not 'Mutter' in S or
'Mutter' in S and 'Anton' in S and not 'Vater' in S or
'Mutter' in S and 'Bruno' in S and not 'Vater' in S or
'Ganove' in S and 'Vater' in S and not 'Polizist' in S or
'Ganove' in S and 'Mutter' in S and not 'Polizist' in S or
'Ganove' in S and 'Cindy' in S and not 'Polizist' in S or
'Ganove' in S and 'Doris' in S and not 'Polizist' in S or
'Ganove' in S and 'Anton' in S and not 'Polizist' in S or
'Ganove' in S and 'Bruno' in S and not 'Polizist' in S
)
```
<tt>States</tt> is the set of all states that do not have a problem.
```
States = { S for S in power(All) if not problem(S) and
not problem(All-S)
}
```
There are either 140 or 138 different states.
```
len(States)
```
<tt>R1</tt> describes the transitions where the boat crosses the river from left to right.
```
R1 = { (S, S - B) for S in States
for B in power(S)
if S - B in States and 'Floß' in S and 'Floß' not in S - B
and len(B) in (2, 3)
and ('Vater' in B or
'Mutter' in B or
'Polizist' in B
)
}
```
<tt>R1</tt> has 100 elements.
```
len(R1)
```
<tt>R2</tt> is the inverse of <tt>R1</tt>.
```
R2 = {(S2, S1) for (S1, S2) in R1}
R = R1 | R2
dot = dot_graph(R)
dot.render('graph', view=True)
```
At the start everyone is on the left bank; at the end everyone should be on the right bank.
```
start = All
goal = set()
Path = search(R, start, goal)
for x in Path:
print(set(x))
```
Solving the problem requires 17 crossings, so the list <tt>Path</tt> has length 18.
```
len(Path)
```
## Helper Functions for Printing the Solution
The following functions make it easier to print the computed path nicely. You do not need to change anything here; it is enough to run these functions.
```
def shorten(S):
"""
Shorten all strings in the set S to one character and turn S in to a
sorted list.
"""
return sorted([ w[0] for w in S ])
def mkPair(S, All):
"Given the left shore, compute both the left shore and the right shore."
return (S, All - S);
def my_str(s):
"Print frozen sets as sets."
if len(s) == 0:
return "[]"
else:
return str(shorten(s))
def printPath(Path, All):
"print the path one transition at a time"
for i in range(len(Path)):
(S1, S2) = mkPair(Path[i], All)
if (len(S1) == 0 or len(S2) == 0):
print(my_str(S1), 33 * " ", my_str(S2))
else:
print(my_str(S1), 35 * " ", my_str(S2))
if i + 1 == len(Path):
break
(T1, T2) = mkPair(Path[i+1], All)
if "Floß" in S1:
B = S1 - T1 # the boat crossing from left to right
print(" >>>> ", my_str(B-{"Floß"}), " >>>> ")
else:
B = S2 - T2 # the boat crossing from right to left
print(" <<<< ", my_str(B-{"Floß"}), " <<<< ")
printPath(Path, All)
```
|
github_jupyter
|
from IPython.core.display import HTML
with open ('style.css', 'r') as file:
css = file.read()
HTML(css)
def power(M):
"This function computes the power set of the set M."
if M == set():
return { frozenset() }
else:
C = set(M) # C is a copy of M as we don't want to change the set M
x = C.pop() # pop removes the element x from the set C
P1 = power(C)
P2 = { A | {x} for A in P1 }
return P1 | P2
%run Breadth-First-Search.ipynb
All = { "Polizist", "Ganove", "Mutter", "Vater",
"Anton", "Bruno", "Cindy", "Doris", "Floß"
}
All = frozenset(All)
def problem(S):
return( 'Vater' in S and 'Cindy' in S and not 'Mutter' in S or
'Vater' in S and 'Doris' in S and not 'Mutter' in S or
'Mutter' in S and 'Anton' in S and not 'Vater' in S or
'Mutter' in S and 'Bruno' in S and not 'Vater' in S or
'Ganove' in S and 'Vater' in S and not 'Polizist' in S or
'Ganove' in S and 'Mutter' in S and not 'Polizist' in S or
'Ganove' in S and 'Cindy' in S and not 'Polizist' in S or
'Ganove' in S and 'Doris' in S and not 'Polizist' in S or
'Ganove' in S and 'Anton' in S and not 'Polizist' in S or
'Ganove' in S and 'Bruno' in S and not 'Polizist' in S
)
States = { S for S in power(All) if not problem(S) and
not problem(All-S)
}
len(States)
R1 = { (S, S - B) for S in States
for B in power(S)
if S - B in States and 'Floß' in S and 'Floß' not in S - B
and len(B) in (2, 3)
and ('Vater' in B or
'Mutter' in B or
'Polizist' in B
)
}
len(R1)
R2 = {(S2, S1) for (S1, S2) in R1}
R = R1 | R2
dot = dot_graph(R)
dot.render('graph', view=True)
start = All
goal = set()
Path = search(R, start, goal)
for x in Path:
print(set(x))
len(Path)
def shorten(S):
"""
Shorten all strings in the set S to one character and turn S in to a
sorted list.
"""
return sorted([ w[0] for w in S ])
def mkPair(S, All):
"Given the left shore, compute both the left shore and the right shore."
return (S, All - S);
def my_str(s):
"Print frozen sets as sets."
if len(s) == 0:
return "[]"
else:
return str(shorten(s))
def printPath(Path, All):
"print the path one transition at a time"
for i in range(len(Path)):
(S1, S2) = mkPair(Path[i], All)
if (len(S1) == 0 or len(S2) == 0):
print(my_str(S1), 33 * " ", my_str(S2))
else:
print(my_str(S1), 35 * " ", my_str(S2))
if i + 1 == len(Path):
break
(T1, T2) = mkPair(Path[i+1], All)
if "Floß" in S1:
B = S1 - T1 # the boat crossing from left to right
print(" >>>> ", my_str(B-{"Floß"}), " >>>> ")
else:
B = S2 - T2 # the boat crossing from right to left
print(" <<<< ", my_str(B-{"Floß"}), " <<<< ")
printPath(Path, All)
| 0.371023 | 0.856932 |
# CIFAR10 Image Classification Using LeNet
In this tutorial, we are going to walk through the logic in `lenet_cifar10_adversarial.py` shown below and provide step-by-step instructions.
```
!cat lenet_cifar10_adversarial.py
```
## Step 1: Prepare training and evaluation dataset, create FastEstimator `Pipeline`
`Pipeline` can take both in-memory data and data on disk. In this example, we are going to use in-memory data loaded with `tf.keras.datasets.cifar10`
```
import tensorflow as tf
(x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.cifar10.load_data()
print("train image shape is {}".format(x_train.shape))
print("train label shape is {}".format(y_train.shape))
print("eval image shape is {}".format(x_eval.shape))
print("eval label shape is {}".format(y_eval.shape))
```
For in-memory data in `Pipeline`, the data format should be a nested dictionary like: {"mode1": {"feature1": numpy_array, "feature2": numpy_array, ...}, ...}. Each `mode` can be either `train` or `eval`; in our case we have both `train` and `eval`. `feature` is the feature name; in our case we have `x` and `y`.
```
data = {"train": {"x": x_train, "y": y_train}, "eval": {"x": x_eval, "y": y_eval}}
```
Now we are ready to define the `Pipeline`: we want to apply `Minmax` online preprocessing to the image feature `x` for both training and evaluation:
```
import fastestimator as fe
from fastestimator.op.tensorop import Minmax
pipeline = fe.Pipeline(batch_size=50, data=data, ops=Minmax(inputs="x", outputs="x"))
```
## Step 2: Prepare model, create FastEstimator `Network`
First, we have to define the network architecture in `tf.keras.Model` or `tf.keras.Sequential`. For a popular architecture like LeNet, FastEstimator already provides an implementation in [fastestimator.architecture.lenet](https://github.com/fastestimator/fastestimator/blob/master/fastestimator/architecture/lenet.py). After defining the architecture, users are expected to feed the architecture definition and its associated model name, optimizer, and loss name (defaults to 'loss') to `FEModel`.
```
from fastestimator.architecture import LeNet
from fastestimator import FEModel
model = FEModel(model_def=lambda: LeNet(input_shape=x_train.shape[1:], classes=10), model_name="LeNet", optimizer="adam")
```
We can now define a simple `Network`: given a batch of data with keys `x` and `y`, we have to work our way to `loss` with a series of operators. `ModelOp` is an operator that contains a model.
```
from fastestimator.op.tensorop import ModelOp, SparseCategoricalCrossentropy
simple_network = fe.Network(ops=[ModelOp(inputs="x", model=model, outputs="y_pred"),
SparseCategoricalCrossentropy(y_pred="y_pred", y_true="y", outputs="loss")])
```
One advantage of `FastEstimator`, though, is that it is easy to construct much more complicated graphs. In this example, we want to conduct training by generating adversarially perturbed images and training against them, since this has been shown to make neural networks more robust against future [attacks](https://arxiv.org/abs/1412.6572). To achieve this in `FastEstimator`, we start by running the input through the model op and computing loss as before, but this time the `ModelOp` has the track_input flag set to `True` in order to indicate that gradients should be computed with respect to the input image in addition to the model weights. The network then generates an adversarial sample image using the `AdversarialSample` augmentation module, and runs that image through the model. Finally, the model is trained using an average of the raw loss and adversarial loss. Note that the adversarial part of the process needs only be done during training (not evaluation) and so the `mode` of these final four operations is set to 'train'.
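Concretely, the adversarial samples are generated with the fast gradient sign method from the cited paper: each image is perturbed in the direction that increases the loss (and clipped back to the valid input range),

$$x_{\text{adverse}} = x + \epsilon \cdot \operatorname{sign}\bigl(\nabla_{x} L(x, y)\bigr),$$

with $\epsilon = 0.01$ here; the same computation appears explicitly in the `attack` function defined later in this notebook.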
```
from fastestimator.op.tensorop import AdversarialSample, Average
pipeline2 = fe.Pipeline(batch_size=50, data=data, ops=Minmax(inputs="x", outputs="x"))
model2 = FEModel(model_def=lambda: LeNet(input_shape=x_train.shape[1:], classes=10), model_name="LeNet", optimizer="adam")
adversarial_network = fe.Network(ops=[
ModelOp(inputs="x", model=model2, outputs="y_pred", track_input=True),
SparseCategoricalCrossentropy(y_true="y", y_pred="y_pred", outputs="loss"),
AdversarialSample(inputs=("loss", "x"), outputs="x_adverse", epsilon=0.01, mode="train"),
ModelOp(inputs="x_adverse", model=model2, outputs="y_pred_adverse", mode="train"),
SparseCategoricalCrossentropy(y_true="y", y_pred="y_pred_adverse", outputs="adverse_loss", mode="train"),
Average(inputs=("loss", "adverse_loss"), outputs="loss", mode="train")
])
```
## Step 3: Configure training, create `Estimator`
During the training loop, we want to: 1) measure accuracy on the evaluation data, and 2) save the model with the lowest validation loss. The `Trace` class is used for anything related to the training loop, and we will need to import the `Accuracy` and `ModelSaver` traces.
```
import tempfile
import os
from fastestimator.trace import Accuracy, ModelSaver
base_dir = tempfile.mkdtemp()
simple_save_dir = os.path.join(base_dir, 'simple')
adversarial_save_dir = os.path.join(base_dir, 'adverse')
simple_traces = [Accuracy(true_key="y", pred_key="y_pred", output_name='acc'),
ModelSaver(model_name="LeNet", save_dir=simple_save_dir, save_best=True)]
adversarial_traces = [Accuracy(true_key="y", pred_key="y_pred", output_name='acc'),
ModelSaver(model_name="LeNet", save_dir=adversarial_save_dir, save_best=True)]
```
Now we can define the `Estimator` and specify the training configuration. We will create estimators for both the simple and adversarial networks in order to compare their performances.
```
simple_estimator = fe.Estimator(network=simple_network, pipeline=pipeline, epochs=15, traces=simple_traces, log_steps=500)
adversarial_estimator = fe.Estimator(network=adversarial_network, pipeline=pipeline2, epochs=15, traces=adversarial_traces, log_steps=500)
```
## Step 4: Training
We'll start by training the regular network (takes about 7.7 minutes on a 2015 MacBookPro CPU - 2.5 GHz Intel Core i7). The network should attain an evaluation accuracy around 71%
```
simple_estimator.fit()
```
Next we train the network adversarially. This process takes longer (about 17 minutes) since it requires two different gradient computations and model evaluations per forward step rather than one. It is also slower to converge since the training process is more difficult, though it should also get to around 71% evaluation accuracy.
```
adversarial_estimator.fit()
```
## Step 5: Inferencing and Adversarial Attacks
After training, the model is saved to a temporary folder. We can load the model from file and do inferencing on a sample image.
```
simple_model_path = os.path.join(simple_save_dir, 'LeNet_best_loss.h5')
simple_model = tf.keras.models.load_model(simple_model_path, compile=False)
adversarial_model_path = os.path.join(adversarial_save_dir, 'LeNet_best_loss.h5')
adversarial_model = tf.keras.models.load_model(adversarial_model_path, compile=False)
```
Let's consider a few images from the evaluation dataset and see how the networks respond to adversarial attacks.
```
import matplotlib.pyplot as plt
import numpy as np
from fastestimator.interpretation import show_image
from fastestimator.op.tensorop import Minmax
minmax = Minmax()
num_vis = 10
num_samples = 10000
fig, axis = plt.subplots(1, num_vis, figsize=(21, 3))
sample_images = tf.stack([minmax.forward(tf.constant(x), {}) for x in x_eval[0:num_samples]])
sample_labels = tf.constant(y_eval[0:num_samples])
for idx in range(num_vis):
show_image(sample_images[idx], axis=axis[idx])
class_dictionary = {
0: "airplane", 1: "car", 2: "bird", 3: "cat", 4: "deer", 5: "dog", 6: "frog", 7: "horse", 8: "ship", 9: "truck"
}
print("True Labels: [{}]".format(
', '.join(['{:<8}' for _ in range(num_vis)])).format(*[class_dictionary[x[0].numpy()] for x in sample_labels][0:num_vis]))
simple_prediction_score = simple_model.predict(sample_images)
simple_accuracy = 1.0 - np.sum(np.not_equal(np.argmax(simple_prediction_score, axis=1), tf.reshape(sample_labels, (num_samples,)))) / num_samples
print("Simple Model Predicts: [{}] ({:2.1%} accuracy over {} images)".format(
', '.join(['{:<8}' for _ in range(num_vis)]), simple_accuracy, num_samples).format(
*[class_dictionary[x] for x in np.argmax(simple_prediction_score, axis=1)][0:num_vis]))
adversarial_prediction_score = adversarial_model.predict(sample_images)
adversarial_accuracy = 1.0 - np.sum(np.not_equal(np.argmax(adversarial_prediction_score, axis=1), tf.reshape(sample_labels, (num_samples,)))) / num_samples
print("Adversarial Model Predicts:[{}] ({:2.1%} accuracy over {} images)".format(
', '.join(['{:<8}' for _ in range(num_vis)]), adversarial_accuracy, num_samples).format(
*[class_dictionary[x] for x in np.argmax(adversarial_prediction_score, axis=1)][0:num_vis]))
```
As we can see, both the simple model and the one trained against adversarial samples correctly identify a majority of the evaluation images, with a population accuracy around 70% each. Now, to create the adversarial versions of the images, we'll simulate the adversarial augmentation object
```
def attack(images, model, ground_truth, epsilon):
loss_obj = tf.losses.SparseCategoricalCrossentropy(reduction='none')
with tf.GradientTape() as tape:
tape.watch(images)
pred = model(images, training=False)
loss = loss_obj(ground_truth, pred)
gradients = tape.gradient(loss, images)
adverse_images = tf.clip_by_value(images + epsilon * tf.sign(gradients),
tf.reduce_min(images),
tf.reduce_max(images))
return adverse_images
```
First we will generate adversarial images by inspecting the gradients of the simple model, and see how well the two models can resist the attack
```
adverse_images = attack(sample_images, simple_model, sample_labels, 0.01)
fig, axis = plt.subplots(1, num_vis, figsize=(21, 3))
for idx in range(num_vis):
show_image(adverse_images[idx], axis=axis[idx])
print("True Labels: [{}]".format(
', '.join(['{:<8}' for _ in range(num_vis)])).format(*[class_dictionary[x[0].numpy()] for x in sample_labels][0:num_vis]))
simple_prediction_score = simple_model.predict(adverse_images)
simple_accuracy_w = 1.0 - np.sum(np.not_equal(np.argmax(simple_prediction_score, axis=1), tf.reshape(sample_labels, (num_samples,)))) / num_samples
print("Simple Model Predicts: [{}] ({:2.1%} accuracy over {} images)".format(
', '.join(['{:<8}' for _ in range(num_vis)]), simple_accuracy_w, num_samples).format(
*[class_dictionary[x] for x in np.argmax(simple_prediction_score, axis=1)][0:num_vis]))
adversarial_prediction_score = adversarial_model.predict(adverse_images)
adversarial_accuracy_b = 1.0 - np.sum(np.not_equal(np.argmax(adversarial_prediction_score, axis=1), tf.reshape(sample_labels, (num_samples,)))) / num_samples
print("Adversarial Model Predicts:[{}] ({:2.1%} accuracy over {} images)".format(
', '.join(['{:<8}' for _ in range(num_vis)]), adversarial_accuracy_b, num_samples).format(
*[class_dictionary[x] for x in np.argmax(adversarial_prediction_score, axis=1)][0:num_vis]))
```
Even though these adversarially attacked images look basically the same as the original images, the accuracy of the traditionally trained model has dropped to 31.9%. The adversarially trained model also sees a reduction in accuracy, but only to 65.2%. It is, however, an incomplete/unfair comparison since the attack is white-box against the simple network but black-box against the adversarially trained network. Let's now generate adversarial images using the adversarially trained network instead and see how well the models resist the attack
```
adverse_images = attack(sample_images, adversarial_model, sample_labels, 0.01)
fig, axis = plt.subplots(1, num_vis, figsize=(21, 3))
for idx in range(num_vis):
show_image(adverse_images[idx], axis=axis[idx])
print("True Labels: [{}]".format(
', '.join(['{:<8}' for _ in range(num_vis)])).format(*[class_dictionary[x[0].numpy()] for x in sample_labels][0:num_vis]))
simple_prediction_score = simple_model.predict(adverse_images)
simple_accuracy_b = 1.0 - np.sum(np.not_equal(np.argmax(simple_prediction_score, axis=1), tf.reshape(sample_labels, (num_samples,)))) / num_samples
print("Simple Model Predicts: [{}] ({:2.1%} accuracy over {} images)".format(
', '.join(['{:<8}' for _ in range(num_vis)]), simple_accuracy_b, num_samples).format(
*[class_dictionary[x] for x in np.argmax(simple_prediction_score, axis=1)][0:num_vis]))
adversarial_prediction_score = adversarial_model.predict(adverse_images)
adversarial_accuracy_w = 1.0 - np.sum(np.not_equal(np.argmax(adversarial_prediction_score, axis=1), tf.reshape(sample_labels, (num_samples,)))) / num_samples
print("Adversarial Model Predicts:[{}] ({:2.1%} accuracy over {} images)".format(
', '.join(['{:<8}' for _ in range(num_vis)]), adversarial_accuracy_w, num_samples).format(
*[class_dictionary[x] for x in np.argmax(adversarial_prediction_score, axis=1)][0:num_vis]))
```
Under this attack, the accuracy of the traditionally trained model has dropped to 61.6%. The adversarially trained model meanwhile has its performance reduced to 49.1%. While the raw adversarial accuracy here is now lower than the simple model, the performance loss is significantly less than it was for the simple model in the previous attack. To properly compare the models, the white-box and black-box attacks should be compared pairwise against one another:
```
print("White box attack vs simple network: {:2.2%} accuracy".format(simple_accuracy_w - simple_accuracy))
print("White box attack vs adversarial network: {:2.2%} accuracy".format(adversarial_accuracy_w - simple_accuracy))
print()
print("Black box attack vs simple network: {:2.2%} accuracy".format(simple_accuracy_b - simple_accuracy))
print("Black box attack vs adversarial network: {:2.2%} accuracy".format(adversarial_accuracy_b - simple_accuracy))
```
Adversarially attacking the simple network using white-box gradient analysis cost nearly 40 percentage points of accuracy. The same attack conducted against the adversarially trained network cost only around 23 percentage points. Likewise, a blackbox attack against the simple network cost 10 percentage points versus 6.5 against the adversarial network. This shows that the adversarial training process makes a network approximately twice as robust against future adversarial attacks. Whether such benefits are worth the increased training time would, of course, depend on the model deployment use-case.
|
github_jupyter
|
!cat lenet_cifar10_adversarial.py
import tensorflow as tf
(x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.cifar10.load_data()
print("train image shape is {}".format(x_train.shape))
print("train label shape is {}".format(y_train.shape))
print("eval image shape is {}".format(x_eval.shape))
print("eval label shape is {}".format(y_eval.shape))
data = {"train": {"x": x_train, "y": y_train}, "eval": {"x": x_eval, "y": y_eval}}
import fastestimator as fe
from fastestimator.op.tensorop import Minmax
pipeline = fe.Pipeline(batch_size=50, data=data, ops=Minmax(inputs="x", outputs="x"))
from fastestimator.architecture import LeNet
from fastestimator import FEModel
model = FEModel(model_def=lambda: LeNet(input_shape=x_train.shape[1:], classes=10), model_name="LeNet", optimizer="adam")
from fastestimator.op.tensorop import ModelOp, SparseCategoricalCrossentropy
simple_network = fe.Network(ops=[ModelOp(inputs="x", model=model, outputs="y_pred"),
SparseCategoricalCrossentropy(y_pred="y_pred", y_true="y", outputs="loss")])
from fastestimator.op.tensorop import AdversarialSample, Average
pipeline2 = fe.Pipeline(batch_size=50, data=data, ops=Minmax(inputs="x", outputs="x"))
model2 = FEModel(model_def=lambda: LeNet(input_shape=x_train.shape[1:], classes=10), model_name="LeNet", optimizer="adam")
adversarial_network = fe.Network(ops=[
ModelOp(inputs="x", model=model2, outputs="y_pred", track_input=True),
SparseCategoricalCrossentropy(y_true="y", y_pred="y_pred", outputs="loss"),
AdversarialSample(inputs=("loss", "x"), outputs="x_adverse", epsilon=0.01, mode="train"),
ModelOp(inputs="x_adverse", model=model2, outputs="y_pred_adverse", mode="train"),
SparseCategoricalCrossentropy(y_true="y", y_pred="y_pred_adverse", outputs="adverse_loss", mode="train"),
Average(inputs=("loss", "adverse_loss"), outputs="loss", mode="train")
])
import tempfile
import os
from fastestimator.trace import Accuracy, ModelSaver
base_dir = tempfile.mkdtemp()
simple_save_dir = os.path.join(base_dir, 'simple')
adversarial_save_dir = os.path.join(base_dir, 'adverse')
simple_traces = [Accuracy(true_key="y", pred_key="y_pred", output_name='acc'),
ModelSaver(model_name="LeNet", save_dir=simple_save_dir, save_best=True)]
adversarial_traces = [Accuracy(true_key="y", pred_key="y_pred", output_name='acc'),
ModelSaver(model_name="LeNet", save_dir=adversarial_save_dir, save_best=True)]
simple_estimator = fe.Estimator(network=simple_network, pipeline=pipeline, epochs=15, traces=simple_traces, log_steps=500)
adversarial_estimator = fe.Estimator(network=adversarial_network, pipeline=pipeline2, epochs=15, traces=adversarial_traces, log_steps=500)
simple_estimator.fit()
adversarial_estimator.fit()
simple_model_path = os.path.join(simple_save_dir, 'LeNet_best_loss.h5')
simple_model = tf.keras.models.load_model(simple_model_path, compile=False)
adversarial_model_path = os.path.join(adversarial_save_dir, 'LeNet_best_loss.h5')
adversarial_model = tf.keras.models.load_model(adversarial_model_path, compile=False)
import matplotlib.pyplot as plt
import numpy as np
from fastestimator.interpretation import show_image
from fastestimator.op.tensorop import Minmax
minmax = Minmax()
num_vis = 10
num_samples = 10000
fig, axis = plt.subplots(1, num_vis, figsize=(21, 3))
sample_images = tf.stack([minmax.forward(tf.constant(x), {}) for x in x_eval[0:num_samples]])
sample_labels = tf.constant(y_eval[0:num_samples])
for idx in range(num_vis):
show_image(sample_images[idx], axis=axis[idx])
class_dictionary = {
0: "airplane", 1: "car", 2: "bird", 3: "cat", 4: "deer", 5: "dog", 6: "frog", 7: "horse", 8: "ship", 9: "truck"
}
print("True Labels: [{}]".format(
', '.join(['{:<8}' for _ in range(num_vis)])).format(*[class_dictionary[x[0].numpy()] for x in sample_labels][0:num_vis]))
simple_prediction_score = simple_model.predict(sample_images)
simple_accuracy = 1.0 - np.sum(np.not_equal(np.argmax(simple_prediction_score, axis=1), tf.reshape(sample_labels, (num_samples,)))) / num_samples
print("Simple Model Predicts: [{}] ({:2.1%} accuracy over {} images)".format(
', '.join(['{:<8}' for _ in range(num_vis)]), simple_accuracy, num_samples).format(
*[class_dictionary[x] for x in np.argmax(simple_prediction_score, axis=1)][0:num_vis]))
adversarial_prediction_score = adversarial_model.predict(sample_images)
adversarial_accuracy = 1.0 - np.sum(np.not_equal(np.argmax(adversarial_prediction_score, axis=1), tf.reshape(sample_labels, (num_samples,)))) / num_samples
print("Adversarial Model Predicts:[{}] ({:2.1%} accuracy over {} images)".format(
', '.join(['{:<8}' for _ in range(num_vis)]), adversarial_accuracy, num_samples).format(
*[class_dictionary[x] for x in np.argmax(adversarial_prediction_score, axis=1)][0:num_vis]))
def attack(images, model, ground_truth, epsilon):
loss_obj = tf.losses.SparseCategoricalCrossentropy(reduction='none')
with tf.GradientTape() as tape:
tape.watch(images)
pred = model(images, training=False)
loss = loss_obj(ground_truth, pred)
gradients = tape.gradient(loss, images)
adverse_images = tf.clip_by_value(images + epsilon * tf.sign(gradients),
tf.reduce_min(images),
tf.reduce_max(images))
return adverse_images
adverse_images = attack(sample_images, simple_model, sample_labels, 0.01)
fig, axis = plt.subplots(1, num_vis, figsize=(21, 3))
for idx in range(num_vis):
show_image(adverse_images[idx], axis=axis[idx])
print("True Labels: [{}]".format(
', '.join(['{:<8}' for _ in range(num_vis)])).format(*[class_dictionary[x[0].numpy()] for x in sample_labels][0:num_vis]))
simple_prediction_score = simple_model.predict(adverse_images)
simple_accuracy_w = 1.0 - np.sum(np.not_equal(np.argmax(simple_prediction_score, axis=1), tf.reshape(sample_labels, (num_samples,)))) / num_samples
print("Simple Model Predicts: [{}] ({:2.1%} accuracy over {} images)".format(
', '.join(['{:<8}' for _ in range(num_vis)]), simple_accuracy_w, num_samples).format(
*[class_dictionary[x] for x in np.argmax(simple_prediction_score, axis=1)][0:num_vis]))
adversarial_prediction_score = adversarial_model.predict(adverse_images)
adversarial_accuracy_b = 1.0 - np.sum(np.not_equal(np.argmax(adversarial_prediction_score, axis=1), tf.reshape(sample_labels, (num_samples,)))) / num_samples
print("Adversarial Model Predicts:[{}] ({:2.1%} accuracy over {} images)".format(
', '.join(['{:<8}' for _ in range(num_vis)]), adversarial_accuracy_b, num_samples).format(
*[class_dictionary[x] for x in np.argmax(adversarial_prediction_score, axis=1)][0:num_vis]))
adverse_images = attack(sample_images, adversarial_model, sample_labels, 0.01)
fig, axis = plt.subplots(1, num_vis, figsize=(21, 3))
for idx in range(num_vis):
show_image(adverse_images[idx], axis=axis[idx])
print("True Labels: [{}]".format(
', '.join(['{:<8}' for _ in range(num_vis)])).format(*[class_dictionary[x[0].numpy()] for x in sample_labels][0:num_vis]))
simple_prediction_score = simple_model.predict(adverse_images)
simple_accuracy_b = 1.0 - np.sum(np.not_equal(np.argmax(simple_prediction_score, axis=1), tf.reshape(sample_labels, (num_samples,)))) / num_samples
print("Simple Model Predicts: [{}] ({:2.1%} accuracy over {} images)".format(
', '.join(['{:<8}' for _ in range(num_vis)]), simple_accuracy_b, num_samples).format(
*[class_dictionary[x] for x in np.argmax(simple_prediction_score, axis=1)][0:num_vis]))
adversarial_prediction_score = adversarial_model.predict(adverse_images)
adversarial_accuracy_w = 1.0 - np.sum(np.not_equal(np.argmax(adversarial_prediction_score, axis=1), tf.reshape(sample_labels, (num_samples,)))) / num_samples
print("Adversarial Model Predicts:[{}] ({:2.1%} accuracy over {} images)".format(
', '.join(['{:<8}' for _ in range(num_vis)]), adversarial_accuracy_w, num_samples).format(
*[class_dictionary[x] for x in np.argmax(adversarial_prediction_score, axis=1)][0:num_vis]))
print("White box attack vs simple network: {:2.2%} accuracy".format(simple_accuracy_w - simple_accuracy))
print("White box attack vs adversarial network: {:2.2%} accuracy".format(adversarial_accuracy_w - simple_accuracy))
print()
print("Black box attack vs simple network: {:2.2%} accuracy".format(simple_accuracy_b - simple_accuracy))
print("Black box attack vs adversarial network: {:2.2%} accuracy".format(adversarial_accuracy_b - simple_accuracy))
| 0.737725 | 0.987179 |
# Stock Price Prediction From Employee / Job Market Information
## Modelling: Linear Model
Objective: utilise the Thinknum LinkedIn and Job Postings datasets, along with the Quandl WIKI prices dataset, to investigate the effect of hiring practices on stock price. In this notebook I'll begin exploring the increase in predictive power from historic employment data.
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
from glob import glob
# Utilities
from utils import *
%matplotlib inline
PATH = Path('D:\data\jobs')
link, companies, stocks = data_load(PATH)
```
Let's start with some of the series that had the most promising cross correlations.
```
filtered = companies.sort_values('max_corr',ascending=False)[['dataset_id', 'company_name','MarketCap', 'Sector', 'Symbol',
'max_corr', 'best_lag']]
filtered = filtered.query('(max_corr > 0.95) & (best_lag < -50)')
filtered.head()
```
Modelling for the top stock here, USA Truck Inc. (USAK).
```
USAK = stocks.USAK
USAK_link = link[link['dataset_id']==929840]['employees_on_platform']
start = min(USAK_link.index)
end = max(USAK_link.index)
fig, ax = plt.subplots(figsize=(12,8))
ax.set_xlim(start,end)
ax.plot(USAK.index,USAK, label='Adjusted Close Price (USAK)')
ax.set_ylabel('Adjusted close stock price')
ax1=ax.twinx()
ax1.set_ylabel('LinkedIn employee count')
ax1.plot(USAK_link.index, USAK_link,color='r',label='LinkedIn employee data')
plt.legend();
def build_t_feats(stock,employ,n, include_employ=True):
X = pd.concat([stock,employ],axis=1)
X.columns = ['close','emps']
y=None
#start = max(pd.datetime(2016,7,1),min(stock.dropna().index)) - pd.Timedelta(1, unit='d')
start = min(employ.dropna().index) - pd.Timedelta(1, unit='d')
end = max(stock.dropna().index)
#print(start,end)
X = X.loc[start:end]
# Normalize
X = (X-X.mean())/X.std()
# Fill gaps
X = X.interpolate()
# Daily returns
X = X.diff()
# Create target variable
X['y'] = X.close.shift(-1)
# Create time shifted features
for t in range(n):
X['c'+str(t+1)] = X.close.shift(t+1)
if include_employ: X['e'+str(t+1)] = X.emps.shift(t+1)
X = X.dropna()
if not include_employ: X = X.drop('emps',axis=1)
y = X.y
X.drop('y',axis=1,inplace=True)
return X,y
X, y = build_t_feats(USAK,USAK_link,180)
```
## Linear Model
Start with a basic linear model, so we can easily interpret the model outputs.
```
from sklearn.model_selection import TimeSeriesSplit, cross_val_score
from sklearn.linear_model import Ridge, LinearRegression
from sklearn.metrics import mean_absolute_error
reg = Ridge()
def fit_predict(reg, X, y, plot=True):
cv = TimeSeriesSplit(n_splits=10)
scores = cross_val_score(reg, X, y, cv=cv, scoring='neg_mean_absolute_error')
if plot: print('Mean absolute error: ', np.mean(-scores), '\nSplit scores: ',-scores)
cut = int(X.shape[0]*0.9)
X_train, y_train = X[:cut], y[:cut].values.reshape(-1,1)
X_dev, y_dev = X[cut:], y[cut:].values.reshape(-1,1)
reg.fit(X_train,y_train)
pred_dev = reg.predict(X_dev)
pred_train = reg.predict(X_train)
if plot:
f,ax = plt.subplots(nrows=1,ncols=2,figsize=(25,8))
ax[0].plot(y_train,pred_train,marker='.',linestyle='None',alpha=0.6,label='train')
ax[0].plot(y_dev,pred_dev,marker='.',linestyle='None',color='r',alpha=0.6,label='dev')
ax[0].set_title('Predicted v actual daily changes')
ax[0].legend()
ax[1].plot(X[cut:].index,y_dev,alpha=0.6,label='actual',marker='.')
ax[1].plot(X[cut:].index,pred_dev,color='r',alpha=0.6,label='predict',marker='.')
ax[1].set_title('Development set, predicted v actual daily changes')
ax[1].legend();
return reg, np.mean(-scores)
reg, _ = fit_predict(reg, X, y)
```
Using MAE (Mean Absolute Error) as the evaluation metric here. An MAE of around 0.05 on the normalized daily changes seems acceptable.
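For reference, here is a minimal sketch of the metric itself. It assumes `actual` and `predicted` are aligned 1-D arrays of the (normalized) daily changes; inside `fit_predict` these correspond to `y_dev` and `pred_dev`.
```
import numpy as np

def mean_absolute_error_sketch(actual, predicted):
    # Average magnitude of the prediction error, in the same normalized units as the target
    return np.mean(np.abs(np.asarray(actual) - np.asarray(predicted)))
```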
```
coefs = reg.coef_.ravel()
idx = coefs.argsort()[-40:]
x = np.arange(len(coefs[idx]))
fig,ax = plt.subplots(figsize=(20,5))
plt.bar(x,coefs[idx])
plt.xticks(x,X.columns.values[idx])
plt.title('Importance of shifted feature in model')
plt.show();
```
Looks like most of the top features are time lagged versions of the daily price change rather than the employment data.
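A rough way to quantify that impression is to compare the total absolute weight the fitted model places on lagged price features versus lagged employment features. This is only a sketch using the `reg` and `X` objects from the cell above; column names starting with 'c' are price lags and those starting with 'e' are employment lags.
```
import numpy as np

weights = np.abs(reg.coef_.ravel())
price_mask = [c.startswith('c') for c in X.columns]  # 'close', 'c1', 'c2', ...
emp_mask = [c.startswith('e') for c in X.columns]    # 'emps', 'e1', 'e2', ...
print('Total |weight| on price lags:      {:.3f}'.format(weights[price_mask].sum()))
print('Total |weight| on employment lags: {:.3f}'.format(weights[emp_mask].sum()))
```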
## Same model excluding employment data
I'll now rerun the same analysis, but excluding the employment data.
```
X, y = build_t_feats(USAK,USAK_link,180,include_employ=False)
reg = Ridge()
reg, _ = fit_predict(reg, X, y)
coefs = reg.coef_.ravel()
idx = coefs.argsort()[-40:]
x = np.arange(len(coefs[idx]))
fig,ax = plt.subplots(figsize=(20,5))
plt.bar(x,coefs[idx])
plt.xticks(x,X.columns.values[idx])
plt.title('Importance of shifted feature in model')
plt.show();
```
Over a similar time period it looks like our model performed better using employment data.
## Rerun analysis for all top stocks
```
filtered = filtered[filtered.dataset_id != 868877].copy()
MAEs = np.full((len(filtered),2),np.nan)
for i,ID in enumerate(filtered.dataset_id.values):
print(i, ID, filtered.set_index('dataset_id').loc[ID].company_name)
try:
sym = filtered.set_index('dataset_id').loc[ID].Symbol
tick = stocks[sym]
emp = link[link['dataset_id']==ID]['employees_on_platform']
    except Exception:
        print('Symbol Error, Skipping')
        continue
# Including employee data
X, y = build_t_feats(tick,emp,180,True)
reg = Ridge()
reg, MAE = fit_predict(reg, X, y, plot=False)
MAEs[i][0] = MAE
# Excluding employee data
X, y = build_t_feats(tick,emp,180,False)
reg = Ridge()
reg, MAE = fit_predict(reg, X, y, plot=False)
MAEs[i][1] = MAE
filtered['MAE_w_emp'] = MAEs[:,0]
filtered['MAE_wo_emp'] = MAEs[:,1]
from bokeh.plotting import figure, show, output_notebook
from bokeh.models import HoverTool
output_notebook()
TOOLS="hover,save"
p1 = figure(plot_width=600, plot_height=600, title="Prediction score with and without LinkedIn data",tools=TOOLS)
p1.xgrid.grid_line_color = None
p1.circle(x='MAE_wo_emp', y='MAE_w_emp', size=12, alpha=0.5, source=filtered)
p1.line(x=np.arange(0,0.25,0.01),y=np.arange(0,0.25,0.01))
p1.xaxis.axis_label = 'MAE without employee data in model'
p1.yaxis.axis_label = 'MAE with employee data in model'
hover = p1.select(dict(type=HoverTool))
hover.tooltips = [
("Name", "@company_name"),
("Correlation", "@max_corr"),
("Optimal Lag", "@best_lag"),
]
show(p1)
```
The vast majority of points fall below the line, meaning the predictions generated with a simple linear model were improved when employee data was included in the model. **Therefore the LinkedIn data does improve the predictions of future stock price movements.**
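To put a number on that visual impression, the sketch below summarises the `filtered` frame built above: the fraction of companies whose MAE dropped when employee data was added, and the mean size of the drop.
```
valid = filtered.dropna(subset=['MAE_w_emp', 'MAE_wo_emp'])
improved = (valid.MAE_w_emp < valid.MAE_wo_emp).mean()
mean_gain = (valid.MAE_wo_emp - valid.MAE_w_emp).mean()
print('Companies improved by adding employee data: {:.1%}'.format(improved))
print('Mean reduction in MAE: {:.4f}'.format(mean_gain))
```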
# Performance optimization and analysis
In this tutorial, we will:
* learn how to optimize the performance of an `Operator`,
* investigate the effects of optimization in two real-life seismic inversion `Operator`s,
* analyze and interpret the performance report displayed after a run.
We will rely on preset models and `Operator`s from a seismic inversion problem based on an **isotropic acoustic wave equation**. To run one such `Operator`, in particular a forward modeling operator, we will exploit the `benchmark.py` module. This provides a number of options to configure the simulation and to try out different optimizations. The `benchmark.py` module is intended to let newcomers play with Devito -- and its performance optimizations! -- without having to know anything about its symbolic language, mechanisms and functioning.
```
import examples.seismic.benchmark
benchmark = examples.seismic.benchmark.__file__
# For a full list of options
%run $benchmark --help
```
OK, now we want Devito to run this `Operator`.
```
%run $benchmark run -P acoustic
```
That was simple. Of course, we may want to run the same simulation on a bigger grid, with different grid point spacing or space order, and so on. And yes, we'll do this later on in the tutorial. But before scaling up in problem size, we shall take a look at what sort of performance optimizations we'll be able to apply to speed it up.
In essence, there are four knobs we can play with to maximize the `Operator` performance (or to see how the performance varies when adding or removing specific transformations):
* parallelism,
* the Devito Symbolic Engine (DSE) optimization level,
* the Devito Loop Engine (DLE) optimization level,
* loop blocking auto-tuning.
## Shared-memory parallelism
We start with the most obvious -- parallelism. Devito implements shared-memory parallelism via OpenMP. To enable it, we would usually simply set the environment variable `DEVITO_OPENMP=1`. However, since we are running in a Jupyter notebook, we can change the relevant configuration option directly:
```
from devito import configuration
configuration['openmp'] = True
```
Multiple threads will now be used when running an `Operator`. But how many? And how efficiently? We may be running on a multi-socket node -- should we treat it as a single "flat" system, or what?
Devito aims to use distributed-memory parallelism over multi-socket nodes; that is, it allocates one MPI process per socket, and each MPI process should spawn as many OpenMP threads as the number of cores on the socket. Users don't get all this for free, however; a minimal configuration effort is required. But don't worry: as you shall see, it's much simpler than it sounds!
For this tutorial, we forget about MPI; we rather focus on enabling OpenMP on a single socket. So, first of all, we want to restrain execution to a single socket -- we want threads to stay on that socket without ever migrating to other cores of the system due to OS scheduling. Are we really on a multi-socket node? And how many _physical_ cores does a socket have? Let's find out. We shall use a very standard tool such as `lscpu` on Linux systems.
```
! lscpu
```
A line beginning with `'NUMA node...'` represents one specific socket. Its value (on the right hand side, after the ':') indicates the ID ranges of its logical cores. For example, if our node consisted of two sockets, each socket having 16 physical cores and 2 hyperthreads per core, we would see something like
```
...
NUMA node0 CPU(s): 0-15,32-47
NUMA node1 CPU(s): 16-31,48-63
...
```
Now, say we choose to run on 16 cores of socket 0 (``node0``). We first have to set the following OpenMP environment variable:
```
%env OMP_NUM_THREADS=16
```
Thus, 16 threads will be spawned each time an `Operator` is run. They will be killed as soon as the `Operator` has done its job.
We also want to **bind** them to the physical cores of socket 0; that is, we want to prevent OS-induced migration. This is known as *thread pinning*. One may use a program such as ``numactl`` or, alternatively, exploit other OpenMP environment variables. If the Intel compiler is at our disposal, we can enforce pinning through the following two-step procedure:
* We tell Devito to use the Intel compiler through the special `DEVITO_ARCH` environment variable;
* We set the Intel-specific `KMP_HW_SUBSET` and `KMP_AFFINITY` environment variables.
```
# Thread pinning
%env KMP_HW_SUBSET=16c,1t
%env KMP_AFFINITY=compact
# Tell Devito to use the Intel compiler
%env DEVITO_ARCH=intel
# Or, analogously, using the configuration dictionary
configuration['compiler'] = 'intel'
```
If one didn't have access to the Intel compiler, it would still be possible to enable thread pinning through analogous mechanisms provided by OpenMP 4.5, namely the `OMP_PLACES` and `OMP_PROC_BIND` variables.
```
# Uncomment if necessary; note that the available GCC version must support OpenMP 4.5 for the following to have effect
# %env OMP_PROC_BIND=close
# %env OMP_PLACES=cores
# %env DEVITO_ARCH=gcc
```
Let's see how threading and pinning impact the `Operator` performance.
We run the isotropic acoustic forward operator again, but this time with a much larger grid, a 256x256x256 cube, and a more realistic space order, 8. We also shorten the duration by deliberately choosing a very small simulation end time (50 ms).
```
for i in range(3):
print ("Run %d" % i)
%run $benchmark run -P acoustic -so 8 -d 256 256 256 --tn 50
```
Observation: the execution times are stable. This is a sign that thread pinning is working. In practice, don't forget to check, either by taking a look at OpenMP reports, by using profilers (e.g., Intel VTune), or through user-friendly tools such as `htop`.
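As a quick sanity check from within the notebook (a sketch, assuming the environment variables set above are what the compiled `Operator` will see), we can print the pinning-related variables and, with the Intel runtime only, request a per-thread binding report.
```
import os

# Confirm the pinning-related variables are visible to this kernel
for var in ('OMP_NUM_THREADS', 'KMP_HW_SUBSET', 'KMP_AFFINITY'):
    print(var, '=', os.environ.get(var))

# Uncomment for a per-thread binding report on stderr the next time an Operator runs (Intel runtime only)
# %env KMP_AFFINITY=verbose,compact
```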
## DSE - Devito Symbolic Engine
We know how to switch on parallelism. So, it's finally time to see what kind of optimizations can Devito perform. By default, Devito aggressively optimizes all `Operator`s. When running through `benchmark.py`, however, optimizations are left disabled until users explicitly request them. This, hopefully, simplifies initial experimentation and investigation.
Let's then dive into to the Devito Symbolic Engine (or DSE) section of this tutorial. It is worth observing that the DSE is one of the distinguishing features of Devito w.r.t. many other stencil frameworks! Why is that? This is what the documentation says about the DSE:
> [The DSE performs] Flop-count optimizations - They aim to reduce the operation count of an Operator. These include, for example, code motion, factorization, and detection of cross-stencil redundancies. The flop-count optimizations are performed by routines built on top of SymPy.
So the DSE reduces the flop count of `Operator`s. This is particularly useful in the case of complicated PDEs, for example those making extensive use of differential operators. It is even more important in high-order methods. In such cases, it's not unusual to end up with kernels requiring hundreds of arithmetic operations per grid point calculation. Since Devito doesn't make assumptions about the PDEs, the presence of an optimization system such as the DSE becomes of fundamental importance. In fact, we know that its impact has been remarkable in real-life seismic inversion operators that have been written on top of Devito (e.g., TTI operators).
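As a toy illustration of the kind of flop-count optimization involved -- this uses SymPy directly and is not Devito's actual DSE implementation -- consider common sub-expression elimination on a small expression:
```
from sympy import symbols, cse, count_ops

x, y, z = symbols('x y z')
expr = (x + y)**2 + (x + y)*z + 3*(x + y)

print('Operations before:', count_ops(expr))
temps, reduced = cse(expr)  # hoist the repeated (x + y) into a temporary
ops_after = count_ops(reduced[0]) + sum(count_ops(rhs) for _, rhs in temps)
print('Operations after :', ops_after)
```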
Let's see what happens enabling the DSE in our acoustic operator.
```
# Increase Devito logger verbosity to display useful information about the DSE
configuration['log_level'] = 'DEBUG'
%run $benchmark run -P acoustic -so 8 -d 256 256 256 --tn 50 -dse advanced
```
Compared to the previous runs, do you note any change ...
* in the Devito output reports?
* in execution times? why?
And why, from a performance analysis point of view, is the DSE useful even though no changes in execution times are observed?
## DLE - Devito Loop Engine
We know how to switch on parallelism and how to leverage the DSE to reduce the flop count of our stencil equations. What's still missing are SIMD vectorization and optimizations for data locality. We won't be able to reach a significant fraction of the attainable machine peak without aggressive loop transformations. Clearly, Devito users don't "see" loops -- in fact, they only write maths in Python! -- but the generated code is nothing more than classic C with loop nests for grid function updates. So how do these "low-level loops" get optimized? Quoting from the documentation:
> Loop optimizations - Examples include SIMD vectorization and loop blocking. These are performed by the Devito Loop Engine (DLE), a sub-module consisting of a sequence of compiler passes manipulating abstract syntax trees [...]
In other words, the Devito compiler, through the DLE module, automatically applies loop transformations. The **how it does that** (i.e., manipulation of an intermediate representation) is irrelevant here; we rather want to understand **what the DLE can do**, **how to use** it and what kind of **tuning** is required to maximize the performance of an `Operator`. As we shall see, using and tuning the DLE will be as simple as switching on some special environment variables!
So here's a (non-exhaustive) list of transformations that the DLE will automatically apply for you:
* SIMD Vectorization
* Loop blocking
* Optimization of so called "remainder" loops
* Creation of elemental functions
OK, let's run the same problem we ran above, but this time with the DLE at maximum level (it's gonna apply **all** optimizations listed above). Can you guess whether the performance will be substantially better? Why?
```
%run $benchmark run -P acoustic -so 8 -d 256 256 256 --tn 50 -dse advanced -dle advanced
```
Can we make the `Operator` run quicker? Yes!
What we are missing so far is performance tuning. Take loop blocking, for example. This is a *parametric loop transformation*: its impact will vary depending on block shape, size and scheduling. In the literature, over the years, dozens of different loop blocking strategies have been studied! Even though we used the simplest loop blocking scheme on Earth, we would need **at least** to come up with a block size fitting in some level of cache. Obviously, this is such a low level detail... and we don't want users to waste any time on thinking about these matters.
Like other frameworks, Devito can automatically detect a "sufficiently good" block size through an *auto-tuning engine*. Let's try it out; in particular, we tell Devito to be "aggressive" in the search for block sizes. Being "aggressive" means that more time will be spent on auto-tuning, but there's a greater chance of retrieving a better candidate. We can either do this by setting an environment variable (`export DEVITO_AUTOTUNING=aggressive`) or directly through the configuration dictionary.
```
configuration['autotuning'] = 'aggressive'
%run $benchmark run -P acoustic -so 8 -d 256 256 256 --tn 50 -dse advanced -dle advanced -a
```
Note the addition of `-a` to the arguments list of our benchmarking script. This enables auto-tuning; the `aggressive` setting drives the auto-tuner search.
Do you note any difference in performance? Why?
## Exercise: performance analysis of a TTI forward operator
There's another operator that you can try running with our benchmarking script, namely a Tilted Transverse Isotropic operator for forward modeling. This one is even more interesting than the isotropic acoustic one, as it's representative of a class of real-life wave modeling operators. The physics, and thus the equations, are more complicated, which results in computationally expensive numerical kernels. You can appreciate that yourself by skimming through the generated code, whose location is displayed after JIT compilation.
Here's how to run TTI:
```
%run $benchmark run -P tti
```
This exercise asks you to repeat the same study we did for acoustic, playing with different DSE and DLE levels. Oh, and don't forget, there's another DSE level that you can try besides `advanced`, namely `aggressive`; just give it a go. How does the performance impact of DSE/DLE vary w.r.t. isotropic acoustic operator? And why?
## A sneak peek at the YASK backend
YASK -- Yet Another Stencil Kernel -- is, as described on its GitHub page
> a framework to facilitate exploration of the HPC stencil-performance design space.
The fundamental point is that YASK operates at a level of abstraction lower than that of Devito; for example, besides being a static system written in C++, no symbolic language is offered. We've been working on integrating YASK, meaning that, under the hood, Devito may "offload" (some of) the `Operator` stencil updates onto YASK. **This is totally transparent to users**; no changes to existing user code are required to exploit YASK! The goal is to create a synergy between the simplicity of use of Devito (i.e., the high-level abstractions) and a powerful optimization system such as YASK, specifically conceived to optimize stencil codes.
So, how do we try out YASK? Again, it's as difficult as setting one more environment variable: `DEVITO_BACKEND=yask`. In a notebook, we simply run the following:
```
configuration['backend'] = 'yask'
# Decrease Devito logger verbosity, as currently YASK produces lots of debug messages
configuration['log_level'] = 'INFO'
```
Now we can experiment a little with YASK. Watch out, however, as this is work in progress: the performance may still be quite far from the attainable peaks (e.g., Devito cannot yet leverage YASK's auto-tuning system) and some `Operator`s are still unsupported, although our running example should just work out-of-the-box.
```
%run $benchmark run -P acoustic -so 8 -d 256 256 256 --tn 50 -dse advanced
```
Note that we didn't even have to specify the `-a` and `-dle advanced` options, because YASK takes responsibility for loop optimization and auto-tuning.
<sup>This notebook is part of the tutorial "Optimised Symbolic Finite Difference Computation with Devito" presented at the University of Sao Paulo April 2019.</sup>
```
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
data = pd.read_csv('../Datasets/titanic.csv')
data.head()
def preprocess(data):
def fix_age(age):
if np.isnan(age):
return -1
else:
return age
data.loc[:, 'Gender'] = data.Gender.apply(lambda s: int(s == 'female'))
data.loc[:, 'Age'] = data.Age.apply(fix_age)
embarked = pd.get_dummies(data.Embarked, prefix='Emb')[['Emb_C','Emb_Q','Emb_S']]
cols = ['Pclass','Gender','Age','SibSp','Parch','Fare']
return pd.concat([data[cols], embarked], axis=1).values
train, val = train_test_split(data, test_size=0.2, random_state=11)
x_train = preprocess(train)
y_train = train['Survived'].values
x_val = preprocess(val)
y_val = val['Survived'].values
print(x_train.shape)
print(y_train.shape)
print(x_val.shape)
print(y_val.shape)
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
dt_params = {
'criterion': 'entropy',
'random_state': 11
}
dt = DecisionTreeClassifier(**dt_params)
bc_params = {
'base_estimator': dt,
'n_estimators': 50,
'max_samples': 0.5,
'random_state': 11,
'n_jobs': -1
}
bc = BaggingClassifier(**bc_params)
bc.fit(x_train, y_train)
bc_preds_train = bc.predict(x_train)
bc_preds_val = bc.predict(x_val)
print('Bagging Classifier:\n> Accuracy on training data = {:.4f}\n> Accuracy on validation data = {:.4f}'.format(
accuracy_score(y_true=y_train, y_pred=bc_preds_train),
accuracy_score(y_true=y_val, y_pred=bc_preds_val)
))
dt.fit(x_train, y_train)
dt_preds_train = dt.predict(x_train)
dt_preds_val = dt.predict(x_val)
print('Decision Tree:\n> Accuracy on training data = {:.4f}\n> Accuracy on validation data = {:.4f}'.format(
accuracy_score(y_true=y_train, y_pred=dt_preds_train),
accuracy_score(y_true=y_val, y_pred=dt_preds_val)
))
from sklearn.ensemble import RandomForestClassifier
rf_params = {
'n_estimators': 100,
'criterion': 'entropy',
'max_features': 0.5,
'min_samples_leaf': 10,
'random_state': 11,
'n_jobs': -1
}
rf = RandomForestClassifier(**rf_params)
rf.fit(x_train, y_train)
rf_preds_train = rf.predict(x_train)
rf_preds_val = rf.predict(x_val)
print('Random Forest:\n> Accuracy on training data = {:.4f}\n> Accuracy on validation data = {:.4f}'.format(
accuracy_score(y_true=y_train, y_pred=rf_preds_train),
accuracy_score(y_true=y_val, y_pred=rf_preds_val)
))
from sklearn.ensemble import AdaBoostClassifier
dt_params = {
'max_depth': 1,
'random_state': 11
}
dt = DecisionTreeClassifier(**dt_params)
ab_params = {
'n_estimators': 100,
'base_estimator': dt,
'random_state': 11
}
ab = AdaBoostClassifier(**ab_params)
ab.fit(x_train, y_train)
ab_preds_train = ab.predict(x_train)
ab_preds_val = ab.predict(x_val)
print('Adaptive Boosting:\n> Accuracy on training data = {:.4f}\n> Accuracy on validation data = {:.4f}'.format(
accuracy_score(y_true=y_train, y_pred=ab_preds_train),
accuracy_score(y_true=y_val, y_pred=ab_preds_val)
))
ab_params = {
'base_estimator': dt,
'random_state': 11
}
n_estimator_values = list(range(10, 210, 10))
train_accuracies, val_accuracies = [], []
for n_estimators in n_estimator_values:
ab = AdaBoostClassifier(n_estimators=n_estimators, **ab_params)
ab.fit(x_train, y_train)
ab_preds_train = ab.predict(x_train)
ab_preds_val = ab.predict(x_val)
train_accuracies.append(accuracy_score(y_true=y_train, y_pred=ab_preds_train))
val_accuracies.append(accuracy_score(y_true=y_val, y_pred=ab_preds_val))
plt.figure(figsize=(10,7))
plt.plot(n_estimator_values, train_accuracies, label='Train')
plt.plot(n_estimator_values, val_accuracies, label='Validation')
plt.ylabel('Accuracy score')
plt.xlabel('n_estimators')
plt.legend()
plt.show()
from sklearn.ensemble import GradientBoostingClassifier
gbc_params = {
'n_estimators': 100,
'max_depth': 3,
'min_samples_leaf': 5,
'random_state': 11
}
gbc = GradientBoostingClassifier(**gbc_params)
gbc.fit(x_train, y_train)
gbc_preds_train = gbc.predict(x_train)
gbc_preds_val = gbc.predict(x_val)
print('Gradient Boosting Classifier:\n> Accuracy on training data = {:.4f}\n> Accuracy on validation data = {:.4f}'.format(
accuracy_score(y_true=y_train, y_pred=gbc_preds_train),
accuracy_score(y_true=y_val, y_pred=gbc_preds_val)
))
# Base models
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
# Stacking model
from sklearn.linear_model import LogisticRegression
# Add two placeholder columns (initialized to -1) to hold out-of-fold predictions from the base models
x_train_with_metapreds = np.zeros((x_train.shape[0], x_train.shape[1]+2))
x_train_with_metapreds[:, :-2] = x_train
x_train_with_metapreds[:, -2:] = -1
print(x_train_with_metapreds)
kf = KFold(n_splits=5)  # shuffle is False by default, so passing random_state here would have no effect
for train_indices, val_indices in kf.split(x_train):
kfold_x_train, kfold_x_val = x_train[train_indices], x_train[val_indices]
kfold_y_train, kfold_y_val = y_train[train_indices], y_train[val_indices]
svm = LinearSVC(random_state=11, max_iter=1000)
svm.fit(kfold_x_train, kfold_y_train)
svm_pred = svm.predict(kfold_x_val)
knn = KNeighborsClassifier(n_neighbors=4)
knn.fit(kfold_x_train, kfold_y_train)
knn_pred = knn.predict(kfold_x_val)
x_train_with_metapreds[val_indices, -2] = svm_pred
x_train_with_metapreds[val_indices, -1] = knn_pred
x_val_with_metapreds = np.zeros((x_val.shape[0], x_val.shape[1]+2))
x_val_with_metapreds[:, :-2] = x_val
x_val_with_metapreds[:, -2:] = -1
print(x_val_with_metapreds)
# Retrain the base models on the full training set to produce meta-features for the validation set
svm = LinearSVC(random_state=11, max_iter=1000)
svm.fit(x_train, y_train)
knn = KNeighborsClassifier(n_neighbors=4)
knn.fit(x_train, y_train)
svm_pred = svm.predict(x_val)
knn_pred = knn.predict(x_val)
x_val_with_metapreds[:, -2] = svm_pred
x_val_with_metapreds[:, -1] = knn_pred
lr = LogisticRegression(random_state=11)
lr.fit(x_train_with_metapreds, y_train)
lr_preds_train = lr.predict(x_train_with_metapreds)
lr_preds_val = lr.predict(x_val_with_metapreds)
print('Stacked Classifier:\n> Accuracy on training data = {:.4f}\n> Accuracy on validation data = {:.4f}'.format(
accuracy_score(y_true=y_train, y_pred=lr_preds_train),
accuracy_score(y_true=y_val, y_pred=lr_preds_val)
))
# Comparing accuracy with that of base predictors
print('SVM:\n> Accuracy on training data = {:.4f}\n> Accuracy on validation data = {:.4f}'.format(
accuracy_score(y_true=y_train, y_pred=svm.predict(x_train)),
accuracy_score(y_true=y_val, y_pred=svm_pred)
))
print('kNN:\n> Accuracy on training data = {:.4f}\n> Accuracy on validation data = {:.4f}'.format(
accuracy_score(y_true=y_train, y_pred=knn.predict(x_train)),
accuracy_score(y_true=y_val, y_pred=knn_pred)
))
```
<img src="https://github.com/pmservice/ai-openscale-tutorials/raw/master/notebooks/images/banner.png" align="left" alt="banner">
# Working with Watson Machine Learning
The notebook will train, create and deploy a Credit Risk model. It will then configure OpenScale to monitor drift in data and accuracy by injecting sample payloads for viewing in the OpenScale Insights dashboard.
### Contents
- [1. Setup](#setup)
- [2. Model building and deployment](#model)
- [3. OpenScale configuration](#openscale)
- [4. Generate drift model](#driftmodel)
- [5. Submit payload](#payload)
- [6. Enable drift monitoring](#monitor)
- [7. Run drift monitor](#driftrun)
# 1.0 Setup <a name="setup"></a>
## 1.1 Package installation
```
import warnings
warnings.filterwarnings('ignore')
!rm -rf /home/spark/shared/user-libs/python3.6*
!pip install --upgrade opt-einsum==2.3.2 --no-cache | tail -n 1
!pip install --upgrade typing-extensions==3.6.2.1 --no-cache | tail -n 1
!pip install --upgrade jupyter==1 --no-cache | tail -n 1
!pip install --upgrade tensorboard==1.15.0 | tail -n 1
!pip install --upgrade ibm-ai-openscale==2.2.1 --no-cache | tail -n 1
!pip install --upgrade JPype1-py3 | tail -n 1
!pip install --upgrade watson-machine-learning-client-V4==1.0.93 | tail -n 1
!pip install --upgrade numpy==1.18.3 --no-cache | tail -n 1
!pip install --upgrade SciPy==1.4.1 --no-cache | tail -n 1
!pip install --upgrade pyspark==2.3 | tail -n 1
!pip install --upgrade scikit-learn==0.20.3 | tail -n 1
!pip install --upgrade pandas==0.24.2 | tail -n 1
!pip install --upgrade ibm-wos-utils>=1.2.1
```
### Action: restart the kernel!
## 1.2 Configure credentials
- WOS_CREDENTIALS (ICP)
- WML_CREDENTIALS (ICP)
- DATABASE_CREDENTIALS (DB2 on ICP)
- SCHEMA_NAME
The `url` for `WOS_CREDENTIALS` is the URL of the CP4D cluster, e.g. `https://zen-cpd-zen.apps.com`.
```
WOS_CREDENTIALS = {
"url": "********",
"username": "********",
"password": "********"
}
WML_CREDENTIALS = WOS_CREDENTIALS.copy()
WML_CREDENTIALS['instance_id']='openshift'
WML_CREDENTIALS['version']='3.0.0'
```
Provide `DATABASE_CREDENTIALS`. Watson OpenScale uses a database to store payload logs and calculated metrics. If an OpenScale datamart exists in Db2, the existing datamart will be used and no data will be overwritten.
```
DATABASE_CREDENTIALS = {
}
```
Provide SCHEMA_NAME
```
SCHEMA_NAME = ''
```
Provide a custom name to be used as a prefix for the model name, deployment name and OpenScale monitor. A sample value for CUSTOM_NAME could be ```CUSTOM_NAME = 'SAMAYA_OPENSCALE_3.0'```
```
CUSTOM_NAME = 'SAMAYA-DRIFT'
```
# 2.0 Model building and deployment <a name="model"></a>
In this section you will learn how to train a Spark MLlib model and then deploy it as a web service using the Watson Machine Learning service.
## 2.1 Load the training data
```
import pandas as pd
!rm -rf german_credit_data_biased_training.csv
!wget https://raw.githubusercontent.com/IBM/cpd-intelligent-loan-agent-assets/master/data/german_credit_data_biased_training.csv -O german_credit_data_biased_training.csv
!ls -lh german_credit_data_biased_training.csv
data_df = pd.read_csv('german_credit_data_biased_training.csv', sep=",", header=0)
data_df.head()
from pyspark.sql import SparkSession
import json
spark = SparkSession.builder.getOrCreate()
df_data = spark.read.csv(path="german_credit_data_biased_training.csv", sep=",", header=True, inferSchema=True)
df_data.head()
```
## 2.2 Explore data
```
df_data.printSchema()
print("Number of records: " + str(df_data.count()))
```
## 2.3 Create a model
Choose a unique name (i.e. your name or initials) and a date or date-time for `MODEL_NAME` and `DEPLOYMENT_NAME`
```
MODEL_NAME = CUSTOM_NAME + "_MODEL"
DEPLOYMENT_NAME = CUSTOM_NAME + "_DEPLOYMENT"
spark_df = df_data
(train_data, test_data) = spark_df.randomSplit([0.8, 0.2], 24)
print("Number of records for training: " + str(train_data.count()))
print("Number of records for evaluation: " + str(test_data.count()))
spark_df.printSchema()
```
The code below creates a Random Forest Classifier with Spark, setting up string indexers for the categorical features and the label column. Finally, this notebook creates a pipeline including the indexers and the model, and does an initial Area Under ROC evaluation of the model.
```
from pyspark.ml.feature import OneHotEncoder, StringIndexer, IndexToString, VectorAssembler
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml import Pipeline, Model
si_CheckingStatus = StringIndexer(inputCol = 'CheckingStatus', outputCol = 'CheckingStatus_IX')
si_CreditHistory = StringIndexer(inputCol = 'CreditHistory', outputCol = 'CreditHistory_IX')
si_LoanPurpose = StringIndexer(inputCol = 'LoanPurpose', outputCol = 'LoanPurpose_IX')
si_ExistingSavings = StringIndexer(inputCol = 'ExistingSavings', outputCol = 'ExistingSavings_IX')
si_EmploymentDuration = StringIndexer(inputCol = 'EmploymentDuration', outputCol = 'EmploymentDuration_IX')
si_Sex = StringIndexer(inputCol = 'Sex', outputCol = 'Sex_IX')
si_OthersOnLoan = StringIndexer(inputCol = 'OthersOnLoan', outputCol = 'OthersOnLoan_IX')
si_OwnsProperty = StringIndexer(inputCol = 'OwnsProperty', outputCol = 'OwnsProperty_IX')
si_InstallmentPlans = StringIndexer(inputCol = 'InstallmentPlans', outputCol = 'InstallmentPlans_IX')
si_Housing = StringIndexer(inputCol = 'Housing', outputCol = 'Housing_IX')
si_Job = StringIndexer(inputCol = 'Job', outputCol = 'Job_IX')
si_Telephone = StringIndexer(inputCol = 'Telephone', outputCol = 'Telephone_IX')
si_ForeignWorker = StringIndexer(inputCol = 'ForeignWorker', outputCol = 'ForeignWorker_IX')
si_Label = StringIndexer(inputCol="Risk", outputCol="label").fit(spark_df)
label_converter = IndexToString(inputCol="prediction", outputCol="predictedLabel", labels=si_Label.labels)
va_features = VectorAssembler(inputCols=["CheckingStatus_IX", "CreditHistory_IX", "LoanPurpose_IX", "ExistingSavings_IX", "EmploymentDuration_IX", "Sex_IX", \
"OthersOnLoan_IX", "OwnsProperty_IX", "InstallmentPlans_IX", "Housing_IX", "Job_IX", "Telephone_IX", "ForeignWorker_IX", \
"LoanDuration", "LoanAmount", "InstallmentPercent", "CurrentResidenceDuration", "LoanDuration", "Age", "ExistingCreditsCount", \
"Dependents"], outputCol="features")
from pyspark.ml.classification import RandomForestClassifier
classifier = RandomForestClassifier(featuresCol="features")
pipeline = Pipeline(stages=[si_CheckingStatus, si_CreditHistory, si_EmploymentDuration, si_ExistingSavings, si_ForeignWorker, si_Housing, si_InstallmentPlans, si_Job, si_LoanPurpose, si_OthersOnLoan,\
si_OwnsProperty, si_Sex, si_Telephone, si_Label, va_features, classifier, label_converter])
model = pipeline.fit(train_data)
predictions = model.transform(test_data)
evaluatorDT = BinaryClassificationEvaluator(rawPredictionCol="prediction", metricName='areaUnderROC')
area_under_curve = evaluatorDT.evaluate(predictions)
evaluatorDT = BinaryClassificationEvaluator(rawPredictionCol="prediction", metricName='areaUnderPR')
area_under_PR = evaluatorDT.evaluate(predictions)
#default evaluation is areaUnderROC
print("areaUnderROC = %g" % area_under_curve, "areaUnderPR = %g" % area_under_PR)
```
### 2.4 Evaluate more metrics by exporting them into pandas and NumPy
```
from sklearn.metrics import classification_report
y_pred = predictions.toPandas()['prediction']
y_pred = ['Risk' if pred == 1.0 else 'No Risk' for pred in y_pred]
y_test = test_data.toPandas()['Risk']
# target_names must follow the sorted label order used by classification_report: ['No Risk', 'Risk']
print(classification_report(y_test, y_pred, target_names=['No Risk', 'Risk']))
```
## 2.5 Publish the model
In this section, the notebook uses Watson Machine Learning to save the model (including the pipeline) to the WML instance. Previous versions of the model are removed so that the notebook can be run again, resetting all data for another demo.
```
from watson_machine_learning_client import WatsonMachineLearningAPIClient
import json
wml_client = WatsonMachineLearningAPIClient(WML_CREDENTIALS)
```
### 2.5.1 Set default space
This is a new feature in CP4D: in order to deploy a model, you have to create a deployment space and deploy your model there. You can list all the spaces using the `.list()` function, or create a new space by going to the CP4D menu (top-left corner) --> Analyze --> Analytics deployments --> New Deployment Space. Once you know which space you want to deploy to, simply pass the GUID of that space to the `.set.default_space()` function below.
```
wml_client.spaces.list()
```
We'll pass the `GUID` of your deployment space, as listed above, to the `set.default_space()` method below:
```
wml_client.set.default_space('346b75fd-018d-4465-8cb8-0985406cfdee')
```
Alternatively, set `space_name` below and use the following cell to create a space with that name.
```
# space_name = "my_space_name"
# spaces = wml_client.spaces.get_details()['resources']
# space_id = None
# for space in spaces:
# if space['entity']['name'] == space_name:
# space_id = space["metadata"]["guid"]
# if space_id is None:
# space_id = wml_client.spaces.store(
# meta_props={wml_client.spaces.ConfigurationMetaNames.NAME: space_name})["metadata"]["guid"]
#wml_client.set.default_space(space_id)
```
### 2.5.2 Remove existing model and deployment
```
deployment_details = wml_client.deployments.get_details()
for deployment in deployment_details['resources']:
deployment_id = deployment['metadata']['guid']
model_id = deployment['entity']['asset']['href'].split('/')[3].split('?')[0]
if deployment['entity']['name'] == DEPLOYMENT_NAME:
print('Deleting deployment id', deployment_id)
wml_client.deployments.delete(deployment_id)
print('Deleting model id', model_id)
wml_client.repository.delete(model_id)
wml_client.repository.list_models()
```
### 2.5.3 Set `training_data_reference`
```
training_data_reference = {
"name": "Credit Risk feedback",
"connection": DATABASE_CREDENTIALS,
"source": {
"tablename": "CREDIT_RISK_TRAINING",
'schema_name': 'TRAININGDATA',
"type": "db2"
}
}
```
### 2.5.4 Store the model in Watson Machine Learning on CP4D
```
wml_models = wml_client.repository.get_model_details()
model_uid = None
for model_in in wml_models['resources']:
if MODEL_NAME == model_in['entity']['name']:
model_uid = model_in['metadata']['guid']
break
if model_uid is None:
print("Storing model ...")
metadata = {
wml_client.repository.ModelMetaNames.NAME: MODEL_NAME,
wml_client.repository.ModelMetaNames.TYPE: 'mllib_2.3',
wml_client.repository.ModelMetaNames.RUNTIME_UID: 'spark-mllib_2.3',
}
published_model_details = wml_client.repository.store_model(model, metadata, training_data=df_data, pipeline=pipeline)
model_uid = wml_client.repository.get_model_uid(published_model_details)
print("Done")
model_uid
```
## 2.6 Deploy the model
The next section of the notebook deploys the model as a RESTful web service in Watson Machine Learning. The deployed model will have a scoring URL you can use to send data to the model for predictions.
```
wml_deployments = wml_client.deployments.get_details()
deployment_uid = None
for deployment in wml_deployments['resources']:
if DEPLOYMENT_NAME == deployment['entity']['name']:
deployment_uid = deployment['metadata']['guid']
break
if deployment_uid is None:
print("Deploying model...")
meta_props = {
wml_client.deployments.ConfigurationMetaNames.NAME: DEPLOYMENT_NAME,
wml_client.deployments.ConfigurationMetaNames.ONLINE: {}
}
deployment = wml_client.deployments.create(artifact_uid=model_uid, meta_props=meta_props)
deployment_uid = wml_client.deployments.get_uid(deployment)
print("Model id: {}".format(model_uid))
print("Deployment id: {}".format(deployment_uid))
```
# 3.0 Configure OpenScale <a name="openscale"></a>
The notebook will now import the necessary libraries and set up a Python OpenScale client.
```
from ibm_ai_openscale import APIClient4ICP
from ibm_ai_openscale.engines import *
from ibm_ai_openscale.utils import *
from ibm_ai_openscale.supporting_classes import PayloadRecord, Feature
from ibm_ai_openscale.supporting_classes.enums import *
ai_client = APIClient4ICP(WOS_CREDENTIALS)
ai_client.version
```
## 3.1 Create datamart
### 3.1.1 Set up datamart
Watson OpenScale uses a database to store payload logs and calculated metrics. If an OpenScale datamart exists in Db2, the existing datamart will be used and no data will be overwritten.
Prior instances of the Credit model will be removed from OpenScale monitoring.
```
try:
data_mart_details = ai_client.data_mart.get_details()
print('Using existing external datamart')
except:
print('Setting up external datamart')
ai_client.data_mart.setup(db_credentials=DATABASE_CREDENTIALS, schema=SCHEMA_NAME)
data_mart_details = ai_client.data_mart.get_details()
```
## 3.2 Bind machine learning engines
Watson OpenScale needs to be bound to the Watson Machine Learning instance to capture payload data into and out of the model. If this binding already exists, this code will output a warning message and use the existing binding.
```
binding_uid = ai_client.data_mart.bindings.add('WML instance', WatsonMachineLearningInstance4ICP(wml_credentials=WML_CREDENTIALS))
if binding_uid is None:
binding_uid = ai_client.data_mart.bindings.get_details()['service_bindings'][0]['metadata']['guid']
bindings_details = ai_client.data_mart.bindings.get_details()
binding_uid
ai_client.data_mart.bindings.list()
```
## 3.3 Subscriptions
```
ai_client.data_mart.bindings.list_assets()
ai_client.data_mart.bindings.get_details(binding_uid)
```
### 3.3.1 Remove existing credit risk subscriptions
This code removes previous subscriptions to the Credit model to refresh the monitors with the new model and new data.
```
subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
for subscription in subscriptions_uids:
sub_name = ai_client.data_mart.subscriptions.get_details(subscription)['entity']['asset']['name']
if sub_name == MODEL_NAME:
ai_client.data_mart.subscriptions.delete(subscription)
print('Deleted existing subscription for', MODEL_NAME)
```
This code creates the model subscription in OpenScale using the Python client API. Note that we need to provide the model unique identifier, and some information about the model itself.
```
subscription = ai_client.data_mart.subscriptions.add(WatsonMachineLearningAsset(
model_uid,
problem_type=ProblemType.BINARY_CLASSIFICATION,
input_data_type=InputDataType.STRUCTURED,
label_column='Risk',
prediction_column='predictedLabel',
probability_column='probability',
feature_columns = ["CheckingStatus","LoanDuration","CreditHistory","LoanPurpose","LoanAmount","ExistingSavings","EmploymentDuration","InstallmentPercent","Sex","OthersOnLoan","CurrentResidenceDuration","OwnsProperty","Age","InstallmentPlans","Housing","ExistingCreditsCount","Job","Dependents","Telephone","ForeignWorker"],
categorical_columns = ["CheckingStatus","CreditHistory","LoanPurpose","ExistingSavings","EmploymentDuration","Sex","OthersOnLoan","OwnsProperty","InstallmentPlans","Housing","Job","Telephone","ForeignWorker"]
))
if subscription is None:
print('Subscription already exists; get the existing one')
subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
for sub in subscriptions_uids:
if ai_client.data_mart.subscriptions.get_details(sub)['entity']['asset']['name'] == MODEL_NAME:
subscription = ai_client.data_mart.subscriptions.get(sub)
```
Get subscription list
```
subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
ai_client.data_mart.subscriptions.list()
subscription_details = subscription.get_details()
```
# 4.0 Generate drift model <a name="driftmodel"></a>
Drift monitoring requires a drift detection model, which has to be trained and uploaded manually for WML. You can train, create and download a drift detection model using the code below. The full code can be found [here](https://github.com/IBM-Watson/aios-data-distribution/blob/master/training_statistics_notebook.ipynb) (see the section on drift detection model generation).
```
training_data_info = {
"class_label":'Risk',
"feature_columns":["CheckingStatus","LoanDuration","CreditHistory","LoanPurpose","LoanAmount","ExistingSavings","EmploymentDuration","InstallmentPercent","Sex","OthersOnLoan","CurrentResidenceDuration","OwnsProperty","Age","InstallmentPlans","Housing","ExistingCreditsCount","Job","Dependents","Telephone","ForeignWorker"],
"categorical_columns":["CheckingStatus","CreditHistory","LoanPurpose","ExistingSavings","EmploymentDuration","Sex","OthersOnLoan","OwnsProperty","InstallmentPlans","Housing","Job","Telephone","ForeignWorker"]
}
#Set model_type. Acceptable values are:["binary","multiclass","regression"]
model_type = "binary"
#model_type = "multiclass"
#model_type = "regression"
def score(training_data_frame):
    # To be filled in by the user
    wml_credentials = WML_CREDENTIALS  # placeholder; the scoring call below uses the already-initialized wml_client
    # The data types of the label column and the prediction column should be the same.
    # The label column and the prediction column must contain the same set of unique class labels.
    prediction_column_name = "predictedLabel"
probability_column_name = "probability"
feature_columns = list(training_data_frame.columns)
training_data_rows = training_data_frame[feature_columns].values.tolist()
#print(training_data_rows)
payload_scoring = {
wml_client.deployments.ScoringMetaNames.INPUT_DATA: [{
"fields": feature_columns,
"values": [x for x in training_data_rows]
}]
}
score = wml_client.deployments.score(deployment_uid, payload_scoring)
score_predictions = score.get('predictions')[0]
prob_col_index = list(score_predictions.get('fields')).index(probability_column_name)
predict_col_index = list(score_predictions.get('fields')).index(prediction_column_name)
if prob_col_index < 0 or predict_col_index < 0:
raise Exception("Missing prediction/probability column in the scoring response")
import numpy as np
probability_array = np.array([value[prob_col_index] for value in score_predictions.get('values')])
prediction_vector = np.array([value[predict_col_index] for value in score_predictions.get('values')])
return probability_array, prediction_vector
#Generate drift detection model
from ibm_wos_utils.drift.drift_trainer import DriftTrainer
drift_detection_input = {
"feature_columns":training_data_info.get('feature_columns'),
"categorical_columns":training_data_info.get('categorical_columns'),
"label_column": training_data_info.get('class_label'),
"problem_type": model_type
}
drift_trainer = DriftTrainer(data_df,drift_detection_input)
if model_type != "regression":
#Note: batch_size can be customized by user as per the training data size
drift_trainer.generate_drift_detection_model(score,batch_size=data_df.shape[0])
#Note: Two column constraints are not computed beyond two_column_learner_limit(default set to 200)
#User can adjust the value depending on the requirement
drift_trainer.learn_constraints(two_column_learner_limit=200)
drift_trainer.create_archive()
#Generate a download link for drift detection model
from IPython.display import HTML
import base64
import io
def create_download_link_for_ddm( title = "Download Drift detection model", filename = "drift_detection_model.tar.gz"):
#Retains stats information
with open(filename,'rb') as file:
ddm = file.read()
b64 = base64.b64encode(ddm)
payload = b64.decode()
html = '<a download="{filename}" href="data:text/json;base64,{payload}" target="_blank">{title}</a>'
html = html.format(payload=payload,title=title,filename=filename)
return HTML(html)
create_download_link_for_ddm()
#!rm -rf drift_detection_model.tar.gz
#!wget -O drift_detection_model.tar.gz https://github.com/IBM/cpd-intelligent-loan-agent-assets/blob/master/models/drift_detection_model.tar.gz?raw=true
```
# 5.0 Submit payload <a name="payload"></a>
### Score the model so we can configure monitors
Now that the WML service has been bound and the subscription has been created, we need to send a request to the model before we configure OpenScale. This allows OpenScale to create a payload log in the datamart with the correct schema, so it can capture data coming into and out of the model.
```
fields = ["CheckingStatus","LoanDuration","CreditHistory","LoanPurpose","LoanAmount","ExistingSavings","EmploymentDuration","InstallmentPercent","Sex","OthersOnLoan","CurrentResidenceDuration","OwnsProperty","Age","InstallmentPlans","Housing","ExistingCreditsCount","Job","Dependents","Telephone","ForeignWorker"]
values = [
["no_checking",13,"credits_paid_to_date","car_new",1343,"100_to_500","1_to_4",2,"female","none",3,"savings_insurance",46,"none","own",2,"skilled",1,"none","yes"],
["no_checking",24,"prior_payments_delayed","furniture",4567,"500_to_1000","1_to_4",4,"male","none",4,"savings_insurance",36,"none","free",2,"management_self-employed",1,"none","yes"],
["0_to_200",26,"all_credits_paid_back","car_new",863,"less_100","less_1",2,"female","co-applicant",2,"real_estate",38,"none","own",1,"skilled",1,"none","yes"],
["0_to_200",14,"no_credits","car_new",2368,"less_100","1_to_4",3,"female","none",3,"real_estate",29,"none","own",1,"skilled",1,"none","yes"],
["0_to_200",4,"no_credits","car_new",250,"less_100","unemployed",2,"female","none",3,"real_estate",23,"none","rent",1,"management_self-employed",1,"none","yes"],
["no_checking",17,"credits_paid_to_date","car_new",832,"100_to_500","1_to_4",2,"male","none",2,"real_estate",42,"none","own",1,"skilled",1,"none","yes"],
["no_checking",33,"outstanding_credit","appliances",5696,"unknown","greater_7",4,"male","co-applicant",4,"unknown",54,"none","free",2,"skilled",1,"yes","yes"],
["0_to_200",13,"prior_payments_delayed","retraining",1375,"100_to_500","4_to_7",3,"male","none",3,"real_estate",37,"none","own",2,"management_self-employed",1,"none","yes"]
]
payload_scoring = {"fields": fields,"values": values}
payload = {
wml_client.deployments.ScoringMetaNames.INPUT_DATA: [payload_scoring]
}
scoring_response = wml_client.deployments.score(deployment_uid, payload)
print('Single record scoring result:', '\n fields:', scoring_response['predictions'][0]['fields'], '\n values: ', scoring_response['predictions'][0]['values'][0])
```
# 6. Enable drift monitoring <a name="monitor"></a>
```
subscription.drift_monitoring.enable(threshold=0.05, min_records=10,model_path="drift_detection_model.tar.gz")
```
# 7. Run Drift monitor on demand <a name="driftrun"></a>
```
!rm german_credit_feed.json
!wget https://raw.githubusercontent.com/IBM/cpd-intelligent-loan-agent-assets/master/data/german_credit_feed.json
import random
with open('german_credit_feed.json', 'r') as scoring_file:
scoring_data = json.load(scoring_file)
fields = scoring_data['fields']
values = []
for _ in range(10):
current = random.choice(scoring_data['values'])
#set age of all rows to 100 to increase drift values on dashboard
current[12] = 100
values.append(current)
payload_scoring = {"fields": fields, "values": values}
payload = {
wml_client.deployments.ScoringMetaNames.INPUT_DATA: [payload_scoring]
}
scoring_response = wml_client.deployments.score(deployment_uid, payload)
drift_run_details = subscription.drift_monitoring.run(background_mode=False)
subscription.drift_monitoring.get_table_content()
```
## Congratulations!
You have finished running all the cells within the notebook for IBM Watson OpenScale. You can now view the OpenScale dashboard by going to the CP4D `Home` page, and clicking `Services`. Choose the `OpenScale` tile and click the menu to `Open`. Click on the tile for the model you've created to see fairness, accuracy, and performance monitors. Click on the timeseries graph to get detailed information on transactions during a specific time window.
OpenScale shows model performance over time. You have two options to keep data flowing to your OpenScale graphs:
* Download, configure and schedule the [model feed notebook](https://raw.githubusercontent.com/emartensibm/german-credit/master/german_credit_scoring_feed.ipynb). This notebook can be set up with your WML credentials, and scheduled to provide a consistent flow of scoring requests to your model, which will appear in your OpenScale monitors.
* Re-run this notebook. Running this notebook from the beginning will delete and re-create the model and deployment, and re-create the historical data. Please note that the payload and measurement logs for the previous deployment will continue to be stored in your datamart, and can be deleted if necessary.
# JD JData Algorithm Competition (3): Exploring the Behavior of High-Potential Users
The competition task is to predict the purchase intent of high-potential users, so a clear understanding of **what a high-potential user is** is crucial for the data analysis, feature extraction, and modeling that follow.
Put simply, a high-potential user in the training set should have the following characteristics:
- They must have at least one purchase.
- For a given item, the gap between the purchase and the other interactions (browsing, clicking, adding to favorites, etc.) should be **more than one day**.
Because the task is to predict purchases over the next 5 days, a user who completes all interactions with an item (including the purchase) on the same day
gives us nothing that helps guide future predictions.
**Update note**: some users buy the same item again after a purchase; these users cannot be ignored either.
So next, let us first try to identify these high-potential users and then do some analysis of their behavior.
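As a quick illustration of this rule, here is a minimal toy sketch (the user IDs, item IDs, and dates below are made up and are not from the competition data): a user-item pair is flagged as high-potential only when the last purchase comes at least one day after the earliest interaction.
```
import pandas as pd

# Toy records: user 1 browses on 03-01 and buys on 03-03 (high potential),
# user 2 browses and buys on the same day (not high potential).
toy = pd.DataFrame({
    'user_id': [1, 1, 2, 2],
    'sku_id':  [10, 10, 20, 20],
    'type':    [1, 4, 1, 4],   # 1 = browse, 4 = purchase
    'date':    pd.to_datetime(['2016-03-01', '2016-03-03',
                               '2016-03-05', '2016-03-05'])
})

def is_high_potential(group):
    last_buy = group.loc[group['type'] == 4, 'date'].max()
    first_touch = group['date'].min()
    return (last_buy - first_touch).days > 0

# user 1 / item 10 -> True, user 2 / item 20 -> False
print(toy.groupby(['user_id', 'sku_id']).apply(is_high_potential))
```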
```
# Import required packages
%matplotlib inline
# Plotting packages
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Define file names
ACTION_201602_FILE = "data/JData_Action_201602.csv"
ACTION_201603_FILE = "data/JData_Action_201603.csv"
ACTION_201603_EXTRA_FILE = "data/JData_Action_201603_extra.csv"
ACTION_201604_FILE = "data/JData_Action_201604.csv"
COMMENT_FILE = "data/JData_Comment.csv"
PRODUCT_FILE = "data/JData_Product.csv"
USER_FILE = "data/JData_User.csv"
NEW_USER_FILE = "data/JData_User_New.csv"
USER_TABLE_FILE = "data/user_table.csv"
BUY_USER_LIST_FILE = "data/buy_user_list.csv"
PROTENTIAL_USER_RECORD = "data/protential_user_record.csv"
```
### Finding users with purchase records
```
# Find the user-item pairs with purchase records in a single file
def buy_user_in_batch_data(fname, chunk_size=100000):
reader = pd.read_csv(fname, header=0, iterator=True)
chunks = []
loop = True
while loop:
try:
chunk = reader.get_chunk(chunk_size)[
["user_id", "sku_id", "type"]]
chunks.append(chunk)
except StopIteration:
loop = False
print("Iteration is stopped")
df_ac = pd.concat(chunks, ignore_index=True)
    # type = 4 means purchase
df_ac = df_ac[df_ac['type'] == 4][["user_id", "sku_id"]]
return df_ac
# Find users with purchase records and write them to a csv file
def find_buy_user():
df_ac = []
df_ac.append(buy_user_in_batch_data(fname=ACTION_201602_FILE))
df_ac.append(buy_user_in_batch_data(fname=ACTION_201603_FILE))
df_ac.append(buy_user_in_batch_data(fname=ACTION_201603_EXTRA_FILE))
df_ac.append(buy_user_in_batch_data(fname=ACTION_201604_FILE))
    # Merge the per-file records into a single dataframe
    df_ac = pd.concat(df_ac, ignore_index=True)
    # Drop duplicate user-item pairs
    df_ac = df_ac.drop_duplicates()
    # Write to file
    df_ac.to_csv(BUY_USER_LIST_FILE, index=False)
# Run it
find_buy_user()
# Find, in a single file, all records related to the given user-item pairs
def ui_record_in_batch_data(fname, ui_pair, chunk_size=100000):
reader = pd.read_csv(fname, header=0, iterator=True)
chunks = []
loop = True
while loop:
try:
chunk = reader.get_chunk(chunk_size)[
["user_id", "sku_id", "time", "type"]]
chunks.append(chunk)
except StopIteration:
loop = False
print("Iteration is stopped")
df_ac = pd.concat(chunks, ignore_index=True)
df = []
for index, row in ui_pair.iterrows():
usr_id = row["user_id"]
sku_id = row["sku_id"]
        # Collect all records related to this user-item pair
df.append(df_ac[(df_ac["user_id"] == usr_id) &
(df_ac["sku_id"] == sku_id)])
df = pd.concat(df, ignore_index=True)
return df
# apply helper: given all records of one user-item pair, decide whether it is a high-potential case
def more_than_a_day(group):
    # Date of the last purchase of this item
    last_buy_day = max(group[group["type"] == 4]["date"])
    # Earliest date of any interaction with this item
    earliest_behave_day = min(group["date"])
    # If the gap is at least one day, treat the pair as high-potential
    if (last_buy_day - earliest_behave_day).days > 0:
        # The potential_flag field marks whether this is a high-potential user
group["potential_flag"] = 1
else:
group["potential_flag"] = 0
return group
# Find high-potential users and write the related behavior records to a file
def find_potential_user():
    # user-item pairs that contain a purchase
ui_pair = pd.read_csv(BUY_USER_LIST_FILE, header=0)
df_ac = []
df_ac.append(ui_record_in_batch_data(ACTION_201602_FILE, ui_pair))
    df_ac.append(ui_record_in_batch_data(ACTION_201603_FILE, ui_pair))
    df_ac.append(ui_record_in_batch_data(ACTION_201603_EXTRA_FILE, ui_pair))
    df_ac.append(ui_record_in_batch_data(ACTION_201604_FILE, ui_pair))
    df_ac = pd.concat(df_ac, ignore_index=True)
    # Drop duplicates
    df_ac = df_ac.drop_duplicates()
    # Add a date column
    df_ac['date'] = pd.to_datetime(df_ac['time']).dt.date
    df_ac = df_ac.groupby(["user_id", "sku_id"]).apply(more_than_a_day)
    # Keep only the high-potential records
    df_ac = df_ac[df_ac["potential_flag"] == 1]
    # Write to file
    df_ac.to_csv(PROTENTIAL_USER_RECORD, index=False)
# Run it
find_potential_user()
```
### Analyzing the behavior trajectories of high-potential users
Curves of total interaction counts versus the gap in days (some decay is likely).
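One way to build such a curve is sketched below; this is not part of the original notebook and simply assumes the `PROTENTIAL_USER_RECORD` file written above, with its `date` and `type` columns. For every record it measures how many days before the final purchase of its user-item pair the event occurred, then counts interactions per gap:
```
# Minimal sketch: interaction counts vs. days before the final purchase
ui = pd.read_csv(PROTENTIAL_USER_RECORD, header=0, parse_dates=['date'])
buy_date = (ui[ui['type'] == 4]
            .groupby(['user_id', 'sku_id'])['date'].max()
            .rename('buy_date').reset_index())
ui = ui.merge(buy_date, on=['user_id', 'sku_id'])
ui['gap_days'] = (ui['buy_date'] - ui['date']).dt.days
gap_counts = ui[ui['gap_days'] >= 0].groupby('gap_days').size()
gap_counts.plot(kind='bar', figsize=(12, 4))
plt.xlabel('days before purchase')
plt.ylabel('number of interactions')
```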
```
ui_record = pd.read_csv(PROTENTIAL_USER_RECORD, header=0)
ui_record.head()
# Take one user as an example
user_id = 62969
cu_record = ui_record[(ui_record['user_id'] == user_id)]
cu_record.tail()
time_range = pd.to_datetime(cu_record['time']).map(lambda x: x.strftime('%m-%d %H:%M'))
x_index = range(len(cu_record['type']))
# Set the figure size
plt.figure(figsize=(18,5))
plt.scatter(x_index, cu_record['type'],c=cu_record['type'], s=36, lw=0, cmap=plt.cm.coolwarm)
plt.plot(x_index, cu_record['type'], 'y--', markersize=1)
plt.xlim(min(x_index) - 1, max(x_index) + 1)
plt.ylim(0, 7)
plt.xlabel('number')
plt.ylabel('behavior')
# plt.xticks(range(len(cu_record['type'])), time_range, rotation='vertical', fontsize=8)
plt.yticks(range(0,8), ["","browse","add cart","del cart","buy","favor", "click"])
plt.tight_layout()
# Take one user and one specific item as an example
item_id = 51916
cu_record = ui_record[(ui_record['user_id'] == user_id) & (ui_record['sku_id'] == item_id)]
time_range = pd.to_datetime(cu_record['time']).map(lambda x: x.strftime('%m-%d %H:%M'))
x_index = range(len(cu_record['type']))
# Set the figure size
plt.figure(figsize=(12,5))
plt.scatter(x_index, cu_record['type'],c=cu_record['type'], s=80, lw=0, cmap=plt.cm.rainbow)
plt.plot(x_index, cu_record['type'], 'y--', markersize=1)
plt.xlim(min(x_index) - 1, max(x_index) + 1)
plt.ylim(0, 7)
plt.xlabel('time')
plt.ylabel('behavior')
plt.xticks(range(len(cu_record['type'])), time_range, rotation='vertical', fontsize=8)
plt.yticks(range(0,8), ["","browse","add cart","del cart","buy","favor", "click"])
plt.tight_layout()
```
```
import pickle
from pathlib import Path
import numpy as np
from scipy.io import savemat, loadmat
from mgcpy.benchmarks.ts_benchmarks import IndependentAR1, CorrelatedAR1, Nonlinear, EconometricProcess, ExtinctGaussian
def _simulate_data(process, n_max, num_sims, output_dir="./data"):
# Store simulate processes.
X_full = np.zeros((n_max, num_sims))
Y_full = np.zeros((n_max, num_sims))
for s in range(num_sims):
X_full[:, s], Y_full[:, s] = process.simulate(n_max)
# Save simulated output.
output = {'X' : X_full, 'Y' : Y_full}
p = Path(output_dir)
if not p.is_dir():
p.mkdir(parents=True)
filename = p / f"{process.filename}_data.pkl"
file = open(filename, 'wb')
pickle.dump(output, file)
file.close()
# Save to MATLAB format as well.
savemat(p / f"{process.filename}_data.mat", {'X_full' : X_full, 'Y_full' : Y_full})
processes = [
IndependentAR1(),
CorrelatedAR1(),
Nonlinear(),
EconometricProcess(shift=0.5, scale=0.1),
ExtinctGaussian()
]
np.random.seed(1)
for process in processes:
_simulate_data(process, n_max=1000, num_sims=1000)
#verify no infs in econometric process
filename = "./data/%s_data.pkl" % (processes[3].filename)
pickle_in = open(filename,"rb")
data = pickle.load(pickle_in)
pickle_in.close()
X_full = data['X']
Y_full = data['Y']
np.any(np.isinf(X_full))
def generate_extinct_gaussians(phis, n_max, num_sims, output_dir="./data/extinct_rates/"):
"""
phis = list
"""
for phi in phis:
process = ExtinctGaussian(extinction_rate=phi)
X_full = np.zeros((n_max, num_sims))
Y_full = np.zeros((n_max, num_sims))
for s in range(num_sims):
X_full[:, s], Y_full[:, s] = process.simulate(n_max)
# Save to MATLAB format as well.
p = Path(output_dir)
if not p.is_dir():
p.mkdir(parents=True)
savemat(p / f'{process.filename}_phi_{"{:.3f}".format(phi)}_data.mat',
{'X_full' : X_full, 'Y_full' : Y_full})
phis = np.arange(.2, 1, 0.025)
n_max = 1200
n_sims = 1000
np.random.seed(1)
generate_extinct_gaussians(phis, n_max, n_sims)
def generate_varying_indep_ars(phis, n_max, num_sims, output_dir="./data/ars/"):
"""
phis = list
"""
for phi in phis:
process = IndependentAR1()
X_full = np.zeros((n_max, num_sims))
Y_full = np.zeros((n_max, num_sims))
for s in range(num_sims):
X_full[:, s], Y_full[:, s] = process.simulate(n=n_max, phi=phi)
# Save to MATLAB format as well.
p = Path(output_dir)
if not p.is_dir():
p.mkdir(parents=True)
savemat(p / f'{process.filename}_phi_{"{:.3f}".format(phi)}_data.mat',
{'X_full' : X_full, 'Y_full' : Y_full})
phis = np.arange(.1, 1, 0.05)
n_max = 1200
n_sims = 1000
np.random.seed(1)
generate_varying_indep_ars(phis, n_max, n_sims)
```
<h1 align=center><font size="4"> Customer churn prediction: Predicting whether a customer will change telco provider</font></h1>
<h1 align=center><font size="2"> Author: Shahzaib S. Warraich </font></h1>
<h1>Table of contents</h1>
<div class="alert alert-block alert-info" style="margin-top: 20px">
<ol>
<li><a href="#load_dataset">Loading the training dataset</a></li>
<li><a href="#wrangling">Data wrangling and exploratory data analysis</a></li>
<li><a href="#training">Model training and evaluation</a></li>
<li><a href="#testing">Loading the testing dataset</a></li>
<li><a href="#churn_eval">Churn evaluation</a></li>
</div>
<br>
<hr>
<h2 id="load_dataset">1. Loading the training dataset</h2>
|Field name|Description|
|--- |--- |
|State|string. 2-letter code of the US state of customer residence|
|account_length|numerical. Number of months the customer has been with the current telco provider|
|area_code|string="area_code_AAA" where AAA = 3 digit area code.|
|International_plan|(yes/no). The customer has international plan.|
|Voice_mail_plan|(yes/no). The customer has voice mail plan.|
|number_vmail_messages|numerical. Number of voice-mail messages.|
|total_day_minutes|numerical. Total minutes of day calls|
|total_day_calls|numerical. Total number of day calls.|
|total_day_charge|numerical. Total charge of day calls.|
|total_eve_minutes| numerical. Total minutes of evening call|
|total_eve_calls|numerical. Total number of evening calls.|
|total_eve_charge|numerical. Total charge of evening calls.|
|total_night_minutes|numerical. Total minutes of night calls.|
|total_night_calls|numerical. Total number of night calls.|
|total_night_charge|numerical. Total charge of night calls.|
|total_intl_minutes|numerical. Total minutes of international calls.|
|total_intl_calls|numerical. Total number of international calls|
|total_intl_charge|numerical. Total charge of international calls|
|number_customer_service_calls|numerical. Number of calls to customer service|
|churn|(yes/no). Customer churn - target variable.|
```
import pandas as pd
df=pd.read_csv("train.csv")
df.head(10)
df.dtypes
df.shape
df.dropna(inplace=True)
df.shape
df['churn'].value_counts()
```
<h2 id="wrangling">2. Data wrangling and exploratory data analysis</h2>
```
df.describe()
#Converting the yes/no fields to discrete form for modelling
from sklearn.preprocessing import LabelEncoder
number=LabelEncoder()
df['international_plan']=number.fit_transform(df['international_plan'].astype('str'))
df['voice_mail_plan']=number.fit_transform(df['voice_mail_plan'].astype('str'))
df['churn']=number.fit_transform(df['churn'].astype('str'))
df.head(10)
#get correlations of each feature in dataset
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
corrmat = df.corr()
top_corr_features = corrmat.index
plt.figure(figsize=(20,20))
#plot heat map
g=sns.heatmap(df[top_corr_features].corr(),annot=True,cmap="RdYlGn")
df.columns
import plotly.express as px
import numpy as np
fig = px.pie(df, values="international_plan", names="churn",
title='International plan customer churn')
fig.show()
fig = px.pie(df, values="voice_mail_plan", names="churn",
title='Voice mail plan customer churn')
fig.show()
```
<p>We now have a better idea of what our data looks like and which variables are important to take into account when predicting the customer churn. We have narrowed it down to the following variables:</p>
Selected predictor variables (note that `international_plan` and `voice_mail_plan` are binary after encoding):
<ul>
<li>international_plan</li>
<li>total_day_minutes</li>
<li>total_day_charge</li>
<li>number_customer_service_calls</li>
<li>voice_mail_plan</li>
</ul>
<p>As we now move into building machine learning models to automate our analysis, feeding the model with variables that meaningfully affect our target variable will improve our model's prediction performance.</p>
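One quick sanity check on this shortlist, sketched below, is to rank all features by the absolute value of their (linear) correlation with the target. This is not part of the original analysis; it only uses the `df` and the encoded `churn` column defined above.
```
# Rank features by absolute correlation with churn (a rough, linear-only check)
churn_corr = df.corr()['churn'].drop('churn').abs().sort_values(ascending=False)
print(churn_corr.head(10))
```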
<h2 id="training">3. Model training and evaluation</h2>
```
X= df[['international_plan','total_day_minutes','total_day_charge','number_customer_service_calls','voice_mail_plan']].values
X[0:20]
y = df['churn'].values
y[0:20]
from sklearn import preprocessing
#normalize data
X= preprocessing.StandardScaler().fit(X).transform(X)
X[0:5]
```
This is a supervised learning (classification) use case. Hence, we'll be using the following algorithms to train our model:
- K Nearest Neighbor (KNN)
- Decision Tree
- Support Vector Machine
- Logistic Regression
- Random Forest
- Naive Bayes
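Before fitting each model in its own cell below, a quick side-by-side comparison can be run with cross-validation. The sketch below is not part of the original notebook; it uses mostly default hyperparameters on the normalized `X` and `y` defined above, only to get a first impression of relative performance.
```
# Quick comparison sketch with default settings (not the tuned models used below)
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB

candidates = {
    'KNN': KNeighborsClassifier(),
    'Decision Tree': DecisionTreeClassifier(),
    'SVM': SVC(),
    'Logistic Regression': LogisticRegression(solver='liblinear'),
    'Random Forest': RandomForestClassifier(n_estimators=100),
    'Naive Bayes': GaussianNB(),
}
for name, clf in candidates.items():
    scores = cross_val_score(clf, X, y, cv=5, scoring='f1_weighted')
    print("{:20s} mean F1 (5-fold CV): {:.3f}".format(name, scores.mean()))
```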
```
from sklearn.model_selection import train_test_split
#Splitting the dataset
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=4)
print ('Train set:', X_train.shape, y_train.shape)
print ('Test set:', X_test.shape, y_test.shape)
```
## K Nearest Neighbor(KNN)
```
from sklearn.neighbors import KNeighborsClassifier
#Identifying the best number of neighbours
from sklearn import metrics
Ks = 10
mean_acc = np.zeros((Ks-1))
std_acc = np.zeros((Ks-1))
ConfusionMx = []
for n in range(1,Ks):
#Train Model and Predict
neigh = KNeighborsClassifier(n_neighbors = n).fit(X_train,y_train)
yhat=neigh.predict(X_test)
mean_acc[n-1] = metrics.accuracy_score(y_test, yhat)
std_acc[n-1]=np.std(yhat==y_test)/np.sqrt(yhat.shape[0])
mean_acc
plt.plot(range(1,Ks),mean_acc,'g')
plt.fill_between(range(1,Ks),mean_acc - 1 * std_acc,mean_acc + 1 * std_acc, alpha=0.15)
plt.legend(('Accuracy ', '+/- 1xstd'))
plt.ylabel('Accuracy ')
plt.xlabel('Number of Neighbours (K)')
plt.tight_layout()
plt.show()
print( "Best accuracy:", mean_acc.max(), "k=", mean_acc.argmax()+1)
k = 9
#Train Model and Predict
neigh = KNeighborsClassifier(n_neighbors = k).fit(X_train,y_train)
neigh
from sklearn import metrics
nhat=neigh.predict(X_test)
print("Test set Accuracy: ", metrics.accuracy_score(y_test, nhat))
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import f1_score
print("The jaccard score is", jaccard_similarity_score(y_test, nhat))
print('The F1 score is', f1_score(y_test, nhat, average='weighted'))
```
## Decision Tree
```
from sklearn.tree import DecisionTreeClassifier
Tree = DecisionTreeClassifier(criterion="entropy", max_depth = 6).fit(X_train,y_train)
Tree
That=Tree.predict(X_test)
print("Test set Accuracy: ", metrics.accuracy_score(y_test, That))
print("The jaccard score is", jaccard_similarity_score(y_test, That))
print('The F1 score is', f1_score(y_test, That, average='weighted'))
```
## Support Vector Machine
```
from sklearn import svm
clf = svm.SVC(kernel='rbf').fit(X_train,y_train)
clf
Shat=clf.predict(X_test)
print("Test set Accuracy: ", metrics.accuracy_score(y_test, Shat))
print("The jaccard score is", jaccard_similarity_score(y_test, Shat))
print('The F1 score is', f1_score(y_test, Shat, average='weighted'))
```
## Logistic Regression
```
from sklearn.linear_model import LogisticRegression
LR = LogisticRegression(C=0.01, solver='liblinear').fit(X_train,y_train)
LR
Lhat=LR.predict(X_test)
print("Test set Accuracy: ", metrics.accuracy_score(y_test, Lhat))
print("The jaccard score is", jaccard_similarity_score(y_test, Lhat))
print('The F1 score is', f1_score(y_test, Lhat, average='weighted'))
```
## Random Forest
```
from sklearn.ensemble import RandomForestClassifier
Forest = RandomForestClassifier(max_depth=6, random_state=1).fit(X_train,y_train)
Forest
Rhat=Forest.predict(X_test)
print("Test set Accuracy: ", metrics.accuracy_score(y_test, Rhat))
print("The jaccard score is", jaccard_similarity_score(y_test, Rhat))
print('The F1 score is', f1_score(y_test, Rhat, average='weighted'))
```
## Naive Bayes
```
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
NB = gnb.fit(X_train, y_train)
NB
Nhat=NB.predict(X_test)
print("Test set Accuracy: ", metrics.accuracy_score(y_test, Nhat))
print("The jaccard score is", jaccard_similarity_score(y_test, Nhat))
print('The F1 score is', f1_score(y_test, Nhat, average='weighted'))
```
| Algorithm | Jaccard | F1-score |
|--------------------|---------|----------|
| KNN | 0.92 | 0.91 |
| Decision Tree | 0.91 | 0.90 |
| SVM | 0.92 | 0.91 |
| Logistic Regression| 0.87 | 0.83 |
| Random Forest | 0.93 | 0.92 |
| Naive Bayes | 0.87 | 0.87 |
<h2 id="testing">4. Loading the testing dataset</h2>
```
test_df=pd.read_csv("test.csv")
test_df.head(10)
#adjusting the dataset
test_df['international_plan']=number.fit_transform(test_df['international_plan'].astype('str'))
test_df['voice_mail_plan']=number.fit_transform(test_df['voice_mail_plan'].astype('str'))
test_df.head(10)
test_df.shape
test_X= test_df[['international_plan','total_day_minutes','total_day_charge','number_customer_service_calls','voice_mail_plan']].values
test_X[0:20]
test_X= preprocessing.StandardScaler().fit(test_X).transform(test_X)
test_X[0:5]
```
<h2 id="churn_eval">5. Churn evaluation</h2>
#### Using the best classifier (Random Forest) to predict churn values on new data
```
Churn=Forest.predict(test_X)
test_df["churn"]=Churn
test_df.head(20)
#Creating a new csv file
test_df.to_csv('Telcos_churn_data.csv')
```
```
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import numpy as np
import astropy.units as u
import emcee
import os
import sys
sys.path.insert(0, '../')
from libra import (IRTFTemplate, magnitudes,
nirspec_pixel_wavelengths, throughput, kepler62,
background, poisson, transit_model, transit_duration)
from copy import deepcopy
from corner import corner
sptype = 'K2V'
mag = magnitudes['Kepler-62']['J']
exptime = 100*u.s
name = 'kepler62'
planets = list('bcdef')
system = kepler62
for planet in planets:
results_dir = os.path.join('posteriors', name)
if not os.path.exists(results_dir):
os.mkdir(results_dir)
planet_params = system(planet)
duration = transit_duration(planet_params)
if np.isnan(duration):
duration = 2/24
times = np.arange(planet_params.t0 - 2*duration,
planet_params.t0 + 2*duration,
exptime.to(u.day).value)
spectrum_photo = IRTFTemplate(sptype)
transit = transit_model(times, planet_params)
wl = nirspec_pixel_wavelengths()
fluxes = np.zeros((len(times), len(wl)))
for i in range(len(times)):
fluxes[i, :] = poisson(spectrum_photo.n_photons(wl, exptime, mag) * transit[i] *
throughput(wl) + background(wl, exptime))
spectral_fluxes = np.sum(fluxes, axis=1)
t, f, e = times, spectral_fluxes, np.sqrt(spectral_fluxes)
def model(p, t, init_params):
trial_params = deepcopy(init_params)
trial_params.t0 = p[0]
trial_params.rp = p[1]**0.5
return p[2] * transit_model(t, trial_params)
def lnlikelihood(p, t, init_params):
return -0.5 * np.nansum((model(p, t, init_params) - f)**2 / e**2)
def lnprior(p, t, init_params):
t0, depth, amp = p
if ((init_params.t0 - 0.1 < t0 < init_params.t0 + 0.1) and
        ((0.5 * init_params.rp)**2 < depth < (1.5 * init_params.rp)**2) and
(0.999 * np.median(f) < amp < 1.001 * np.median(f))):
return 0.0
return -np.inf
ndim, nwalkers = 3, 6
init_p = np.array([planet_params.t0, planet_params.rp**2, np.median(f)])
pos = [init_p + 1e-4*np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnlikelihood, args=(t, planet_params))
sampler.run_mcmc(pos, 5000);
old_shape = sampler.chain.shape
new_shape = (old_shape[0] * 2000, ndim)
samples = sampler.chain[:, -2000:, :].reshape(new_shape)
corner(samples, labels=['time', 'depth', 'f0'])
plt.savefig('posteriors/{0}/{1}.png'.format(name, planet), bbox_inches='tight', dpi=200)
plt.close()
plt.figure()
for i in np.random.randint(0, samples.shape[0], 100):
step = model(samples[i, :], t, planet_params)
plt.plot(t, step, alpha=0.05, color='k')
#plt.errorbar(t, f, e, fmt='.', color='r')
plt.plot(t, f, '.', color='r')
plt.xlabel('Time [JD]')
plt.ylabel('NIRSpec counts')
plt.title('Assuming only Poisson errors')
plt.savefig('posteriors/{0}/lightcurve_{1}.png'.format(name, planet), bbox_inches='tight', dpi=200)
t0_mcmc, depth_mcmc, amp_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(sampler.flatchain[5000:, :], [16, 50, 84],
axis=0)))
np.savetxt('posteriors/{0}/time_solution_{1}.txt'.format(name, planet), t0_mcmc)
plt.close()
import json
d = {}
for planet in list('bcdef'):
mid, upper, lower = np.loadtxt('posteriors/{0}/time_solution_{1}.txt'.format(name, planet), unpack=True)
t_rms = ((0.5 * (upper + lower))*u.day).to(u.s).value
d[planet] = t_rms
print("planet {0} t_rms: {1:.2f}".format(planet, t_rms))
json.dump(d, open('photon_limited/{0}.json'.format(name), 'w'),
indent=4, sort_keys=True)
```
# MNIST Dynamic Filter Classification
Note: This notebook is designed to run with Python 3 and a GPU runtime.

This notebook uses TensorFlow 2.x.
```
%tensorflow_version 2.x
```
####[MDF-01]
Import modules and set random seeds.
```
import numpy as np
from pandas import DataFrame
import tensorflow as tf
from tensorflow.keras import layers, models, initializers
from tensorflow.keras.datasets import mnist
np.random.seed(20190222)
tf.random.set_seed(20190222)
```
####[MDF-02]
Download the MNIST dataset and store it in NumPy arrays.
```
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.reshape(
(len(train_images), 784)).astype('float32') / 255
test_images = test_images.reshape(
(len(test_images), 784)).astype('float32') / 255
train_labels = tf.keras.utils.to_categorical(train_labels, 10)
test_labels = tf.keras.utils.to_categorical(test_labels, 10)
```
####[MDF-03]
Define a CNN model with a single convolutional filter layer.
```
model = models.Sequential()
model.add(layers.Reshape((28, 28, 1), input_shape=(28*28,), name='reshape'))
model.add(layers.Conv2D(16, (5, 5), padding='same',
kernel_initializer=initializers.TruncatedNormal(),
use_bias=True, activation='relu',
name='conv_filter'))
model.add(layers.MaxPooling2D((2, 2), name='max_pooling'))
model.add(layers.Flatten(name='flatten'))
model.add(layers.Dense(1024, activation='relu',
kernel_initializer=initializers.TruncatedNormal(),
name='hidden'))
model.add(layers.Dense(10, activation='softmax', name='softmax'))
model.summary()
```
####[MDF-04]
Compile the model using the Adam optimizer and categorical cross entropy as the loss function.
```
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['acc'])
```
####[MDF-05]
Train the model. It achieves about 99.0% accuracy.
```
history = model.fit(train_images, train_labels,
validation_data=(test_images, test_labels),
batch_size=128, epochs=10)
```
####[MDF-06]
Plot charts for the accuracy and loss values.
```
DataFrame({'acc': history.history['acc'],
'val_acc': history.history['val_acc']}).plot()
DataFrame({'loss': history.history['loss'],
'val_loss': history.history['val_loss']}).plot()
```
####[MDF-07]
Mount your Google Drive on `/content/gdrive`.
```
from google.colab import drive
drive.mount('/content/gdrive')
```
####[MDF-08]
Export the trained model as a file `MNIST_single.hd5` on your Google Drive.
```
model.save('/content/gdrive/My Drive/MNIST_single.hd5', save_format='h5')
!ls -lh '/content/gdrive/My Drive/MNIST_single.hd5'
```
```
import numpy as np
import pandas as pd
from datetime import datetime as dt
import time
from constants import (EXPORT_DIR, T, DATA_DIR, VAL_THRESHOLD, TEST_THRESHOLD, SEED)
import itertools
from collections import OrderedDict
import scipy
print(f"SciPy version: {scipy.__version__}")
import scipy.sparse as sp
import random
random.seed(SEED)
import matplotlib.pyplot as plt
FONT_SIZE = 24
plt.rcParams['figure.figsize'] = (20,8)
plt.rcParams['text.usetex'] = True
plt.rcParams['font.size'] = FONT_SIZE
plt.rcParams['legend.fontsize'] = FONT_SIZE
plt.rcParams['xtick.labelsize'] = FONT_SIZE
plt.rcParams['ytick.labelsize'] = FONT_SIZE
%config InlineBackend.figure_format ='retina'
%%time
df = pd.read_csv(DATA_DIR+'rees46-data-cleaned.csv',engine='c', sep=',',usecols=["user_id","csd"],
dtype={'user_id': np.int64,'csd':'category'})
df.info()
new_user_id = pd.DataFrame()
new_user_id['user_id']=df.user_id.unique()
print(f"We will have {T.B}{len(new_user_id):,} unique users.{T.E}")
new_user_id.to_csv(DATA_DIR+'purchase_uid_key.csv', index = True, header=True)
uid_lookup = pd.Series(index=new_user_id.user_id,data=new_user_id.index)
uid_lookup = uid_lookup.to_dict(OrderedDict)
del new_user_id
categories = set(df.csd)
print(f'There are {T.G}{len(categories)}{T.E} categories in the dataframe')
def pair_id_creator(df):
"""Creates pair ID for the dataframe 'df'
we assume a single event type in the dataframe"""
return df[['user_id','csd']].drop_duplicates().sort_values('user_id')
user_csd_pairs = pair_id_creator(df)
total_outcomes = len(user_csd_pairs)
print('Total outcomes:',total_outcomes)
csd_value_counts = user_csd_pairs['csd'].value_counts()
csd_value_counts
```
# Probabilities including **all** categories
```
num_categories = len(csd_value_counts)
csd_key = pd.DataFrame()
csd_key['csd']=csd_value_counts.index
assert num_categories == len(csd_key), 'The number of categories must match regardless of method'
print(f"We will have {T.B}{len(csd_key):,}{T.E} unique categories.")
# csd_key.to_csv(DATA_DIR+'purchase_csd_key.csv', index = True, header=True)
csd_rlookup = pd.Series(index=csd_key.index,data=csd_key.csd)
csd_rlookup = csd_rlookup.to_dict(OrderedDict)
del csd_key
print(f'Last key is {T.B}{num_categories-1}{T.E}, representing {T.B}{csd_rlookup[num_categories-1]}{T.E}.')
def PAgivenB(A,B):
P_B = csd_value_counts[B]/total_outcomes
B_users = set(user_csd_pairs[user_csd_pairs['csd']==B]['user_id'])
A_users = set(user_csd_pairs[user_csd_pairs['csd']==A]['user_id'])
P_AiB = len(A_users.intersection(B_users))/total_outcomes
return P_AiB / P_B
print('Probability of a user buying kettle given they bought a washer:',PAgivenB('appliances.kitchen.kettle','appliances.kitchen.washer'))
%%time
P = np.zeros((num_categories, num_categories))
for i in range(num_categories):
for j in range(num_categories):
P[i,j] = PAgivenB(csd_rlookup[i],csd_rlookup[j])
a = 4
b = 7
print(f'Probability of {csd_rlookup[a]} given {csd_rlookup[b]}: {P[a,b]}')
a = 90
b = 90
print(f'Probability of {csd_rlookup[a]} given {csd_rlookup[b]}: {P[a,b]}')
a = 90
b = 94
print(f'Probability of {csd_rlookup[a]} given {csd_rlookup[b]}: {P[a,b]}')
P_Eye = P-np.eye(num_categories, num_categories)
max_value = np.amax(P_Eye)
print('Maximum value of the array is:',max_value)
np.where(P_Eye == max_value)
a = 0
b = 122
print(f'Probability of {csd_rlookup[a]} given {csd_rlookup[b]}: {P[a,b]}')
df[df['csd']=='apparel.shoes']
df[df['user_id']==1515915625512096000]
```
### **Conclusion:** we need to eliminate categories with very few purchases!
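The reason follows directly from what `PAgivenB` computes (the shared `total_outcomes` denominator cancels):

$$
P(A \mid B) \;=\; \frac{P(A \cap B)}{P(B)} \;=\; \frac{\lvert \mathrm{users}(A) \cap \mathrm{users}(B) \rvert}{\lvert \mathrm{users}(B) \rvert}
$$

If a category $B$ was purchased by only one user, then $\lvert \mathrm{users}(B) \rvert = 1$ and $P(A \mid B) = 1$ for every category $A$ that this single shopper also bought, which is plausibly what the `apparel.shoes` inspection above is picking up. These are degenerate estimates driven by one user rather than real associations, so restricting the matrix to the most frequently purchased categories (below, the top 20% of categories by number of purchasing users) avoids such one-user artifacts.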
```
num_categories = int(len(csd_value_counts)*.2)
num_categories
csd_key = pd.DataFrame()
csd_key['csd']=csd_value_counts.index[:num_categories]
assert num_categories == len(csd_key), 'The number of categories must match regardless of method'
print(f"We will have {T.B}{len(csd_key):,}{T.E} unique categories.")
csd_key.to_csv(DATA_DIR+'purchase_csd_key.csv', index = True, header=True)
csd_rlookup = pd.Series(index=csd_key.index,data=csd_key.csd)
csd_rlookup = csd_rlookup.to_dict(OrderedDict)
csd_lookup = pd.Series(index=csd_key.csd,data=csd_key.index)
csd_lookup = csd_lookup.to_dict(OrderedDict)
del csd_key
print(f'Last key is {T.B}{num_categories-1}{T.E}, representing {T.B}{csd_rlookup[num_categories-1]}{T.E}.')
def PAgivenB(A,B):
P_B = csd_value_counts[B]/total_outcomes
B_users = set(user_csd_pairs[user_csd_pairs['csd']==B]['user_id'])
A_users = set(user_csd_pairs[user_csd_pairs['csd']==A]['user_id'])
P_AiB = len(A_users.intersection(B_users))/total_outcomes
return P_AiB / P_B
print('Probability of a user buying kettle given they bought a washer:',PAgivenB('appliances.kitchen.kettle','appliances.kitchen.washer'))
%%time
P = np.zeros((num_categories, num_categories))
for i in range(num_categories):
for j in range(num_categories):
P[i,j] = PAgivenB(csd_rlookup[i],csd_rlookup[j])
P_Eye = P-np.eye(num_categories, num_categories)
max_value = np.amax(P_Eye)
print('Maximum value of the array is:',max_value)
(a,b) = np.where(P_Eye == max_value)
a,b = a[0],b[0]
print(f'Probability of {csd_rlookup[a]} given {csd_rlookup[b]}: {P[a,b]}')
print(f'Probability of {csd_rlookup[b]} given {csd_rlookup[a]}: {P[b,a]}')
min_value = np.amin(P)
print('Minimum value of the array is:',min_value)
np.where(P_Eye == max_value)
(a,b) = np.where(P == min_value)
a,b = a[0],b[0]
print(f'Probability of {csd_rlookup[a]} given {csd_rlookup[b]}: {P[a,b]}')
```
# Predicting notebook sales
```
print(f"computers.notebook (id:{csd_lookup['computers.notebook']}) sales prediction")
start_time = time.time()
number_of_users = df['user_id'].unique().shape[0]
number_of_features = num_categories
def user_experience_matrix(df,category_id_to_predict,print_freq=30000):
last_index = df.shape[0]-1
# Use np.float32 for torch.cuda.FloatTensor.or np.float16 for torch.cuda.HalfTensor (float64 not recommended)
uxm = sp.dok_matrix((number_of_users, number_of_features), dtype=np.float32)
print(f" # | User | {'Category':40} | Previous | {T.b}New UX{T.E}")
ignored_keys = 0
for row in df.itertuples():
uid = uid_lookup[row.user_id]
try:
csd = csd_lookup[row.csd]
prev_ux = uxm[uid,csd]
ux = np.tanh(prev_ux+P[category_id_to_predict,csd])
uxm[uid,csd] = ux
if (row.Index % print_freq == 0) or (row.Index == last_index):
print(f"{row.Index:8} | "+
f"{uid:6} | "+
f"{row.csd:40} | "+
f"{prev_ux:8.5f} | "+
f"{T.b}{ux:8.5f}{T.E}")
except KeyError:
ignored_keys += 1
if (row.Index % print_freq == 0) or (row.Index == last_index):
print(f"{row.Index:8} | "+
f"{uid:6} | "+
f"{row.csd:40} | "+
f"{T.R} ignored category{T.E}")
print(f'Ignored keys: {ignored_keys}')
return uxm
%%time
uxm = user_experience_matrix(df,1)
print(f"Elapsed time: {time.time()-start_time:.2f} seconds")
```
# Train - test - validation split
```
def save_to_npz(X,path):
X = X.tocoo()
sp.save_npz(path,X)
print(f"{T.G}Sparse matrix saved to: {path}{T.E}")
print(f"Train: {VAL_THRESHOLD*100:.2f}% \nValidation: {(TEST_THRESHOLD-VAL_THRESHOLD)*100:.2f}% \nTest: {(1-TEST_THRESHOLD)*100:.2f}%")
NNZ = uxm.nnz
print(f"Number of stored values: {NNZ:,}")
uxm.shape
%%time
uxm_train = sp.dok_matrix.copy(uxm)
uxm_val = sp.dok_matrix((number_of_users, number_of_features), dtype=np.float32)
uxm_test = sp.dok_matrix((number_of_users, number_of_features), dtype=np.float32)
%%time
rows,cols = uxm_train.nonzero()
for row,col in zip(rows,cols):
rnd = random.random()
if rnd > TEST_THRESHOLD:
uxm_test[row,col] = uxm_train[row,col]
uxm_train[row,col] = 0
elif rnd > VAL_THRESHOLD:
uxm_val[row,col] = uxm_train[row,col]
uxm_train[row,col] = 0
print(f"Number of train data values: {uxm_train.nnz:,} ({uxm_train.nnz*100/NNZ:.2f}%)")
print(f"Number of validation data values: {uxm_val.nnz:,} ({uxm_val.nnz*100/NNZ:.2f}%)")
print(f"Number of test data values: {uxm_test.nnz:,} ({uxm_test.nnz*100/NNZ:.2f}%)")
errormessage = '''All datapoints should be in either the train, the test or the validation dataset.
The reason might be a change in how .nnz of a DOK matrix (scipy.sparse.dok_matrix) is calculated.
In SciPy 1.5.2, setting a value to zero explicitly (X[i,j]=0) is not counted by .nnz'''
assert NNZ - uxm_train.nnz - uxm_val.nnz - uxm_test.nnz == 0, errormessage
save_to_npz(uxm,DATA_DIR+r'uxm-purchase.npz')
save_to_npz(uxm_train,DATA_DIR+r'uxm_train-purchase.npz')
# save_to_npz(uxm_val,VAL_DATA_PATH)
# save_to_npz(uxm_test,TEST_DATA_PATH)
```
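A quick, self-contained check of the `.nnz` behaviour the assert above relies on (this only mirrors the note in the error message about SciPy 1.5.2; the exact behaviour may differ in other releases):
```
import numpy as np
import scipy.sparse as sp

demo = sp.dok_matrix((2, 2), dtype=np.float32)
demo[0, 0] = 0.5
print(demo.nnz)   # 1 stored value
demo[0, 0] = 0    # an explicit zero is not kept as a stored value
print(demo.nnz)   # back to 0, which is why moving entries out of uxm_train lowers its nnz
```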
# Event types in the likelihood matrix
```
event_counts = df.csd.value_counts()[:num_categories]
event_counts
event_counts.sum()
fig, ax = plt.subplots(figsize=(16, 9), subplot_kw=dict(aspect="equal"))
# W - width; P - precision; B - bold; N - normal
W = 2
P = 2
E = 'events'
event_perc = df.csd.value_counts(normalize=True)[:num_categories] * 100
event_type_legend = [r"\textbf{Smartphone}"+f"\n{event_perc['electronics.smartphone']:{W}.{P}f}\%\n{event_counts['electronics.smartphone']:,} {E}",
r"\textbf{Notebook}"+f"\n{event_perc['computers.notebook']:{W}.{P}f}\%\n{event_counts['computers.notebook']:,} {E}",
# r"\textbf{Refrigerator}"+f"\n{event_perc['appliances.kitchen.refrigerators']:{W}.{P}f}\%\n{event_counts['appliances.kitchen.refrigerators']:,} {E}",
# r"\textbf{Purchased}"+f"\n{event_perc['purchase']:{W}.{P}f}\%\n{event_counts['purchase']:,} {E}"
]
cmap = plt.get_cmap("tab20")
wedges, texts = ax.pie(event_counts, wedgeprops=dict(width=0.382), textprops=dict(color="w"), startangle=-40, colors=cmap([0,5,6,4]))
bbox_props = dict(boxstyle="square,pad=0.618", fc="w", ec="k", lw=0.72)
kw = dict(arrowprops=dict(arrowstyle="-"),
bbox=bbox_props, zorder=0, va="center")
for i, p in enumerate(wedges):
ang = (p.theta2 - p.theta1)/2. + p.theta1
y = np.sin(np.deg2rad(ang))
x = np.cos(np.deg2rad(ang))
horizontalalignment = {-1: "right", 1: "left"}[int(np.sign(x))]
connectionstyle = "angle,angleA=0,angleB={}".format(ang)
kw["arrowprops"].update({"connectionstyle": connectionstyle})
if i <2:
ax.annotate(event_type_legend[i], xy=(x, y), xytext=(1.35*np.sign(x), 1.4*y),horizontalalignment=horizontalalignment, **kw)
fig.set_facecolor('w')
# ax.set_title("Logged user events")
plt.tight_layout()
fig.savefig(EXPORT_DIR+'purchase-percentage-of-events.png',dpi=300)
fig.savefig(EXPORT_DIR+'purchase-percentage-of-events.pdf')
plt.show()
```
|
github_jupyter
|
import numpy as np
import pandas as pd
from datetime import datetime as dt
import time
from constants import (EXPORT_DIR, T, DATA_DIR, VAL_THRESHOLD, TEST_THRESHOLD, SEED)
import itertools
from collections import OrderedDict
import scipy
print(f"SciPy version: {scipy.__version__}")
import scipy.sparse as sp
import random
random.seed(SEED)
import matplotlib.pyplot as plt
FONT_SIZE = 24
plt.rcParams['figure.figsize'] = (20,8)
plt.rcParams['text.usetex'] = True
plt.rcParams['font.size'] = FONT_SIZE
plt.rcParams['legend.fontsize'] = FONT_SIZE
plt.rcParams['xtick.labelsize'] = FONT_SIZE
plt.rcParams['ytick.labelsize'] = FONT_SIZE
%config InlineBackend.figure_format ='retina'
%%time
df = pd.read_csv(DATA_DIR+'rees46-data-cleaned.csv',engine='c', sep=',',usecols=["user_id","csd"],
dtype={'user_id': np.int64,'csd':'category'})
df.info()
new_user_id = pd.DataFrame()
new_user_id['user_id']=df.user_id.unique()
print(f"We will have {T.B}{len(new_user_id):,} unique users.{T.E}")
new_user_id.to_csv(DATA_DIR+'purchase_uid_key.csv', index = True, header=True)
uid_lookup = pd.Series(index=new_user_id.user_id,data=new_user_id.index)
uid_lookup = uid_lookup.to_dict(OrderedDict)
del new_user_id
categories = set(df.csd)
print(f'There are {T.G}{len(categories)}{T.E} categories in the dataframe')
def pair_id_creator(df):
"""Creates pair ID for the dataframe 'df'
we assume a single event type in the dataframe"""
return df[['user_id','csd']].drop_duplicates().sort_values('user_id')
user_csd_pairs = pair_id_creator(df)
total_outcomes = len(user_csd_pairs)
print('Total outcomes:',total_outcomes)
csd_value_counts = user_csd_pairs['csd'].value_counts()
csd_value_counts
num_categories = len(csd_value_counts)
csd_key = pd.DataFrame()
csd_key['csd']=csd_value_counts.index
assert num_categories == len(csd_key), 'The number of categories must match regardless of method'
print(f"We will have {T.B}{len(csd_key):,}{T.E} unique categories.")
# csd_key.to_csv(DATA_DIR+'purchase_csd_key.csv', index = True, header=True)
csd_rlookup = pd.Series(index=csd_key.index,data=csd_key.csd)
csd_rlookup = csd_rlookup.to_dict(OrderedDict)
del csd_key
print(f'Last key is {T.B}{num_categories-1}{T.E}, representing {T.B}{csd_rlookup[num_categories-1]}{T.E}.')
def PAgivenB(A,B):
P_B = csd_value_counts[B]/total_outcomes
B_users = set(user_csd_pairs[user_csd_pairs['csd']==B]['user_id'])
A_users = set(user_csd_pairs[user_csd_pairs['csd']==A]['user_id'])
P_AiB = len(A_users.intersection(B_users))/total_outcomes
return P_AiB / P_B
print('Probability of a user buying kettle given they bought a washer:',PAgivenB('appliances.kitchen.kettle','appliances.kitchen.washer'))
%%time
P = np.zeros((num_categories, num_categories))
for i in range(num_categories):
for j in range(num_categories):
P[i,j] = PAgivenB(csd_rlookup[i],csd_rlookup[j])
a = 4
b = 7
print(f'Probability of {csd_rlookup[a]} given {csd_rlookup[b]}: {P[a,b]}')
a = 90
b = 90
print(f'Probability of {csd_rlookup[a]} given {csd_rlookup[b]}: {P[a,b]}')
a = 90
b = 94
print(f'Probability of {csd_rlookup[a]} given {csd_rlookup[b]}: {P[a,b]}')
P_Eye = P-np.eye(num_categories, num_categories)
max_value = np.amax(P_Eye)
print('Maximum value of the array is:',max_value)
np.where(P_Eye == max_value)
a = 0
b = 122
print(f'Probability of {csd_rlookup[a]} given {csd_rlookup[b]}: {P[a,b]}')
df[df['csd']=='apparel.shoes']
df[df['user_id']==1515915625512096000]
num_categories = int(len(csd_value_counts)*.2)
num_categories
csd_key = pd.DataFrame()
csd_key['csd']=csd_value_counts.index[:num_categories]
assert num_categories == len(csd_key), 'The number of categories must match regardless of method'
print(f"We will have {T.B}{len(csd_key):,}{T.E} unique categories.")
csd_key.to_csv(DATA_DIR+'purchase_csd_key.csv', index = True, header=True)
csd_rlookup = pd.Series(index=csd_key.index,data=csd_key.csd)
csd_rlookup = csd_rlookup.to_dict(OrderedDict)
csd_lookup = pd.Series(index=csd_key.csd,data=csd_key.index)
csd_lookup = csd_lookup.to_dict(OrderedDict)
del csd_key
print(f'Last key is {T.B}{num_categories-1}{T.E}, representing {T.B}{csd_rlookup[num_categories-1]}{T.E}.')
def PAgivenB(A,B):
P_B = csd_value_counts[B]/total_outcomes
B_users = set(user_csd_pairs[user_csd_pairs['csd']==B]['user_id'])
A_users = set(user_csd_pairs[user_csd_pairs['csd']==A]['user_id'])
P_AiB = len(A_users.intersection(B_users))/total_outcomes
return P_AiB / P_B
print('Probability of a user buying kettle given they bought a washer:',PAgivenB('appliances.kitchen.kettle','appliances.kitchen.washer'))
%%time
P = np.zeros((num_categories, num_categories))
for i in range(num_categories):
for j in range(num_categories):
P[i,j] = PAgivenB(csd_rlookup[i],csd_rlookup[j])
P_Eye = P-np.eye(num_categories, num_categories)
max_value = np.amax(P_Eye)
print('Maximum value of the array is:',max_value)
(a,b) = np.where(P_Eye == max_value)
a,b = a[0],b[0]
print(f'Probability of {csd_rlookup[a]} given {csd_rlookup[b]}: {P[a,b]}')
print(f'Probability of {csd_rlookup[b]} given {csd_rlookup[a]}: {P[b,a]}')
min_value = np.amin(P)
print('Minimum value of the array is:',min_value)
np.where(P_Eye == max_value)
(a,b) = np.where(P == min_value)
a,b = a[0],b[0]
print(f'Probability of {csd_rlookup[a]} given {csd_rlookup[b]}: {P[a,b]}')
print(f"computers.notebook (id:{csd_lookup['computers.notebook']}) sales prediction")
start_time = time.time()
number_of_users = df['user_id'].unique().shape[0]
number_of_features = num_categories
def user_experience_matrix(df,category_id_to_predict,print_freq=30000):
last_index = df.shape[0]-1
    # Use np.float32 for torch.cuda.FloatTensor or np.float16 for torch.cuda.HalfTensor (float64 not recommended)
uxm = sp.dok_matrix((number_of_users, number_of_features), dtype=np.float32)
print(f" # | User | {'Category':40} | Previous | {T.b}New UX{T.E}")
ignored_keys = 0
for row in df.itertuples():
uid = uid_lookup[row.user_id]
try:
csd = csd_lookup[row.csd]
prev_ux = uxm[uid,csd]
ux = np.tanh(prev_ux+P[category_id_to_predict,csd])
uxm[uid,csd] = ux
if (row.Index % print_freq == 0) or (row.Index == last_index):
print(f"{row.Index:8} | "+
f"{uid:6} | "+
f"{row.csd:40} | "+
f"{prev_ux:8.5f} | "+
f"{T.b}{ux:8.5f}{T.E}")
except KeyError:
ignored_keys += 1
if (row.Index % print_freq == 0) or (row.Index == last_index):
print(f"{row.Index:8} | "+
f"{uid:6} | "+
f"{row.csd:40} | "+
f"{T.R} ignored category{T.E}")
print(f'Ignored keys: {ignored_keys}')
return uxm
%%time
uxm = user_experience_matrix(df,1)
print(f"Elapsed time: {time.time()-start_time:.2f} seconds")
def save_to_npz(X,path):
X = X.tocoo()
sp.save_npz(path,X)
print(f"{T.G}Sparse matrix saved to: {path}{T.E}")
print(f"Train: {VAL_THRESHOLD*100:.2f}% \nValidation: {(TEST_THRESHOLD-VAL_THRESHOLD)*100:.2f}% \nTest: {(1-TEST_THRESHOLD)*100:.2f}%")
NNZ = uxm.nnz
print(f"Number of stored values: {NNZ:,}")
uxm.shape
%%time
uxm_train = sp.dok_matrix.copy(uxm)
uxm_val = sp.dok_matrix((number_of_users, number_of_features), dtype=np.float32)
uxm_test = sp.dok_matrix((number_of_users, number_of_features), dtype=np.float32)
%%time
rows,cols = uxm_train.nonzero()
for row,col in zip(rows,cols):
rnd = random.random()
if rnd > TEST_THRESHOLD:
uxm_test[row,col] = uxm_train[row,col]
uxm_train[row,col] = 0
elif rnd > VAL_THRESHOLD:
uxm_val[row,col] = uxm_train[row,col]
uxm_train[row,col] = 0
print(f"Number of train data values: {uxm_train.nnz:,} ({uxm_train.nnz*100/NNZ:.2f}%)")
print(f"Number of validation data values: {uxm_val.nnz:,} ({uxm_val.nnz*100/NNZ:.2f}%)")
print(f"Number of test data values: {uxm_test.nnz:,} ({uxm_test.nnz*100/NNZ:.2f}%)")
errormessage = '''All datapoints should be in either the train, the test or the validation dataset.
The reason might be a change in how .nnz of a DOK matrix (scipy.sparse.dok_matrix) is calculated.
In SciPy 1.5.2, setting a value to zero explicitly (X[i,j]=0) is not counted by .nnz'''
assert NNZ - uxm_train.nnz - uxm_val.nnz - uxm_test.nnz == 0, errormessage
save_to_npz(uxm,DATA_DIR+r'uxm-purchase.npz')
save_to_npz(uxm_train,DATA_DIR+r'uxm_train-purchase.npz')
# save_to_npz(uxm_val,VAL_DATA_PATH)
# save_to_npz(uxm_test,TEST_DATA_PATH)
event_counts = df.csd.value_counts()[:num_categories]
event_counts
event_counts.sum()
fig, ax = plt.subplots(figsize=(16, 9), subplot_kw=dict(aspect="equal"))
# W - width; P - precision; B - bold; N - normal
W = 2
P = 2
E = 'events'
event_perc = df.csd.value_counts(normalize=True)[:num_categories] * 100
event_type_legend = [r"\textbf{Smartphone}"+f"\n{event_perc['electronics.smartphone']:{W}.{P}f}\%\n{event_counts['electronics.smartphone']:,} {E}",
r"\textbf{Notebook}"+f"\n{event_perc['computers.notebook']:{W}.{P}f}\%\n{event_counts['computers.notebook']:,} {E}",
# r"\textbf{Refrigerator}"+f"\n{event_perc['appliances.kitchen.refrigerators']:{W}.{P}f}\%\n{event_counts['appliances.kitchen.refrigerators']:,} {E}",
# r"\textbf{Purchased}"+f"\n{event_perc['purchase']:{W}.{P}f}\%\n{event_counts['purchase']:,} {E}"
]
cmap = plt.get_cmap("tab20")
wedges, texts = ax.pie(event_counts, wedgeprops=dict(width=0.382), textprops=dict(color="w"), startangle=-40, colors=cmap([0,5,6,4]))
bbox_props = dict(boxstyle="square,pad=0.618", fc="w", ec="k", lw=0.72)
kw = dict(arrowprops=dict(arrowstyle="-"),
bbox=bbox_props, zorder=0, va="center")
for i, p in enumerate(wedges):
ang = (p.theta2 - p.theta1)/2. + p.theta1
y = np.sin(np.deg2rad(ang))
x = np.cos(np.deg2rad(ang))
horizontalalignment = {-1: "right", 1: "left"}[int(np.sign(x))]
connectionstyle = "angle,angleA=0,angleB={}".format(ang)
kw["arrowprops"].update({"connectionstyle": connectionstyle})
if i <2:
ax.annotate(event_type_legend[i], xy=(x, y), xytext=(1.35*np.sign(x), 1.4*y),horizontalalignment=horizontalalignment, **kw)
fig.set_facecolor('w')
# ax.set_title("Logged user events")
plt.tight_layout()
fig.savefig(EXPORT_DIR+'purchase-percentage-of-events.png',dpi=300)
fig.savefig(EXPORT_DIR+'purchase-percentage-of-events.pdf')
plt.show()
| 0.407451 | 0.519582 |
```
%reload_ext autoreload
%autoreload 2
from nb_008 import *
```
# Rossmann
## Data preparation / Feature engineering
```
PATH=Path('data/rossmann/')
table_names = ['train', 'store', 'store_states', 'state_names', 'googletrend', 'weather', 'test']
tables = [pd.read_csv(PATH/f'{fname}.csv', low_memory=False) for fname in table_names]
train, store, store_states, state_names, googletrend, weather, test = tables
len(train),len(test)
```
We turn state Holidays to booleans, to make them more convenient for modeling. We can do calculations on pandas fields using notation very similar (often identical) to numpy.
```
train.StateHoliday = train.StateHoliday!='0'
test.StateHoliday = test.StateHoliday!='0'
```
`join_df` is a function for joining tables on specific fields. By default, we'll be doing a left outer join of `right` on the `left` argument using the given fields for each table.
Pandas does joins using the `merge` method. The `suffixes` argument describes the naming convention for duplicate fields. We've elected to leave the duplicate field names on the left untouched, and append a "\_y" to those on the right.
```
def join_df(left, right, left_on, right_on=None, suffix='_y'):
if right_on is None: right_on = left_on
return left.merge(right, how='left', left_on=left_on, right_on=right_on,
suffixes=("", suffix))
```
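To make the `suffixes` convention concrete, here is a tiny made-up example (not one of the Rossmann tables): the shared column keeps its name on the left and picks up a `_y` suffix on the right.
```
import pandas as pd  # already available here via the nb_008 import
left = pd.DataFrame({'Store': [1, 2], 'State': ['HE', 'NI']})
right = pd.DataFrame({'Store': [1, 2], 'State': ['HE', 'BY']})
join_df(left, right, 'Store')
#    Store State State_y
# 0      1    HE      HE
# 1      2    NI      BY
```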
Join weather/state names.
```
weather = join_df(weather, state_names, "file", "StateName")
```
In pandas you can add new columns to a dataframe by simply defining it. We'll do this for googletrends by extracting dates and state names from the given data and adding those columns.
We're also going to replace all instances of state name 'NI' to match the usage in the rest of the data: 'HB,NI'. This is a good opportunity to highlight pandas indexing. We can use `.loc[rows, cols]` to select a list of rows and a list of columns from the dataframe. In this case, we're selecting rows w/ statename 'NI' by using a boolean list `googletrend.State=='NI'` and selecting "State".
```
googletrend['Date'] = googletrend.week.str.split(' - ', expand=True)[0]
googletrend['State'] = googletrend.file.str.split('_', expand=True)[2]
googletrend.loc[googletrend.State=='NI', "State"] = 'HB,NI'
```
The following extracts particular date fields from a complete datetime for the purpose of constructing categoricals.
You should *always* consider this feature extraction step when working with date-time. Without expanding your date-time into these additional fields, you can't capture any trend/cyclical behavior as a function of time at any of these granularities. We'll add to every table with a date field.
```
def add_datepart(df, fldname, drop=True, time=False):
"Helper function that adds columns relevant to a date."
fld = df[fldname]
fld_dtype = fld.dtype
if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
fld_dtype = np.datetime64
if not np.issubdtype(fld_dtype, np.datetime64):
df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True)
targ_pre = re.sub('[Dd]ate$', '', fldname)
attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']
if time: attr = attr + ['Hour', 'Minute', 'Second']
for n in attr: df[targ_pre + n] = getattr(fld.dt, n.lower())
df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10 ** 9
if drop: df.drop(fldname, axis=1, inplace=True)
add_datepart(weather, "Date", drop=False)
add_datepart(googletrend, "Date", drop=False)
add_datepart(train, "Date", drop=False)
add_datepart(test, "Date", drop=False)
```
The Google trends data has a special category for the whole of Germany - we'll pull that out so we can use it explicitly.
```
trend_de = googletrend[googletrend.file == 'Rossmann_DE']
```
Now we can outer join all of our data into a single dataframe. Recall that in outer joins every time a value in the joining field on the left table does not have a corresponding value on the right table, the corresponding row in the new table has Null values for all right table fields. One way to check that all records are consistent and complete is to check for Null values post-join, as we do here.
*Aside*: Why not just do an inner join?
If you are assuming that all records are complete and match on the field you desire, an inner join will do the same thing as an outer join. However, in the event you are wrong or a mistake is made, an outer join followed by a null-check will catch it. (Comparing before/after # of rows for inner join is equivalent, but requires keeping track of before/after row #'s. Outer join is easier.)
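As a minimal sketch of that null-check pattern (made-up frames, not the real tables): Store 3 has no match on the right, so the left join leaves a null behind and the null count flags it.
```
toy_left = pd.DataFrame({'Store': [1, 2, 3]})
toy_right = pd.DataFrame({'Store': [1, 2], 'State': ['HE', 'NI']})
toy_joined = join_df(toy_left, toy_right, 'Store')
len(toy_joined[toy_joined.State.isnull()])   # 1 -> the incomplete record is caught
```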
```
store = join_df(store, store_states, "Store")
len(store[store.State.isnull()])
joined = join_df(train, store, "Store")
joined_test = join_df(test, store, "Store")
len(joined[joined.StoreType.isnull()]),len(joined_test[joined_test.StoreType.isnull()])
joined = join_df(joined, googletrend, ["State","Year", "Week"])
joined_test = join_df(joined_test, googletrend, ["State","Year", "Week"])
len(joined[joined.trend.isnull()]),len(joined_test[joined_test.trend.isnull()])
joined = joined.merge(trend_de, 'left', ["Year", "Week"], suffixes=('', '_DE'))
joined_test = joined_test.merge(trend_de, 'left', ["Year", "Week"], suffixes=('', '_DE'))
len(joined[joined.trend_DE.isnull()]),len(joined_test[joined_test.trend_DE.isnull()])
joined = join_df(joined, weather, ["State","Date"])
joined_test = join_df(joined_test, weather, ["State","Date"])
len(joined[joined.Mean_TemperatureC.isnull()]),len(joined_test[joined_test.Mean_TemperatureC.isnull()])
for df in (joined, joined_test):
for c in df.columns:
if c.endswith('_y'):
if c in df.columns: df.drop(c, inplace=True, axis=1)
```
Next we'll fill in missing values to avoid complications with `NA`'s. `NA` (not available) is how Pandas indicates missing values; many models have problems when missing values are present, so it's always important to think about how to deal with them. In these cases, we are picking an arbitrary *signal value* that doesn't otherwise appear in the data.
```
for df in (joined,joined_test):
df['CompetitionOpenSinceYear'] = df.CompetitionOpenSinceYear.fillna(1900).astype(np.int32)
df['CompetitionOpenSinceMonth'] = df.CompetitionOpenSinceMonth.fillna(1).astype(np.int32)
df['Promo2SinceYear'] = df.Promo2SinceYear.fillna(1900).astype(np.int32)
df['Promo2SinceWeek'] = df.Promo2SinceWeek.fillna(1).astype(np.int32)
```
Next we'll extract features "CompetitionOpenSince" and "CompetitionDaysOpen". Note the use of `apply()` in mapping a function across dataframe values.
```
for df in (joined,joined_test):
df["CompetitionOpenSince"] = pd.to_datetime(dict(year=df.CompetitionOpenSinceYear,
month=df.CompetitionOpenSinceMonth, day=15))
df["CompetitionDaysOpen"] = df.Date.subtract(df.CompetitionOpenSince).dt.days
```
We'll replace some erroneous / outlying data.
```
for df in (joined,joined_test):
df.loc[df.CompetitionDaysOpen<0, "CompetitionDaysOpen"] = 0
df.loc[df.CompetitionOpenSinceYear<1990, "CompetitionDaysOpen"] = 0
```
We add "CompetitionMonthsOpen" field, limiting the maximum to 2 years to limit number of unique categories.
```
for df in (joined,joined_test):
df["CompetitionMonthsOpen"] = df["CompetitionDaysOpen"]//30
df.loc[df.CompetitionMonthsOpen>24, "CompetitionMonthsOpen"] = 24
joined.CompetitionMonthsOpen.unique()
```
Same process for Promo dates. You may need to install the `isoweek` package first.
```
# If needed, uncomment:
# ! pip install isoweek
from isoweek import Week
for df in (joined,joined_test):
df["Promo2Since"] = pd.to_datetime(df.apply(lambda x: Week(
x.Promo2SinceYear, x.Promo2SinceWeek).monday(), axis=1).astype(pd.datetime))
df["Promo2Days"] = df.Date.subtract(df["Promo2Since"]).dt.days
for df in (joined,joined_test):
df.loc[df.Promo2Days<0, "Promo2Days"] = 0
df.loc[df.Promo2SinceYear<1990, "Promo2Days"] = 0
df["Promo2Weeks"] = df["Promo2Days"]//7
df.loc[df.Promo2Weeks<0, "Promo2Weeks"] = 0
df.loc[df.Promo2Weeks>25, "Promo2Weeks"] = 25
df.Promo2Weeks.unique()
joined.to_pickle(PATH/'joined')
joined_test.to_pickle(PATH/'joined_test')
```
## Durations
It is common when working with time series data to extract data that explains relationships across rows as opposed to columns, e.g.:
* Running averages
* Time until next event
* Time since last event
This is often difficult to do with most table manipulation frameworks, since they are designed to work with relationships across columns. As such, we've created a class to handle this type of data.
We'll define a function `get_elapsed` for cumulative counting across a sorted dataframe. Given a particular field `fld` to monitor, this function will start tracking time since the last occurrence of that field. When the field is seen again, the counter is set to zero.
Upon initialization, this will result in datetime na's until the field is encountered. This is reset every time a new store is seen. We'll see how to use this shortly.
```
def get_elapsed(fld, pre):
day1 = np.timedelta64(1, 'D')
last_date = np.datetime64()
last_store = 0
res = []
for s,v,d in zip(df.Store.values,df[fld].values, df.Date.values):
if s != last_store:
last_date = np.datetime64()
last_store = s
if v: last_date = d
res.append(((d-last_date).astype('timedelta64[D]') / day1))
df[pre+fld] = res
```
We'll be applying this to a subset of columns:
```
columns = ["Date", "Store", "Promo", "StateHoliday", "SchoolHoliday"]
#df = train[columns]
df = train[columns].append(test[columns])
```
Let's walk through an example.
Say we're looking at School Holiday. We'll first sort by Store, then Date, and then call `get_elapsed('SchoolHoliday', 'After')`:
For the School Holiday field, this will:
* Be applied to every row of the dataframe in order of store and date
* Add to the dataframe the days since last seeing a School Holiday
* If we sort in the other direction, count the days until the next holiday (a small toy illustration follows this list).
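Here is that behaviour on a made-up single-store frame (illustration only; it repeats the loop logic inline and does not touch the real `df` built above):
```
toy = pd.DataFrame({'Store': 1,
                    'Date': pd.date_range('2015-01-01', periods=4),
                    'SchoolHoliday': [0, 1, 0, 0]})
day1 = np.timedelta64(1, 'D')
last_date, res = np.datetime64(), []
for v, d in zip(toy.SchoolHoliday.values, toy.Date.values):
    if v: last_date = d
    res.append((d - last_date).astype('timedelta64[D]') / day1)
res   # [nan, 0.0, 1.0, 2.0] -> days since the last school holiday; the nan gets filled with 0 later
```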
```
fld = 'SchoolHoliday'
df = df.sort_values(['Store', 'Date'])
get_elapsed(fld, 'After')
df = df.sort_values(['Store', 'Date'], ascending=[True, False])
get_elapsed(fld, 'Before')
```
We'll do this for two more fields.
```
fld = 'StateHoliday'
df = df.sort_values(['Store', 'Date'])
get_elapsed(fld, 'After')
df = df.sort_values(['Store', 'Date'], ascending=[True, False])
get_elapsed(fld, 'Before')
fld = 'Promo'
df = df.sort_values(['Store', 'Date'])
get_elapsed(fld, 'After')
df = df.sort_values(['Store', 'Date'], ascending=[True, False])
get_elapsed(fld, 'Before')
```
We're going to set the active index to Date.
```
df = df.set_index("Date")
```
Then set null values from elapsed field calculations to 0.
```
columns = ['SchoolHoliday', 'StateHoliday', 'Promo']
for o in ['Before', 'After']:
for p in columns:
a = o+p
df[a] = df[a].fillna(0).astype(int)
```
Next we'll demonstrate window functions in pandas to calculate rolling quantities.
Here we're sorting by date (`sort_index()`) and counting the number of events of interest (`sum()`) defined in `columns` in the following week (`rolling()`), grouped by Store (`groupby()`). We do the same in the opposite direction.
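As a tiny illustration of the pattern (made-up frame, two stores, a window of 2 instead of 7):
```
toy = pd.DataFrame({'Store': [1, 1, 1, 2, 2, 2],
                    'Promo': [1, 0, 1, 0, 1, 1]},
                   index=pd.to_datetime(['2015-01-01', '2015-01-02', '2015-01-03'] * 2))
toy.groupby('Store')['Promo'].rolling(2, min_periods=1).sum()
# Store 1: 1.0, 1.0, 1.0   Store 2: 0.0, 1.0, 2.0   (indexed by Store and date)
```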
```
bwd = df[['Store']+columns].sort_index().groupby("Store").rolling(7, min_periods=1).sum()
fwd = df[['Store']+columns].sort_index(ascending=False
).groupby("Store").rolling(7, min_periods=1).sum()
```
Next we want to drop the Store indices grouped together in the window function.
Often in pandas, there is an option to do this in place. This is time and memory efficient when working with large datasets.
```
bwd.drop('Store',1,inplace=True)
bwd.reset_index(inplace=True)
fwd.drop('Store',1,inplace=True)
fwd.reset_index(inplace=True)
df.reset_index(inplace=True)
```
Now we'll merge these values onto the df.
```
df = df.merge(bwd, 'left', ['Date', 'Store'], suffixes=['', '_bw'])
df = df.merge(fwd, 'left', ['Date', 'Store'], suffixes=['', '_fw'])
df.drop(columns,1,inplace=True)
df.head()
```
It's usually a good idea to back up large tables of extracted / wrangled features before you join them onto another one, that way you can go back to it easily if you need to make changes to it.
```
df.to_pickle(PATH/'df')
df["Date"] = pd.to_datetime(df.Date)
df.columns
joined = pd.read_pickle(PATH/'joined')
joined_test = pd.read_pickle(PATH/f'joined_test')
joined = join_df(joined, df, ['Store', 'Date'])
joined_test = join_df(joined_test, df, ['Store', 'Date'])
```
The authors also removed all instances where the store had zero sales / was closed. We speculate that this may have cost them a higher standing in the competition. One reason this may be the case is that a little exploratory data analysis reveals that there are often periods where stores are closed, typically for refurbishment. Before and after these periods, there are naturally spikes in sales that one might expect. By omitting this data from their training, the authors gave up the ability to leverage information about these periods to predict this otherwise volatile behavior.
```
joined = joined[joined.Sales!=0]
```
We'll back this up as well.
```
joined.reset_index(inplace=True)
joined_test.reset_index(inplace=True)
joined.to_pickle(PATH/'train_clean')
joined_test.to_pickle(PATH/'test_clean')
```
|
github_jupyter
|
%reload_ext autoreload
%autoreload 2
from nb_008 import *
PATH=Path('data/rossmann/')
table_names = ['train', 'store', 'store_states', 'state_names', 'googletrend', 'weather', 'test']
tables = [pd.read_csv(PATH/f'{fname}.csv', low_memory=False) for fname in table_names]
train, store, store_states, state_names, googletrend, weather, test = tables
len(train),len(test)
train.StateHoliday = train.StateHoliday!='0'
test.StateHoliday = test.StateHoliday!='0'
def join_df(left, right, left_on, right_on=None, suffix='_y'):
if right_on is None: right_on = left_on
return left.merge(right, how='left', left_on=left_on, right_on=right_on,
suffixes=("", suffix))
weather = join_df(weather, state_names, "file", "StateName")
googletrend['Date'] = googletrend.week.str.split(' - ', expand=True)[0]
googletrend['State'] = googletrend.file.str.split('_', expand=True)[2]
googletrend.loc[googletrend.State=='NI', "State"] = 'HB,NI'
def add_datepart(df, fldname, drop=True, time=False):
"Helper function that adds columns relevant to a date."
fld = df[fldname]
fld_dtype = fld.dtype
if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
fld_dtype = np.datetime64
if not np.issubdtype(fld_dtype, np.datetime64):
df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True)
targ_pre = re.sub('[Dd]ate$', '', fldname)
attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']
if time: attr = attr + ['Hour', 'Minute', 'Second']
for n in attr: df[targ_pre + n] = getattr(fld.dt, n.lower())
df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10 ** 9
if drop: df.drop(fldname, axis=1, inplace=True)
add_datepart(weather, "Date", drop=False)
add_datepart(googletrend, "Date", drop=False)
add_datepart(train, "Date", drop=False)
add_datepart(test, "Date", drop=False)
trend_de = googletrend[googletrend.file == 'Rossmann_DE']
store = join_df(store, store_states, "Store")
len(store[store.State.isnull()])
joined = join_df(train, store, "Store")
joined_test = join_df(test, store, "Store")
len(joined[joined.StoreType.isnull()]),len(joined_test[joined_test.StoreType.isnull()])
joined = join_df(joined, googletrend, ["State","Year", "Week"])
joined_test = join_df(joined_test, googletrend, ["State","Year", "Week"])
len(joined[joined.trend.isnull()]),len(joined_test[joined_test.trend.isnull()])
joined = joined.merge(trend_de, 'left', ["Year", "Week"], suffixes=('', '_DE'))
joined_test = joined_test.merge(trend_de, 'left', ["Year", "Week"], suffixes=('', '_DE'))
len(joined[joined.trend_DE.isnull()]),len(joined_test[joined_test.trend_DE.isnull()])
joined = join_df(joined, weather, ["State","Date"])
joined_test = join_df(joined_test, weather, ["State","Date"])
len(joined[joined.Mean_TemperatureC.isnull()]),len(joined_test[joined_test.Mean_TemperatureC.isnull()])
for df in (joined, joined_test):
for c in df.columns:
if c.endswith('_y'):
if c in df.columns: df.drop(c, inplace=True, axis=1)
for df in (joined,joined_test):
df['CompetitionOpenSinceYear'] = df.CompetitionOpenSinceYear.fillna(1900).astype(np.int32)
df['CompetitionOpenSinceMonth'] = df.CompetitionOpenSinceMonth.fillna(1).astype(np.int32)
df['Promo2SinceYear'] = df.Promo2SinceYear.fillna(1900).astype(np.int32)
df['Promo2SinceWeek'] = df.Promo2SinceWeek.fillna(1).astype(np.int32)
for df in (joined,joined_test):
df["CompetitionOpenSince"] = pd.to_datetime(dict(year=df.CompetitionOpenSinceYear,
month=df.CompetitionOpenSinceMonth, day=15))
df["CompetitionDaysOpen"] = df.Date.subtract(df.CompetitionOpenSince).dt.days
for df in (joined,joined_test):
df.loc[df.CompetitionDaysOpen<0, "CompetitionDaysOpen"] = 0
df.loc[df.CompetitionOpenSinceYear<1990, "CompetitionDaysOpen"] = 0
for df in (joined,joined_test):
df["CompetitionMonthsOpen"] = df["CompetitionDaysOpen"]//30
df.loc[df.CompetitionMonthsOpen>24, "CompetitionMonthsOpen"] = 24
joined.CompetitionMonthsOpen.unique()
# If needed, uncomment:
# ! pip install isoweek
from isoweek import Week
for df in (joined,joined_test):
df["Promo2Since"] = pd.to_datetime(df.apply(lambda x: Week(
x.Promo2SinceYear, x.Promo2SinceWeek).monday(), axis=1).astype(pd.datetime))
df["Promo2Days"] = df.Date.subtract(df["Promo2Since"]).dt.days
for df in (joined,joined_test):
df.loc[df.Promo2Days<0, "Promo2Days"] = 0
df.loc[df.Promo2SinceYear<1990, "Promo2Days"] = 0
df["Promo2Weeks"] = df["Promo2Days"]//7
df.loc[df.Promo2Weeks<0, "Promo2Weeks"] = 0
df.loc[df.Promo2Weeks>25, "Promo2Weeks"] = 25
df.Promo2Weeks.unique()
joined.to_pickle(PATH/'joined')
joined_test.to_pickle(PATH/'joined_test')
def get_elapsed(fld, pre):
day1 = np.timedelta64(1, 'D')
last_date = np.datetime64()
last_store = 0
res = []
for s,v,d in zip(df.Store.values,df[fld].values, df.Date.values):
if s != last_store:
last_date = np.datetime64()
last_store = s
if v: last_date = d
res.append(((d-last_date).astype('timedelta64[D]') / day1))
df[pre+fld] = res
columns = ["Date", "Store", "Promo", "StateHoliday", "SchoolHoliday"]
#df = train[columns]
df = train[columns].append(test[columns])
fld = 'SchoolHoliday'
df = df.sort_values(['Store', 'Date'])
get_elapsed(fld, 'After')
df = df.sort_values(['Store', 'Date'], ascending=[True, False])
get_elapsed(fld, 'Before')
fld = 'StateHoliday'
df = df.sort_values(['Store', 'Date'])
get_elapsed(fld, 'After')
df = df.sort_values(['Store', 'Date'], ascending=[True, False])
get_elapsed(fld, 'Before')
fld = 'Promo'
df = df.sort_values(['Store', 'Date'])
get_elapsed(fld, 'After')
df = df.sort_values(['Store', 'Date'], ascending=[True, False])
get_elapsed(fld, 'Before')
df = df.set_index("Date")
columns = ['SchoolHoliday', 'StateHoliday', 'Promo']
for o in ['Before', 'After']:
for p in columns:
a = o+p
df[a] = df[a].fillna(0).astype(int)
bwd = df[['Store']+columns].sort_index().groupby("Store").rolling(7, min_periods=1).sum()
fwd = df[['Store']+columns].sort_index(ascending=False
).groupby("Store").rolling(7, min_periods=1).sum()
bwd.drop('Store',1,inplace=True)
bwd.reset_index(inplace=True)
fwd.drop('Store',1,inplace=True)
fwd.reset_index(inplace=True)
df.reset_index(inplace=True)
df = df.merge(bwd, 'left', ['Date', 'Store'], suffixes=['', '_bw'])
df = df.merge(fwd, 'left', ['Date', 'Store'], suffixes=['', '_fw'])
df.drop(columns,1,inplace=True)
df.head()
df.to_pickle(PATH/'df')
df["Date"] = pd.to_datetime(df.Date)
df.columns
joined = pd.read_pickle(PATH/'joined')
joined_test = pd.read_pickle(PATH/f'joined_test')
joined = join_df(joined, df, ['Store', 'Date'])
joined_test = join_df(joined_test, df, ['Store', 'Date'])
joined = joined[joined.Sales!=0]
joined.reset_index(inplace=True)
joined_test.reset_index(inplace=True)
joined.to_pickle(PATH/'train_clean')
joined_test.to_pickle(PATH/'test_clean')
| 0.356895 | 0.928539 |
## Course Objectives
- Retrieve desired result set using the different types of joins in SQL
- Join more than two tables in a database using SQL Joins
## Course Structure
This course is divided into 5 parts:
- Course Overview: This introductory reading material.
- Reading: SQL files for project
- Mastering SQL Joins: This is the hands on project that we will work on in Rhyme.
- Ungraded Quiz: Check your understanding of the concepts learned in the hands on project with this ungraded quiz.
- Graded Quiz: This is the final assignment that you need to pass in order to finish the course successfully.
### 1. Getting Started
##########################################################
##########################################################
-- Guided Project: Mastering SQL Joins in PostgreSQL
##########################################################
##########################################################
#############################
-- Task One: Getting Started
-- In this task, we will retrieve data from the dept_manager_dup and
-- departments_dup tables in the database
#############################
-- 1.1: Retrieve all data from the dept_manager_dup table
__select * from dept_manager_dup
order by dept_no;__
-- 1.2: Retrieve all data from the departments_dup table
__select * from departments_dup
order by dept_no;__
#### There are basically 4 types of joins in SQL:
- Inner Join
- Left Join
- Right Join
- Full Join
Other:
- Self-Join
- Cross-Join

### 2. Inner Joins
```
# Assume that department no is unique in the 2 tables.
```
__SELECT m.emp_no, m.dept_no, d.dept_name__
__FROM dept_manager_dup m__
__INNER JOIN departments_dup d__ -- INNER JOIN or JOIN both will work the same
__ON m.dept_no = d.dept_no__
__ORDER BY m.dept_no;__
### 3. Duplicate Records
```
## Now, assume that the 2 tables have duplicate values, i.e., redundant entries
## this will never happen in well-designed company databases, as you will be joining on primary keys
```
__SELECT m.emp_no, m.dept_no, d.dept_name__
__FROM dept_manager_dup m__
__INNER JOIN departments_dup d__
__ON m.dept_no = d.dept_no__
__GROUP BY m.emp_no, m.dept_no, d.dept_name__ -- this clause will take care of the duplicate entries. Make sure that the GROUP BY covers all selected columns.
__ORDER BY m.dept_no;__
### 4. Left Join or Left Outer Join
__SELECT m.dept_no, m.emp_no, d.dept_name__
__FROM dept_manager_dup m__
__LEFT JOIN departments_dup d__ -- LEFT JOIN or LEFT OUTER JOIN both will work the same
__ON m.dept_no = d.dept_no__
__ORDER BY m.dept_no__
This will show all the rows from the m table. Rows in m that have no match in d will show null for the d columns.
### 5. RIGHT JOIN
Works like the LEFT JOIN, but keeps all rows from the right table instead of the left.
### 6. JOIN and WHERE clause used together
__SELECT e.emp_no, e.first_name, e.last_name, dm.dept_no, dm.from_date__
__FROM employees e__
__LEFT JOIN dept_manager dm__
__ON e.emp_no = dm.emp_no__
__WHERE e.hire_date < '1985-01-31'__
__ORDER BY dm.dept_no, e.emp_no;__
### 7. Using Aggregate Functions with Joins
SQL Aggregate functions -> count, sum, min, max and avg
-- GENDER IS IN THE EMPLOYEES TABLE, SALARY IS IN THE SALARIES TABLE. FIND THE AVG SALARY FOR EACH GENDER
__SELECT e.gender, ROUND(AVG(salary), 2) AS avg_salary__
__FROM employees e__
__JOIN salaries s__
__ON e.emp_no = s.emp_no__
__GROUP BY e.gender;__
### 8. Join more than two tables in SQL
Task -> Extract a list of managers with the following details
- employees table -> first name, last name
- department manager's table -> dept_no
- departments table -> dept_name
__SELECT e.emp_no, e.first_name, e.last_name, m.dept_no, e.hire_date, m.to_date, d.dept_name__
__FROM employees e__
__JOIN dept_manager m__
__ON e.emp_no = m.emp_no__
__JOIN departments d__
__ON m.dept_no = d.dept_no;__
-- Having clause
SELECT COUNT(CustomerID), Country
FROM Customers
GROUP BY Country
HAVING COUNT(CustomerID) > 5;
|
github_jupyter
|
# Assume that department no is unique in the 2 tables.
## Now, assume that the 2 tables have duplicate values, i.e., redundant entries
## this will never happen in well-designed company databases, as you will be joining on primary keys
| 0.351645 | 0.866698 |
### Configure Notebook
#### Import Libraries
```
import base64
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from io import BytesIO
import pandas as pd
import numpy as np
```
#### Define Functions
```
def load_encrypted_dataset(encrypted_file, decryption_key_path):
    ''' A function that uses a decryption key to decrypt a file.
Parameters:
encrypted_file (str): The path to a file which has been encrypted.
decryption_key_path (str): The path to the corresponding decryption key.
Returns:
decrypted_dataset (Pandas DataFrame): A decrypted version of the encrypted file.
'''
file = open(decryption_key_path, 'rb') # Open file.
decryption_key = file.read() # Read decryption key.
file.close() # Close file.
fernet = Fernet(decryption_key)
with open(encrypted_file, 'rb') as f:
encrypted_file = f.read()
decrypted_file = fernet.decrypt(encrypted_file) # Decrypt file.
decrypted_dataset = pd.read_csv(BytesIO(decrypted_file), index_col=0) # Convert bytes data to Pandas DataFrame.
return decrypted_dataset
```
### Test
#### Generate Random Dataset
```
unsecure_df = pd.DataFrame(np.random.randint(0,100,size=(100, 4)), columns=list('ABCD'))
unsecure_df.to_csv('unsecure.csv') # Export corrected dataset.
unsecure_df
```
#### Generate Encryption Key
```
password_provided = 'password' # Provide password.
password = password_provided.encode() # Encode password.
salt = b"randomsalt" # Provide salt key.
kdf = PBKDF2HMAC(algorithm=hashes.SHA256(),
length=32,
salt=salt,
iterations=100000,
backend=default_backend())
key = base64.urlsafe_b64encode(kdf.derive(password))
file = open(f'encryption_key.key', 'wb')
file.write(key)
file.close()
```
#### Encrypt Dataset
```
file = open(f'encryption_key.key', 'rb')
key = file.read()
file.close()
# Open the file to encrypt
with open(f'unsecure.csv', 'rb') as f: # open dataset file.
dataset = f.read()
fernet = Fernet(key)
encrypted_dataset = fernet.encrypt(dataset)
# Write the encrypted file
with open(f'secure.csv.encrypted', 'wb') as f: # Encrypt dataset csv file.
f.write(encrypted_dataset)
encrypted_dataset
```
#### Decrypt Dataset
```
decrypted_dataset = load_encrypted_dataset(encrypted_file='secure.csv.encrypted', decryption_key_path='encryption_key.key')
decrypted_dataset
```
|
github_jupyter
|
import base64
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from io import BytesIO
import pandas as pd
import numpy as np
def load_encrypted_dataset(encrypted_file, decryption_key_path):
    ''' A function that uses a decryption key to decrypt a file.
Parameters:
encrypted_file (str): The path to a file which has been encrypted.
decryption_key_path (str): The path to the corresponding decryption key.
Returns:
decrypted_dataset (Pandas DataFrame): A decrypted version of the encrypted file.
'''
file = open(decryption_key_path, 'rb') # Open file.
decryption_key = file.read() # Read decryption key.
file.close() # Close file.
fernet = Fernet(decryption_key)
with open(encrypted_file, 'rb') as f:
encrypted_file = f.read()
decrypted_file = fernet.decrypt(encrypted_file) # Decrypt file.
decrypted_dataset = pd.read_csv(BytesIO(decrypted_file), index_col=0) # Convert bytes data to Pandas DataFrame.
return decrypted_dataset
unsecure_df = pd.DataFrame(np.random.randint(0,100,size=(100, 4)), columns=list('ABCD'))
unsecure_df.to_csv('unsecure.csv') # Export corrected dataset.
unsecure_df
password_provided = 'password' # Provide password.
password = password_provided.encode() # Encode password.
salt = b"randomsalt" # Provide salt key.
kdf = PBKDF2HMAC(algorithm=hashes.SHA256(),
length=32,
salt=salt,
iterations=100000,
backend=default_backend())
key = base64.urlsafe_b64encode(kdf.derive(password))
file = open(f'encryption_key.key', 'wb')
file.write(key)
file.close()
file = open(f'encryption_key.key', 'rb')
key = file.read()
file.close()
# Open the file to encrypt
with open(f'unsecure.csv', 'rb') as f: # open dataset file.
dataset = f.read()
fernet = Fernet(key)
encrypted_dataset = fernet.encrypt(dataset)
# Write the encrypted file
with open(f'secure.csv.encrypted', 'wb') as f: # Encrypt dataset csv file.
f.write(encrypted_dataset)
encrypted_dataset
decrypted_dataset = load_encrypted_dataset(encrypted_file='secure.csv.encrypted', decryption_key_path='encryption_key.key')
decrypted_dataset
| 0.729712 | 0.681692 |
<a href="https://colab.research.google.com/github/fadelramli/Tugas-MachineLearning/blob/main/Week11_Lenet.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
!pip install d2l==0.17.1
import torch
from torch import nn
from d2l import torch as d2l
net = nn.Sequential(
nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Flatten(),
nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),
nn.Linear(120, 84), nn.Sigmoid(),
nn.Linear(84, 10))
X = torch.rand(size=(1, 1, 28, 28), dtype=torch.float32)
for layer in net:
X = layer(X)
print(layer.__class__.__name__,'output shape: \t',X.shape)
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size=batch_size)
def evaluate_accuracy_gpu(net, data_iter, device=None):
"""Compute the accuracy for a model on a dataset using a GPU."""
if isinstance(net, nn.Module):
net.eval() # Set the model to evaluation mode
if not device:
device = next(iter(net.parameters())).device
# No. of correct predictions, no. of predictions
metric = d2l.Accumulator(2)
with torch.no_grad():
for X, y in data_iter:
if isinstance(X, list):
# Required for BERT Fine-tuning (to be covered later)
X = [x.to(device) for x in X]
else:
X = X.to(device)
y = y.to(device)
metric.add(d2l.accuracy(net(X), y), y.numel())
return metric[0] / metric[1]
def train_ch6(net, train_iter, test_iter, num_epochs, lr, device):
"""Train a model with a GPU (defined in Chapter 6)."""
def init_weights(m):
if type(m) == nn.Linear or type(m) == nn.Conv2d:
nn.init.xavier_uniform_(m.weight)
net.apply(init_weights)
print('training on', device)
net.to(device)
optimizer = torch.optim.SGD(net.parameters(), lr=lr)
loss = nn.CrossEntropyLoss()
animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs],
legend=['train loss', 'train acc', 'test acc'])
timer, num_batches = d2l.Timer(), len(train_iter)
for epoch in range(num_epochs):
# Sum of training loss, sum of training accuracy, no. of examples
metric = d2l.Accumulator(3)
net.train()
for i, (X, y) in enumerate(train_iter):
timer.start()
optimizer.zero_grad()
X, y = X.to(device), y.to(device)
y_hat = net(X)
l = loss(y_hat, y)
l.backward()
optimizer.step()
with torch.no_grad():
metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])
timer.stop()
train_l = metric[0] / metric[2]
train_acc = metric[1] / metric[2]
if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
animator.add(epoch + (i + 1) / num_batches,
(train_l, train_acc, None))
test_acc = evaluate_accuracy_gpu(net, test_iter)
animator.add(epoch + 1, (None, None, test_acc))
print(f'loss {train_l:.3f}, train acc {train_acc:.3f}, '
f'test acc {test_acc:.3f}')
print(f'{metric[2] * num_epochs / timer.sum():.1f} examples/sec '
f'on {str(device)}')
lr, num_epochs = 0.9, 10
train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())
```
|
github_jupyter
|
!pip install d2l==0.17.1
import torch
from torch import nn
from d2l import torch as d2l
net = nn.Sequential(
nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Flatten(),
nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),
nn.Linear(120, 84), nn.Sigmoid(),
nn.Linear(84, 10))
X = torch.rand(size=(1, 1, 28, 28), dtype=torch.float32)
for layer in net:
X = layer(X)
print(layer.__class__.__name__,'output shape: \t',X.shape)
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size=batch_size)
def evaluate_accuracy_gpu(net, data_iter, device=None):
"""Compute the accuracy for a model on a dataset using a GPU."""
if isinstance(net, nn.Module):
net.eval() # Set the model to evaluation mode
if not device:
device = next(iter(net.parameters())).device
# No. of correct predictions, no. of predictions
metric = d2l.Accumulator(2)
with torch.no_grad():
for X, y in data_iter:
if isinstance(X, list):
# Required for BERT Fine-tuning (to be covered later)
X = [x.to(device) for x in X]
else:
X = X.to(device)
y = y.to(device)
metric.add(d2l.accuracy(net(X), y), y.numel())
return metric[0] / metric[1]
def train_ch6(net, train_iter, test_iter, num_epochs, lr, device):
"""Train a model with a GPU (defined in Chapter 6)."""
def init_weights(m):
if type(m) == nn.Linear or type(m) == nn.Conv2d:
nn.init.xavier_uniform_(m.weight)
net.apply(init_weights)
print('training on', device)
net.to(device)
optimizer = torch.optim.SGD(net.parameters(), lr=lr)
loss = nn.CrossEntropyLoss()
animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs],
legend=['train loss', 'train acc', 'test acc'])
timer, num_batches = d2l.Timer(), len(train_iter)
for epoch in range(num_epochs):
# Sum of training loss, sum of training accuracy, no. of examples
metric = d2l.Accumulator(3)
net.train()
for i, (X, y) in enumerate(train_iter):
timer.start()
optimizer.zero_grad()
X, y = X.to(device), y.to(device)
y_hat = net(X)
l = loss(y_hat, y)
l.backward()
optimizer.step()
with torch.no_grad():
metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])
timer.stop()
train_l = metric[0] / metric[2]
train_acc = metric[1] / metric[2]
if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
animator.add(epoch + (i + 1) / num_batches,
(train_l, train_acc, None))
test_acc = evaluate_accuracy_gpu(net, test_iter)
animator.add(epoch + 1, (None, None, test_acc))
print(f'loss {train_l:.3f}, train acc {train_acc:.3f}, '
f'test acc {test_acc:.3f}')
print(f'{metric[2] * num_epochs / timer.sum():.1f} examples/sec '
f'on {str(device)}')
lr, num_epochs = 0.9, 10
train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())
| 0.920258 | 0.940517 |
# Static Gesture Recognition using Neural Network
Why use MediaPipe instead of plain computer vision?
When working with computer vision (pixels) we have to handle a wide range of parameter variations such as colour, size and image background, along with countless other difficulties related to extracting features from raw pixels.
With MediaPipe, on the other hand, we deal directly with the extraction of hand keypoints, which gives us the information we need in a "pre-filtered" form without having to cope with that huge variety of parameters.
## How it works
MediaPipe will be used to extract the hand keypoints; each keypoint has an X and a Y coordinate stored in a list, for example [220, 450], and each of these lists sits inside a second list holding the coordinates of all 21 points.

These lists will then be used to train a network (probably a feed-forward one).
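A minimal sketch of that input representation (made-up coordinates, only to show the shape of a single training sample):
```
# 21 hand keypoints, each stored as [x, y] pixel coordinates (made-up values)
keypoints = [[220 + 3 * i, 450 - 5 * i] for i in range(21)]
# Flattened into a single vector of 42 values: one sample for the feed-forward network
sample = [coord for point in keypoints for coord in point]
len(sample)   # 42
```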
## Libraries and Args
```
import cv2 as cv
import numpy as np
import mediapipe as mp
import csv
import copy
import argparse
import itertools
from collections import Counter
from collections import deque
from utils import CvFpsCalc
def get_args():
parser = argparse.ArgumentParser()
    parser.add_argument("--device", type=int, default=0)
    parser.add_argument("--width", type=int, default=960)
    parser.add_argument("--height", type=int, default=540)
parser.add_argument("--use_static_image_mode", action="store_true")
parser.add_argument("--min_detection_confidence",
help='min_detection_confidence',
type=float,
default=0.7)
parser.add_argument("--min_tracking_confidence",
help='min_tracking_confidence',
type=float,
default=0.5)
args = parser.parse_args()
return args
#---------Camera configs-------------------
cap = cv.VideoCapture(0)
cap.set(cv.CAP_PROP_FRAME_WIDTH, 960)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, 540)
#----------Model load---------------------
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(
    static_image_mode=True,  # was the truthy string 'store_true'; Hands() expects an actual bool
max_num_hands=1,
min_detection_confidence=0.7,
min_tracking_confidence=0.5,
)
#---------------FPS-----------------------
cvFpsCalc = CvFpsCalc(buffer_len=10)
history_length = 16
point_history = deque(maxlen=history_length)
use_brect = True
finger_gesture_history = deque(maxlen=history_length)
mode = 0
def calc_bounding_rect(image, landmarks):
image_width, image_height = image.shape[1], image.shape[0]
landmark_array = np.empty((0, 2), int)
for _, landmark in enumerate(landmarks.landmark):
landmark_x = min(int(landmark.x * image_width), image_width - 1)
landmark_y = min(int(landmark.y * image_height), image_height - 1)
landmark_point = [np.array((landmark_x, landmark_y))]
landmark_array = np.append(landmark_array, landmark_point, axis=0)
x, y, w, h = cv.boundingRect(landmark_array)
return [x, y, x + w, y + h]
def calc_landmark_list(image, landmarks):
image_width, image_height = image.shape[1], image.shape[0]
landmark_point = []
# Keypoint
for _, landmark in enumerate(landmarks.landmark):
landmark_x = min(int(landmark.x * image_width), image_width - 1)
landmark_y = min(int(landmark.y * image_height), image_height - 1)
# landmark_z = landmark.z
landmark_point.append([landmark_x, landmark_y])
return landmark_point
def pre_process_landmark(landmark_list):
temp_landmark_list = copy.deepcopy(landmark_list)
# Convert to relative coordinates
base_x, base_y = 0, 0
for index, landmark_point in enumerate(temp_landmark_list):
if index == 0:
base_x, base_y = landmark_point[0], landmark_point[1]
temp_landmark_list[index][0] = temp_landmark_list[index][0] - base_x
temp_landmark_list[index][1] = temp_landmark_list[index][1] - base_y
# Convert to a one-dimensional list
temp_landmark_list = list(
itertools.chain.from_iterable(temp_landmark_list))
# Normalization
max_value = max(list(map(abs, temp_landmark_list)))
def normalize_(n):
return n / max_value
temp_landmark_list = list(map(normalize_, temp_landmark_list))
return temp_landmark_list
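# Illustrative note (made-up coordinates, not taken from a real frame):
# example_landmarks = [[220 + 3 * i, 450 - 5 * i] for i in range(21)]
# pre_process_landmark(example_landmarks) returns a flat list of 42 values:
# wrist-relative coordinates scaled by the largest absolute value, i.e. within [-1, 1].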
def pre_process_point_history(image, point_history):
image_width, image_height = image.shape[1], image.shape[0]
temp_point_history = copy.deepcopy(point_history)
# Convert to relative coordinates
base_x, base_y = 0, 0
for index, point in enumerate(temp_point_history):
if index == 0:
base_x, base_y = point[0], point[1]
temp_point_history[index][0] = (temp_point_history[index][0] -
base_x) / image_width
temp_point_history[index][1] = (temp_point_history[index][1] -
base_y) / image_height
# Convert to a one-dimensional list
temp_point_history = list(
itertools.chain.from_iterable(temp_point_history))
return temp_point_history
def draw_landmarks(image, landmark_point):
if len(landmark_point) > 0:
# Thumb
cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[3]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[3]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[3]), tuple(landmark_point[4]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[3]), tuple(landmark_point[4]),
(255, 255, 255), 2)
# Index finger
cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[6]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[6]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[6]), tuple(landmark_point[7]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[6]), tuple(landmark_point[7]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[7]), tuple(landmark_point[8]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[7]), tuple(landmark_point[8]),
(255, 255, 255), 2)
# Middle finger
cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[10]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[10]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[10]), tuple(landmark_point[11]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[10]), tuple(landmark_point[11]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[11]), tuple(landmark_point[12]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[11]), tuple(landmark_point[12]),
(255, 255, 255), 2)
# Ring finger
cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[14]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[14]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[14]), tuple(landmark_point[15]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[14]), tuple(landmark_point[15]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[15]), tuple(landmark_point[16]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[15]), tuple(landmark_point[16]),
(255, 255, 255), 2)
# Little finger
cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[18]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[18]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[18]), tuple(landmark_point[19]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[18]), tuple(landmark_point[19]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[19]), tuple(landmark_point[20]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[19]), tuple(landmark_point[20]),
(255, 255, 255), 2)
# Palm
cv.line(image, tuple(landmark_point[0]), tuple(landmark_point[1]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[0]), tuple(landmark_point[1]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[1]), tuple(landmark_point[2]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[1]), tuple(landmark_point[2]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[5]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[5]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[9]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[9]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[13]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[13]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[17]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[17]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[0]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[0]),
(255, 255, 255), 2)
# Key Points
for index, landmark in enumerate(landmark_point):
        if index == 0: # wrist 1
cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
if index == 1: # 手首2
cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
if index == 2: # 親指:付け根
cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
if index == 3: # 親指:第1関節
cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
if index == 4: # 親指:指先
cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1)
if index == 5: # 人差指:付け根
cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
if index == 6: # 人差指:第2関節
cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
if index == 7: # 人差指:第1関節
cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
if index == 8: # 人差指:指先
cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1)
if index == 9: # 中指:付け根
cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
if index == 10: # 中指:第2関節
cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
if index == 11: # 中指:第1関節
cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
if index == 12: # 中指:指先
cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1)
if index == 13: # 薬指:付け根
cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
if index == 14: # 薬指:第2関節
cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
if index == 15: # 薬指:第1関節
cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
if index == 16: # 薬指:指先
cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1)
if index == 17: # 小指:付け根
cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
if index == 18: # 小指:第2関節
cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
if index == 19: # 小指:第1関節
cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
if index == 20: # 小指:指先
cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255),
-1)
cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1)
return image
def draw_bounding_rect(use_brect, image, brect):
if use_brect:
# Outer rectangle
cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[3]),
(0, 0, 0), 1)
return image
def draw_info(image, fps, mode, number):
cv.putText(image, "FPS:" + str(fps), (10, 30), cv.FONT_HERSHEY_SIMPLEX,
1.0, (0, 0, 0), 4, cv.LINE_AA)
cv.putText(image, "FPS:" + str(fps), (10, 30), cv.FONT_HERSHEY_SIMPLEX,
1.0, (255, 255, 255), 2, cv.LINE_AA)
return image
def select_mode(key, mode):
number = -1
if 48 <= key <= 57: # 0 ~ 9
number = key - 48
if key == 110: # n
mode = 0
if key == 107: # k
mode = 1
if key == 104: # h
mode = 2
return number, mode
while True:
fps = cvFpsCalc.get()
key = cv.waitKey(10)
if key == 27: # ESC
break
number, mode = select_mode(key, mode)
ret, image = cap.read()
if not ret:
break
image = cv.flip(image, 1) # Mirror display
debug_image = copy.deepcopy(image)
image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
image.flags.writeable = False
results = hands.process(image)
image.flags.writeable = True
if results.multi_hand_landmarks is not None:
for hand_landmarks, handedness in zip(results.multi_hand_landmarks,
results.multi_handedness):
brect = calc_bounding_rect(debug_image, hand_landmarks)
landmark_list = calc_landmark_list(debug_image, hand_landmarks)
print(landmark_list)
pre_processed_landmark_list = pre_process_landmark(
landmark_list)
pre_processed_point_history_list = pre_process_point_history(
debug_image, point_history)
debug_image = draw_bounding_rect(use_brect, debug_image, brect)
debug_image = draw_landmarks(debug_image, landmark_list)
else:
point_history.append([0, 0])
#debug_image = draw_point_history(debug_image, point_history)
debug_image = draw_info(debug_image, fps, mode, number)
cv.imshow('Hand Gesture Recognition', debug_image)
cap.release()
cv.destroyAllWindows()
landmark_list
pre_processed_landmark_list
```
|
github_jupyter
|
import cv2 as cv
import numpy as np
import mediapipe as mp
import csv
import copy
import argparse
import itertools
from collections import Counter
from collections import deque
from utils import CvFpsCalc
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--device, type=int, default=0")
parser.add_argument("--width, type=int, default=960")
parser.add_argument("--height, type=int, default=540")
parser.add_argument("--use_static_image_mode", action="store_true")
parser.add_argument("--min_detection_confidence",
help='min_detection_confidence',
type=float,
default=0.7)
parser.add_argument("--min_tracking_confidence",
help='min_tracking_confidence',
type=float,
default=0.5)
args = parser.parse_args()
return args
#---------Camera configs-------------------
cap = cv.VideoCapture(0)
cap.set(cv.CAP_PROP_FRAME_WIDTH, 960)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, 540)
#----------Model load---------------------
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(
    static_image_mode=False,  # treat input as a video stream rather than independent images
max_num_hands=1,
min_detection_confidence=0.7,
min_tracking_confidence=0.5,
)
#---------------FPS-----------------------
cvFpsCalc = CvFpsCalc(buffer_len=10)
history_length = 16
point_history = deque(maxlen=history_length)
use_brect = True
finger_gesture_history = deque(maxlen=history_length)
mode = 0
def calc_bounding_rect(image, landmarks):
image_width, image_height = image.shape[1], image.shape[0]
landmark_array = np.empty((0, 2), int)
for _, landmark in enumerate(landmarks.landmark):
landmark_x = min(int(landmark.x * image_width), image_width - 1)
landmark_y = min(int(landmark.y * image_height), image_height - 1)
landmark_point = [np.array((landmark_x, landmark_y))]
landmark_array = np.append(landmark_array, landmark_point, axis=0)
x, y, w, h = cv.boundingRect(landmark_array)
return [x, y, x + w, y + h]
def calc_landmark_list(image, landmarks):
image_width, image_height = image.shape[1], image.shape[0]
landmark_point = []
# Keypoint
for _, landmark in enumerate(landmarks.landmark):
landmark_x = min(int(landmark.x * image_width), image_width - 1)
landmark_y = min(int(landmark.y * image_height), image_height - 1)
# landmark_z = landmark.z
landmark_point.append([landmark_x, landmark_y])
return landmark_point
def pre_process_landmark(landmark_list):
temp_landmark_list = copy.deepcopy(landmark_list)
# Convert to relative coordinates
base_x, base_y = 0, 0
for index, landmark_point in enumerate(temp_landmark_list):
if index == 0:
base_x, base_y = landmark_point[0], landmark_point[1]
temp_landmark_list[index][0] = temp_landmark_list[index][0] - base_x
temp_landmark_list[index][1] = temp_landmark_list[index][1] - base_y
# Convert to a one-dimensional list
temp_landmark_list = list(
itertools.chain.from_iterable(temp_landmark_list))
# Normalization
max_value = max(list(map(abs, temp_landmark_list)))
def normalize_(n):
return n / max_value
temp_landmark_list = list(map(normalize_, temp_landmark_list))
return temp_landmark_list
def pre_process_point_history(image, point_history):
image_width, image_height = image.shape[1], image.shape[0]
temp_point_history = copy.deepcopy(point_history)
# Convert to relative coordinates
base_x, base_y = 0, 0
for index, point in enumerate(temp_point_history):
if index == 0:
base_x, base_y = point[0], point[1]
temp_point_history[index][0] = (temp_point_history[index][0] -
base_x) / image_width
temp_point_history[index][1] = (temp_point_history[index][1] -
base_y) / image_height
# Convert to a one-dimensional list
temp_point_history = list(
itertools.chain.from_iterable(temp_point_history))
return temp_point_history
def draw_landmarks(image, landmark_point):
if len(landmark_point) > 0:
# Thumb
cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[3]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[3]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[3]), tuple(landmark_point[4]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[3]), tuple(landmark_point[4]),
(255, 255, 255), 2)
# Index finger
cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[6]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[6]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[6]), tuple(landmark_point[7]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[6]), tuple(landmark_point[7]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[7]), tuple(landmark_point[8]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[7]), tuple(landmark_point[8]),
(255, 255, 255), 2)
# Middle finger
cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[10]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[10]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[10]), tuple(landmark_point[11]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[10]), tuple(landmark_point[11]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[11]), tuple(landmark_point[12]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[11]), tuple(landmark_point[12]),
(255, 255, 255), 2)
# Ring finger
cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[14]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[14]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[14]), tuple(landmark_point[15]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[14]), tuple(landmark_point[15]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[15]), tuple(landmark_point[16]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[15]), tuple(landmark_point[16]),
(255, 255, 255), 2)
# Little finger
cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[18]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[18]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[18]), tuple(landmark_point[19]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[18]), tuple(landmark_point[19]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[19]), tuple(landmark_point[20]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[19]), tuple(landmark_point[20]),
(255, 255, 255), 2)
# Palm
cv.line(image, tuple(landmark_point[0]), tuple(landmark_point[1]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[0]), tuple(landmark_point[1]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[1]), tuple(landmark_point[2]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[1]), tuple(landmark_point[2]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[5]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[5]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[9]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[9]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[13]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[13]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[17]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[17]),
(255, 255, 255), 2)
cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[0]),
(0, 0, 0), 6)
cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[0]),
(255, 255, 255), 2)
    # Key Points
    # Landmark indices (following the original comments): 0-1 wrist, 2-4 thumb,
    # 5-8 index finger, 9-12 middle finger, 13-16 ring finger, 17-20 little finger;
    # the last index of each finger is the fingertip, which is drawn with a larger circle.
    for index, landmark in enumerate(landmark_point):
        radius = 8 if index in (4, 8, 12, 16, 20) else 5
        cv.circle(image, (landmark[0], landmark[1]), radius, (255, 255, 255), -1)
        cv.circle(image, (landmark[0], landmark[1]), radius, (0, 0, 0), 1)
return image
def draw_bounding_rect(use_brect, image, brect):
if use_brect:
# Outer rectangle
cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[3]),
(0, 0, 0), 1)
return image
def draw_info(image, fps, mode, number):
cv.putText(image, "FPS:" + str(fps), (10, 30), cv.FONT_HERSHEY_SIMPLEX,
1.0, (0, 0, 0), 4, cv.LINE_AA)
cv.putText(image, "FPS:" + str(fps), (10, 30), cv.FONT_HERSHEY_SIMPLEX,
1.0, (255, 255, 255), 2, cv.LINE_AA)
return image
def select_mode(key, mode):
number = -1
if 48 <= key <= 57: # 0 ~ 9
number = key - 48
if key == 110: # n
mode = 0
if key == 107: # k
mode = 1
if key == 104: # h
mode = 2
return number, mode
while True:
fps = cvFpsCalc.get()
key = cv.waitKey(10)
if key == 27: # ESC
break
number, mode = select_mode(key, mode)
ret, image = cap.read()
if not ret:
break
image = cv.flip(image, 1) # Mirror display
debug_image = copy.deepcopy(image)
image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
image.flags.writeable = False
results = hands.process(image)
image.flags.writeable = True
if results.multi_hand_landmarks is not None:
for hand_landmarks, handedness in zip(results.multi_hand_landmarks,
results.multi_handedness):
brect = calc_bounding_rect(debug_image, hand_landmarks)
landmark_list = calc_landmark_list(debug_image, hand_landmarks)
print(landmark_list)
pre_processed_landmark_list = pre_process_landmark(
landmark_list)
pre_processed_point_history_list = pre_process_point_history(
debug_image, point_history)
debug_image = draw_bounding_rect(use_brect, debug_image, brect)
debug_image = draw_landmarks(debug_image, landmark_list)
else:
point_history.append([0, 0])
#debug_image = draw_point_history(debug_image, point_history)
debug_image = draw_info(debug_image, fps, mode, number)
cv.imshow('Hand Gesture Recognition', debug_image)
cap.release()
cv.destroyAllWindows()
landmark_list
pre_processed_landmark_list
| 0.492676 | 0.816077 |
```
%matplotlib inline
import elephant
import matplotlib.pyplot as plt
import numpy as np
import quantities as pq
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from quantities import ms, s, Hz
from elephant.spike_train_generation import homogeneous_poisson_process, homogeneous_gamma_process
np.random.seed(28) # to make the results reproducible
spiketrain1 = elephant.spike_train_generation.homogeneous_poisson_process(rate=10 * pq.Hz,
t_start=0. * pq.ms,
t_stop=10000. * pq.ms)
spiketrain2 = elephant.spike_train_generation.homogeneous_gamma_process(a=3,
b=10 * pq.Hz,
t_start=0. * pq.ms,
t_stop=10000. * pq.ms)
print(f"spiketrain1 has {len(spiketrain1)} spikes:")
print(" t_start:", spiketrain1.t_start)
print(" t_stop:", spiketrain1.t_stop)
print(" spike times:", spiketrain1.times)
print(f"spiketrain2 has {len(spiketrain2)} spikes:")
print(" t_start:", spiketrain2.t_start)
print(" t_stop:", spiketrain2.t_stop)
print(" spike times:", spiketrain2.times)
plt.figure(figsize=(8, 3))
plt.eventplot([spiketrain1.magnitude, spiketrain2.magnitude], linelengths=0.75, color='black')
plt.xlabel('Time (ms)', fontsize=16)
plt.yticks([0,1], labels=["spiketrain1", "spiketrain2"], fontsize=16)
plt.show()
```
### Mean firing rate
The simplest approach is to assume a stationary firing rate and only use the total number of spikes and the duration of the spike train to calculate the average number of spikes per time unit. This results in a single value for a given spiketrain.
```
mean_frate1 = elephant.statistics.mean_firing_rate(spiketrain1)
mean_frate1.units = pq.Hz
print("The mean firing rate of spiketrain1 is", mean_frate1)
mean_frate2 = elephant.statistics.mean_firing_rate(spiketrain2)
mean_frate2.units = pq.Hz
print("The mean firing rate of spiketrain2 is", mean_frate2)
print("The mean firing rate of spiketrain1 is", elephant.statistics.mean_firing_rate(spiketrain1))
print("The mean firing rate of spiketrain2 is", elephant.statistics.mean_firing_rate(spiketrain2))
```
The mean firing rate of `spiketrain1` is higher than that of `spiketrain2`, as expected from the raster plot.
Let’s quickly check the correctness of the `mean_firing_rate()` function by computing the firing rates manually:
```
fr1 = len(spiketrain1) / (spiketrain1.t_stop - spiketrain1.t_start)
fr2 = len(spiketrain2) / (spiketrain2.t_stop - spiketrain2.t_start)
print("The mean firing rate of spiketrain1 is", fr1)
print("The mean firing rate of spiketrain2 is", fr2)
```
Additionally, the period within the spike train during which to estimate the firing rate can be further limited using the `t_start` and `t_stop` keyword arguments. Here, we limit the firing rate estimation to the first second of the spiketrain.
```
elephant.statistics.mean_firing_rate(spiketrain1, t_start=0*pq.ms, t_stop=1000*pq.ms)
```
### Coefficient of Variation (CV)
In this section we will numerically verify that the coefficient of variation (CV), a measure of the variability of inter-spike intervals, equals 1 for a spike train modeled as a random (stochastic) Poisson process.
Let us generate 100 independent Poisson spike trains, each 100 seconds long with a rate of 10 Hz, for which we will later calculate the CV. For simplicity, we will store the spike trains in a list.
```
spiketrain_list = [
elephant.spike_train_generation.homogeneous_poisson_process(rate=10.0*pq.Hz,
t_start=0.0*pq.s,
t_stop=100.0*pq.s)
for _ in range(100)]
plt.figure(dpi=150)
plt.eventplot([st.magnitude for st in spiketrain_list], linelengths=0.75, linewidths=0.75, color='black')
plt.xlabel("Time, s")
plt.ylabel("Neuron id")
plt.xlim([0, 1])
plt.show()
```
From the plot you can see the random nature of each Poisson spike train. Let us verify it numerically by calculating the distribution of the 100 CVs obtained from inter-spike intervals (ISIs) of these spike trains.
For each spike train in our list, we first call the `isi()` function which returns an array of all N-1 ISIs for the N spikes in the input spike train. We then feed the list of ISIs into the `cv()` function, which returns a single value for the coefficient of variation:
```
from elephant.statistics import isi, cv
cv_list = [cv(isi(spiketrain)) for spiketrain in spiketrain_list]
plt.figure(dpi=100)
plt.hist(cv_list)
plt.xlabel('CV')
plt.ylabel('count')
plt.title("Coefficient of Variation of homogeneous Poisson process")
plt.show()
print(np.mean(cv_list))
```
As predicted by theory, the CV values are clustered around 1.
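To see where this value comes from, recall that the ISIs of a homogeneous Poisson process are exponentially distributed. An exponential distribution with rate $\lambda$ has mean $1/\lambda$ and standard deviation $1/\lambda$, so

$$CV = \frac{\sigma_{ISI}}{\mu_{ISI}} = \frac{1/\lambda}{1/\lambda} = 1.$$

The remaining spread around 1 in the histogram comes from estimating the CV from a finite number of ISIs in each spike train.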
|
github_jupyter
|
%matplotlib inline
import elephant
import matplotlib.pyplot as plt
import numpy as np
import quantities as pq
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from quantities import ms, s, Hz
from elephant.spike_train_generation import homogeneous_poisson_process, homogeneous_gamma_process
np.random.seed(28) # to make the results reproducible
spiketrain1 = elephant.spike_train_generation.homogeneous_poisson_process(rate=10 * pq.Hz,
t_start=0. * pq.ms,
t_stop=10000. * pq.ms)
spiketrain2 = elephant.spike_train_generation.homogeneous_gamma_process(a=3,
b=10 * pq.Hz,
t_start=0. * pq.ms,
t_stop=10000. * pq.ms)
print(f"spiketrain1 has {len(spiketrain1)} spikes:")
print(" t_start:", spiketrain1.t_start)
print(" t_stop:", spiketrain1.t_stop)
print(" spike times:", spiketrain1.times)
print(f"spiketrain2 has {len(spiketrain2)} spikes:")
print(" t_start:", spiketrain2.t_start)
print(" t_stop:", spiketrain2.t_stop)
print(" spike times:", spiketrain2.times)
plt.figure(figsize=(8, 3))
plt.eventplot([spiketrain1.magnitude, spiketrain2.magnitude], linelengths=0.75, color='black')
plt.xlabel('Time (ms)', fontsize=16)
plt.yticks([0,1], labels=["spiketrain1", "spiketrain2"], fontsize=16)
plt.show()
mean_frate1 = elephant.statistics.mean_firing_rate(spiketrain1)
mean_frate1.units = pq.Hz
print("The mean firing rate of spiketrain1 is", mean_frate1)
mean_frate2 = elephant.statistics.mean_firing_rate(spiketrain2)
mean_frate2.units = pq.Hz
print("The mean firing rate of spiketrain2 is", mean_frate2)
print("The mean firing rate of spiketrain1 is", elephant.statistics.mean_firing_rate(spiketrain1))
print("The mean firing rate of spiketrain2 is", elephant.statistics.mean_firing_rate(spiketrain2))
fr1 = len(spiketrain1) / (spiketrain1.t_stop - spiketrain1.t_start)
fr2 = len(spiketrain2) / (spiketrain2.t_stop - spiketrain2.t_start)
print("The mean firing rate of spiketrain1 is", fr1)
print("The mean firing rate of spiketrain2 is", fr2)
elephant.statistics.mean_firing_rate(spiketrain1, t_start=0*pq.ms, t_stop=1000*pq.ms)
spiketrain_list = [
elephant.spike_train_generation.homogeneous_poisson_process(rate=10.0*pq.Hz,
t_start=0.0*pq.s,
t_stop=100.0*pq.s)
for _ in range(100)]
plt.figure(dpi=150)
plt.eventplot([st.magnitude for st in spiketrain_list], linelengths=0.75, linewidths=0.75, color='black')
plt.xlabel("Time, s")
plt.ylabel("Neuron id")
plt.xlim([0, 1])
plt.show()
from elephant.statistics import isi, cv
cv_list = [cv(isi(spiketrain)) for spiketrain in spiketrain_list]
plt.figure(dpi=100)
plt.hist(cv_list)
plt.xlabel('CV')
plt.ylabel('count')
plt.title("Coefficient of Variation of homogeneous Poisson process")
plt.show()
print(np.mean(cv_list))
| 0.540681 | 0.894513 |
# Running the production TPS simulation
This file runs the main calculation for the flexible-length TPS simulation. It requires the file `ad_tps_equil.nc`, which is written in the notebook `AD_tps_1_trajectory.ipynb`.
In this file, you will learn:
* how to load simulation objects from a file and reuse them
* how to run a production simulation
NB: This is a long calculation. In practice, it would be best to export the Python from this notebook and run it non-interactively on a computing node, or to save the setup to a file and use the [OpenPathSampling CLI](http://openpathsampling.org/latest/cli.html).
```
%matplotlib inline
import openpathsampling as paths
```
## Load simulation objects from file
In setting up the equilibration simulation, we've already defined everything we need for path sampling. One of the big strengths of OPS is that all simulation objects are saved in the storage file. This means that you can easily reload them for other simulations, and you have a clear chain of provenance, so you know that settings are *exactly* the same. This is why we name OPS objects using the `.named()` method -- it makes it easy to load them up later.
In this example, we'll create a new scheme. This scheme is actually identical to our equilibration scheme, so we could have just reused that old one. However, in many situations, you might use a different move scheme for equilibration than for production. For example, you might use only one-way shooting to equilibrate a TIS network, but then use a full RETIS move scheme for the production run.
```
old_storage = paths.Storage("ad_tps_equil.nc", mode='r')
network = old_storage.networks['tps_network']
engine = old_storage.engines['300K']
last_result = old_storage.steps[-1].active
# note that we could have loaded other things from storage, for example:
#C_7eq = old_storage.volumes['C_7eq']
#alpha_R = old_storage.volumes['alpha_R']
# however, we don't need to, since all the information is in the network
```
## Run TPS
As always, the process for setting up a simulation is:
1. Create a `network`
2. Create a `move_scheme`
3. Set up `initial_conditions`
4. Create the `PathSampling` object and run it.
Since we already created all the input to these when we set up the first trajectory, we can simply reuse the versions we loaded above.
```
scheme = paths.OneWayShootingMoveScheme(network,
selector=paths.UniformSelector(),
engine=engine)
initial_conditions = scheme.initial_conditions_from_trajectories(last_result)
storage = paths.Storage("ad_tps.nc", "w")
storage.save(initial_conditions); # save these to give storage a template
# we can only close the old storage after we've saved the initial conditions --
# this is because details of the snapshot aren't loaded until needed
old_storage.close()
sampler = paths.PathSampling(storage=storage,
move_scheme=scheme,
sample_set=initial_conditions).named("Flexible_TPS_Sampling")
```
Note: 10000 steps will take a long time. If you just want to run a little bit, reduce this number.
```
sampler.run(10000)
storage.close()
```
With this done, you can go on to do the flexible-length parts of the analysis in `AD_tps_3a_analysis_flex.ipynb`.
|
github_jupyter
|
%matplotlib inline
import openpathsampling as paths
old_storage = paths.Storage("ad_tps_equil.nc", mode='r')
network = old_storage.networks['tps_network']
engine = old_storage.engines['300K']
last_result = old_storage.steps[-1].active
# note that we could have loaded other things from storage, for example:
#C_7eq = old_storage.volumes['C_7eq']
#alpha_R = old_storage.volumes['alpha_R']
# however, we don't need to, since all the information is in the network
scheme = paths.OneWayShootingMoveScheme(network,
selector=paths.UniformSelector(),
engine=engine)
initial_conditions = scheme.initial_conditions_from_trajectories(last_result)
storage = paths.Storage("ad_tps.nc", "w")
storage.save(initial_conditions); # save these to give storage a template
# we can only close the old storage after we've saved the initial conditions --
# this is because details of the snapshot aren't loaded until needed
old_storage.close()
sampler = paths.PathSampling(storage=storage,
move_scheme=scheme,
sample_set=initial_conditions).named("Flexible_TPS_Sampling")
sampler.run(10000)
storage.close()
| 0.411702 | 0.967747 |
```
from kerashelper import *
import numpy as np
import random
from headers import *
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from data_utils import *
from image_utils import *
%load_ext autoreload
%autoreload 2
train_batch=20000
test_batch=5000
val_batch=5000
num_classes=10
idx_train = [x for x in range(train_batch)]
idx_test = [x for x in range(test_batch)]
random.shuffle(idx_train)  # shuffle in place; random.shuffle returns None
random.shuffle(idx_test)
newx_train,Y_train,newx_test,Y_test=x_train[idx_train,:,:,:],y_train[idx_train],x_test[idx_test,:,:,:],y_test[idx_test]
newx_train,Y_train,newx_val,Y_val,newx_test,Y_test=x_train[:train_batch,:,:,:],y_train[:train_batch],x_train[train_batch:(train_batch+val_batch),:,:,:],y_train[train_batch:(train_batch+val_batch)],x_test[:test_batch,:,:,:],y_test[:test_batch]
Y_train = np_utils.to_categorical(Y_train, num_classes)
Y_test = np_utils.to_categorical(Y_test, num_classes)
Y_val = np_utils.to_categorical(Y_val, num_classes)
print(newx_train.shape, newx_test.shape, Y_train.shape, Y_test.shape, newx_val.shape, Y_val.shape)
h,w=128,128
X1_train,X1_test,X1_val=transpose_batch(newx_train),transpose_batch(newx_test),transpose_batch(newx_val)
print(X1_train.shape)
print(X1_test.shape)
print(X1_val.shape)
X_train,X_test,X_val=resize_batch(X1_train,h,w),resize_batch(X1_test,h,w),resize_batch(X1_val,h,w)
print(X_train.shape)
print(X_test.shape)
print(X_val.shape)
X_train=(1.0/255)*X_train
X_test=(1.0/255)*X_test
X_val=(1.0/255)*X_val
num_classes=10
# Create the model
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(128, 128, 3), padding='same', activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', kernel_constraint=maxnorm(3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
from keras.utils.vis_utils import plot_model
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
plot_model(model, to_file='modelclass.png', show_shapes=True)
epochs = 10
lrate = 0.001
decay = lrate/epochs
#sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
adam=Adam(lr=lrate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=decay)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
print(model.summary())
# Fit the model
model.fit(X_train[:2000], Y_train[:2000], validation_data=(X_val[:100], Y_val[:100]), epochs=epochs, batch_size=200, verbose=1)
# Final evaluation of the model
scores = model.evaluate(X_test[:100], Y_test[:100], verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
print(model)
model.save('classifier.h5')
print("Stored to %s" % 'classifier.h5')
```
|
github_jupyter
|
from kerashelper import *
import numpy as np
import random
from headers import *
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from data_utils import *
from image_utils import *
%load_ext autoreload
%autoreload 2
train_batch=20000
test_batch=5000
val_batch=5000
num_classes=10
idx_train = [x for x in range(train_batch)]
idx_test = [x for x in range(test_batch)]
random.shuffle(idx_train)  # shuffle in place; random.shuffle returns None
random.shuffle(idx_test)
newx_train,Y_train,newx_test,Y_test=x_train[idx_train,:,:,:],y_train[idx_train],x_test[idx_test,:,:,:],y_test[idx_test]
newx_train,Y_train,newx_val,Y_val,newx_test,Y_test=x_train[:train_batch,:,:,:],y_train[:train_batch],x_train[train_batch:(train_batch+val_batch),:,:,:],y_train[train_batch:(train_batch+val_batch)],x_test[:test_batch,:,:,:],y_test[:test_batch]
Y_train = np_utils.to_categorical(Y_train, num_classes)
Y_test = np_utils.to_categorical(Y_test, num_classes)
Y_val = np_utils.to_categorical(Y_val, num_classes)
print(newx_train.shape, newx_test.shape, Y_train.shape, Y_test.shape, newx_val.shape, Y_val.shape)
h,w=128,128
X1_train,X1_test,X1_val=transpose_batch(newx_train),transpose_batch(newx_test),transpose_batch(newx_val)
print(X1_train.shape)
print(X1_test.shape)
print(X1_val.shape)
X_train,X_test,X_val=resize_batch(X1_train,h,w),resize_batch(X1_test,h,w),resize_batch(X1_val,h,w)
print(X_train.shape)
print(X_test.shape)
print(X_val.shape)
X_train=(1.0/255)*X_train
X_test=(1.0/255)*X_test
X_val=(1.0/255)*X_val
num_classes=10
# Create the model
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(128, 128, 3), padding='same', activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.2))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', kernel_constraint=maxnorm(3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
from keras.utils.vis_utils import plot_model
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
plot_model(model, to_file='modelclass.png', show_shapes=True)
epochs = 10
lrate = 0.001
decay = lrate/epochs
#sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
adam=Adam(lr=lrate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=decay)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
print(model.summary())
# Fit the model
model.fit(X_train[:2000], Y_train[:2000], validation_data=(X_val[:100], Y_val[:100]), epochs=epochs, batch_size=200, verbose=1)
# Final evaluation of the model
scores = model.evaluate(X_test[:100], Y_test[:100], verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
print(model)
model.save('classifier.h5')
print("Stored to %s" % 'classifier.h5')
| 0.857485 | 0.431884 |
```
%logstop
%logstart -rtq ~/.logs/PY_ProgramFlow.py append
import seaborn as sns
sns.set()
import expectexception
```
# Program Flow
<!-- requirement: images/high_score_flowchart.png -->
<!-- requirement: images/nested_logic_flowchart.png -->
## What is a computer program?
At its simplest, a program is a list of instructions that a computer carries out in order. A program could be long and complicated, but it is built of simple parts. Let's look at some simple operations in Python, and think about what the computer does for each one:
```
1 + 1
2 * 3.5
1 + 1
2 * 3.5
```
In the first cell we compute `1 + 1`, and Python returns the result `2`. We can think of this as a very short program. Similarly, in the second cell we compute `2 * 3.5`, and Python returns the result `7.0`.
However, in the third cell, when we combine these two statements as sequential lines, we only see Python return `7.0`. Why is that?
Python can only return one result at the end of the cell, so the first line is evaluated, but we never see the result. One way we can report intermediate results is using `print`.
```
print(1 + 1)
print(2 * 3.5)
```
We can also include lines in the code that the computer won't execute. We call these lines **comments**, because they are used to add notes and explanations to the code. We use `#` to indicate that we are making a comment.
```
print('1 + 1 is', 1 + 1)
# this is a comment, Python won't try to execute it
print('All done!')
```
Often we won't want to only print intermediate results, but _store_ them for later use. We can store a result in the computer's memory by assigning it to a **variable**.
```
first_result = 1 + 1
final_result = first_result * 3.5
print(final_result)
```
Here we were able to use the result of the first calculation (stored in the variable `first_result`) in our second calculation. We store the second result in `final_result`, which we can print at the end of the cell.
Variables help us keep track of the information we need to successfully execute a program. Variables can be used to store a variety of types of information.
```
my_name = 'Dylan'
my_age = 28
my_favorite_number = 2.718
has_dog = True
print('My name is', my_name)
print('My age is', my_age)
print('My favorite number is', my_favorite_number)
print('I own a dog:', has_dog)
```
Since variables can be used to store so many types of information, it's a good idea to give those variables descriptive names like I did. This helps us write code that is easy to read, which helps when we're trying to find and fix mistakes, or share code with others.
```
print(type(my_name))
print(type(my_age))
print(type(my_favorite_number))
print(type(has_dog))
```
A **string** is a sequence of characters. An **integer** has the same meaning as in mathematics (i.e. "whole numbers"). A **float** or **floating point number** refers to a decimal number (i.e. "real number" in mathematics); it is called a float because the decimal point is allowed to "float" through the digits, allowing us to represent both big numbers (e.g. 204939.12) and small numbers (e.g. 0.000239). A **bool** or **boolean** refers to a variable that is either true or false.
These are just a few types of data we will encounter, and we will explore others later on in the course.
In Python, we can assign any type of data to a variable without declaring in advance what type the variable will be. Not all programming languages behave this way.
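For example, the same variable name can be bound to values of different types over the course of a program:
```
x = 42             # x starts out holding an integer
print(type(x))
x = 'forty-two'    # the same name can later hold a string
print(type(x))
```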
### Exercises
1. Define `my_name` and `my_age` variables with values corresponding to your own name and age and print them.
1. Use your `my_age` variable to print out how old you will be in 10 years.
## Functions
Many programs react to user input. Functions allow us to define a task we would like the computer to carry out based on input. A simple function in Python might look like this:
```
def square(number):
return number**2
```
We define functions using the `def` keyword. Next comes the name of the function, which in this case is `square`. We then enclose the function's input in parentheses, in this case `number`. We use `:` to tell Python we're ready to write the body of the function.
In this case the body of the function is very simple; we return the square of `number` (we use `**` for exponents in Python). The keyword `return` signals that the function will generate some output. Not every function will have a `return` statement, but many will. A `return` statement ends a function.
Let's see our function in action:
```
# we can store function output in variables
squared = square(5.5)
print(squared)
my_number = 6
# we can also use variables as function input
print(square(my_number))
```
We can pass different input to the `square` function, including variables. When we passed a float to `square`, it returned a float. When we passed an integer, `square` returned an integer. In both cases the input was interpreted by the function as the argument `number`.
Not all possible inputs are valid.
```
%%expect_exception TypeError
print(square('banana'))
```
We ran into an error because `'banana'` is a string, not a number. We should be careful to make sure that the input for a function makes sense for that function's purpose. We'll talk more about errors like this one later on.
### Exercises
1. Write a function to cube a number.
2. Write a function, `say_hello` which takes in a name variable and print out "Hello name". `say_hello("zach")` should print `"Hello zach"`.
### Why Functions?
We can see that functions are useful for handling user input, but they also come in handy in numerous other cases. One example is when we want to perform an action multiple times on different input. If I want to square a bunch of numbers, in particular the numbers between 1 and 10, I can do this pretty easily (later we will learn about iteration, which will make this even easier!).
```
1**2
2**2
3**2
4**2
5**2
6**2
7**2
8**2
9**2
```
It seems I forgot to save the answers or at least print them. This is easy:
```
print(1**2)
print(2**2)
print(3**2)
print(4**2)
print(5**2)
print(6**2)
print(7**2)
print(8**2)
print(9**2)
```
That worked! However, what if I now want to go back and add two to all the answers? Clearly changing each instance is not the right way to do it. Let's instead define a function to do the work for us.
```
def do_it(x):
print(x**2 + 2)
```
Now we can just call the function on every element. If we want to change the output, we just need to change the function in one place, not in all places we want to use the function!
```
do_it(1)
do_it(2)
do_it(3)
do_it(4)
do_it(5)
do_it(6)
do_it(7)
do_it(8)
do_it(9)
```
Splitting out the work into functions is often a way to make code more modular and understandable. It also helps ensure your code is correct. If we write a function and test it to be correct, we know it will be correct every time we use it. If we don't break out code into a function, it is very easy to make typos or other errors which will cause our programs to break.
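As a small illustration of that last point, here is one way such a check might look. This is only a sketch: it assumes a hypothetical `compute_it` variant of `do_it` that returns its result instead of printing it, so the output can be compared against answers we know by hand.
```
def compute_it(x):
    # hypothetical return-based variant of do_it, used only for this check
    return x**2 + 2

# compare against answers we can verify by hand
assert compute_it(1) == 3
assert compute_it(3) == 11
print('compute_it passed its checks')
```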
### Exercises
1. Modify the `do_it` function to print the square of the value it currently prints.
## Syntax
As our instructions to the computer become more complicated, we will need to organize them in a way the computer understands. We've already seen an example of this with our `square` function. There was a specific order to the words and specific symbols we had to use to let Python know which part of the function was the definition and which part was the body, or which part was the name of the function and which part was the argument. We call the rules for organizing code the programming language's **syntax**.
Python's syntax is very streamlined so that code is readable and intuitive. Python accomplishes this by using whitespace to organize code. Let's look at some examples.
```
def example_a():
print('example_a is running')
print('returning value "a"')
return 'a'
example_a()
def example_b():
print('example_b is running')
print('exiting without returning a value')
example_b()
```
The function `example_a` ends with a return statement, but `example_b` has no return statement. How does Python know where `example_b` ends? We use indentation to indicate which lines are part of the function and which aren't. The indented lines are all grouped together under the function definition. We'll see this format again for controlling whether certain sections of code execute.
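As a quick illustration of how whitespace groups code, the indented line below is part of the function body, while the unindented lines are not:
```
def indentation_example():
    print('this line is indented, so it belongs to the function body')

print('this line is not indented, so it runs outside the function')
indentation_example()
```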
## Conditionals and logic
We'll often want the computer only to take an action under certain circumstances. For example, we might want a game to print the message 'High score!', but only if the player's score is higher than the previous high score. We can write this as a formal logical statement: *if* the player's score is higher than the previous high score _then_ print 'High score!'.
The syntax for expressing this logic in Python is very similar. Let's define a function that accepts the player's score and the previous high score as arguments. If the player's score is higher, then it will print 'High score!'. Finally, it will return the new high score (whichever one that is).
```
def test_high_score(player_score, high_score):
if player_score > high_score:
print('High score!')
high_score = player_score
return high_score
print(test_high_score(83, 98))
print(test_high_score(95, 93))
```
With `if` statements we use a similar syntax as we used for organizing functions. With functions we had a `def` statement ending with `:`, and an indented body. Similarly for a conditional, we have an `if` statement ending with `:`, and an indented body.
Conditional statements are used to control program flow. We can visualize our example, `test_high_score`, in a decision tree.

We can nest `if` statements to make more complicated trees.
```
def nested_example(x):
if x < 50:
if x % 2 == 0:
return 'branch a'
else:
return 'branch b'
else:
return 'branch c'
print(nested_example(42))
print(nested_example(51))
print(nested_example(37))
```
In this example, we have an `if` statement nested under another `if` statement. As we change the input, we end up on different branches of the tree.

The statement that follows the `if` is called the **condition**. The condition can be either true or false. If the condition is true, then we execute the statements under the `if`. If the condition is false, then we execute the statements under the `else` (or if there is no `else`, then we do nothing).
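Python also provides `elif` (short for "else if") to chain more than two branches; we will see it used in code later in this notebook. A small sketch:
```
def describe_number(x):
    if x > 0:
        return 'positive'
    elif x == 0:
        return 'zero'
    else:
        return 'negative'

print(describe_number(7))
print(describe_number(0))
print(describe_number(-3))
```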
Conditions themselves are instructions that Python can interpret.
```
print(50 > 10)
print(2 + 2 == 4)
print(-3 > 2)
```
Conditions are evaluated as booleans, which are `True` or `False`. We can combine conditions by asking if condition A _and_ condition B are true. We could also ask if condition A *or* condition B is true. Let's consider whether such statements are true overall based on the possible values of condition A and condition B.
|Condition A|Condition B|Condition A and Condition B|Condition A or Condition B|
|:---------:|:---------:|:-------------------------:|:------------------------:|
|True|True|True|True|
|True|False|False|True|
|False|True|False|True|
|False|False|False|False|
```
print(True and True)
print(True and False)
print(False and True)
print(False and False)
print(True or True)
print(True or False)
print(False or True)
print(False or False)
x = 5
y = 3
print(x > 4 and y > 2)
print(x > 7 and y > 2)
print(x > 7 or y > 2)
```
The keywords `or` and `and` are called **logical operations** (in the same sense that we call `+`, `-`, `*`, etc. arithmetic operations). The last logical operation is `not`: `not True` is `False`, `not False` is `True`.
```
print(not True)
print(not False)
x = 10
y = 8
print(x > 7 or y < 7)
print(not x > 7 or y < 7)
print(not x > 7 or not y < 7)
print(not (x > 7 or y < 7))
```
### Exercises
1. Write a function which takes in a number and returns True if it is greater than 10 but less than 20 or it is less than -100.
2. In the code above we have used the `%` operator. What does this do?
## Iteration
Conditionals are very useful because they allow our programs to make decisions based on some information. These decisions control the flow of the program (i.e. which statements get executed). We have one other major tool for controlling program flow, which is repetition. In programming, we will use repetitive loops to execute the same code many times. This is called **iteration**. The most basic kind of iteration is the `while` loop. A `while` loop will keep executing so long as the condition after the `while` is `True`.
```
x = 0
while x < 5:
print(x)
x = x + 1
```
We will often use iteration to perform a task a certain number of times, but we might also use it to carry out a process to a certain stage of completion.
As an example of these different cases, we'll consider the Fibonacci sequence. The Fibonacci sequence is a sequence of numbers where the next number in the sequence is given by the sum of the previous two numbers. The first two numbers are given as 0 and 1. So the sequence begins 0, 1, 1, 2, 3, 5, 8...
The Fibonacci sequence goes on infinitely, so we can only ever compute part of it. Below we define two functions to compute part of the Fibonacci sequence; the first function computes the first `n` terms, while the second function computes all the terms less than an upper limit, `x`.
```
def first_n_fibonacci(n):
prev_num = 0
curr_num = 1
count = 2
print(prev_num)
print(curr_num)
while count <= n:
next_num = curr_num + prev_num
print(next_num)
prev_num = curr_num
curr_num = next_num
count += 1
def below_x_fibonacci(x):
prev_num = 0
curr_num = 1
if curr_num < x:
print(prev_num)
print(curr_num)
elif prev_num < x:
print(prev_num)
while curr_num + prev_num < x:
next_num = curr_num + prev_num
print(next_num)
prev_num = curr_num
curr_num = next_num
m = 7
print('First %d Fibonacci numbers' % m)
first_n_fibonacci(m)
print()
y = 40
print('Fibonacci numbers below %d' % y)
below_x_fibonacci(y)
```
Sometimes we will want our program to take a repeated action, but we won't know how many repetitions we will have to do, or it might be difficult to know ahead of time when the program should stop. For example, we could write a program that prints out cooking instructions. We don't know in advance how many instructions there will be in the recipe (some meals take a long time to cook and have many steps, while others are short and simple to make). We also don't know what the last instruction might be, so it would be difficult to write a conditional telling the program when to stop. How are we going to solve the problem? Let's look at an example.
Instructions for making bread:
1) Dissolve salt in water
2) Mix yeast into water
3) Mix water with flour to form dough
4) Knead dough
5) Let dough rise
6) Shape dough
7) Bake
The recipe has an ordered `list` of instructions. In Python we can use a list of strings to represent the instructions.
```
bread_recipe = ['Dissolve salt in water', 'Mix yeast into water', 'Mix water with flour to form dough',
'Knead dough', 'Let dough rise', 'Shape dough', 'Bake']
```
We will discuss lists more in the [Data Structures lecture](PY_DataStructures.ipynb). We could store different recipes in different lists.
```
soup_recipe = ['Dissolve salt in water', 'Boil water', 'Add bones to boiling water', 'Chop onions',
'Chop garlic', 'Chop carrot', 'Chop celery', 'Remove bones from water',
'Add vegetables to boiling water', 'Add meat to boiling water']
beans_recipe = ['Soak beans in water', 'Dissolve salt in water', 'Heat water and beans to boil',
'Drain beans when done cooking']
```
Each of these lists has different instructions, and they are not all the same length. The beans recipe has four steps while the soup recipe has ten. It would be hard to write a `while` loop to print out each step. It is much easier to do it using a `for` loop.
A `for` loop does an action for each item in a `list` (or more precisely, in an **iterable**).
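Lists are not the only iterables; a string, for example, can be looped over character by character in the same way. Here is a tiny aside before we return to the recipes:
```
for letter in 'abc':
    print(letter)
```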
```
def print_recipe(instructions):
for step in instructions:
print(step)
print_recipe(soup_recipe)
print_recipe(bread_recipe)
print_recipe(beans_recipe)
```
We can also use a `for` loop to repeat a task a certain number of times, like printing out the first `n` numbers in the Fibonacci sequence. Compare these two Fibonacci functions:
```
def first_n_fibonacci_while(n):
prev_num = 0
curr_num = 1
count = 2
print(prev_num)
print(curr_num)
while count <= n:
next_num = curr_num + prev_num
print(next_num)
prev_num = curr_num
curr_num = next_num
count += 1
def first_n_fibonacci_for(n):
prev_num = 0
curr_num = 1
print(prev_num)
print(curr_num)
for count in range(2, n + 1):
next_num = curr_num + prev_num
print(next_num)
prev_num = curr_num
curr_num = next_num
first_n_fibonacci_while(7)
first_n_fibonacci_for(7)
```
### Exercises
1. Compare `first_n_fibonacci_while` and `first_n_fibonacci_for`. Which one is "better"?
### Aside (Recursion)
Another way to get something like iteration is called _recursion_, which is when we define a function in terms of itself. Let's write the Fibonacci sequence recursively. This will be slightly different in that it will only calculate the nth Fibonacci number.
```
def fibonacci_recursive(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return fibonacci_recursive(n-1) + fibonacci_recursive(n-2)
fibonacci_recursive(7)
```
Here we make use of the fact that a Fibonacci number $F_n$ can be defined in terms of $F_{n-1}$ and $F_{n-2}$, with base cases $F_0=0$ and $F_1=1$. We will not be using recursion in this course, but it is an interesting and useful programming construct.
## Putting it all together
We've learned two of the major components of programs: **variables** and **functions**. We've also learned two of the major components of program control: **conditionals** (`if` statements) and **iteration** (`for` and `while` loops). We can use these ideas and tools to write code to perform complex tasks. Let's look at an example, involving all of these ideas put together.
Below we write a function that prints out all the prime numbers up to some number `n`. We will use iteration to check if each number is prime. We will use a conditional to print out numbers only if they are prime. We will also break up the task into small pieces so our code is easy to read and understand. This means we will use (or _call_) helper functions inside of our solution.
```
def is_prime(number):
if number <= 1:
return False
for factor in range(2, number):
if number % factor == 0:
return False
return True
def print_primes(n):
for number in range(1, n):
if is_prime(number):
print('%d is prime' % number)
print_primes(42)
```
The other application of functions might be to do something many times (not necessarily in an iteration). One specific and natural way to understand this is to have a list of elements and apply a function to each element of the `list`. Let's take a list of the first 20 numbers and find which ones are prime. We will do this and save the result in a `list`. Lists have an `append` method which allows us to add to the end of the list (we will see more about lists in the next lecture).
```
list_of_numbers = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
prime_list = []
for number in list_of_numbers:
if is_prime(number):
prime_list.append(number)
prime_list
```
Python provides a nice construct to apply a function to every element of a list, called a `list comprehension`. Here is an example of one:
```
[number for number in list_of_numbers if is_prime(number)]
```
Note that this is a simple bit of code that is very understandable. We don't need to care **how** the `is_prime` computation is occurring, only that it is occurring for every element of `list_of_numbers`. This means that we can view our program at a higher level without caring about the small details (which hopefully we have already designed well and tested).
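A comprehension can also transform each element while filtering. For example, using the `square` function from earlier, we can collect the squares of the primes in the same list:
```
[square(number) for number in list_of_numbers if is_prime(number)]
```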
## More About Functions
Notice that `example_a` and `example_b` had no input, but other functions like `test_high_score` had multiple variables as input. Remember that a function argument is just a placeholder for a name and will be bound to whatever is passed into the function. For example:
```
def print_this(a):
print('inside print_this: ', a)
a = 5
print_this(2)
print('a = ', a)
```
Notice that even though `print_this` was printing the variable `a` inside the function and there was a variable `a` defined outside of the function, the `print` function inside `print_this` still printed what was passed in. However, I can also write a function that uses the variable from the outer scope:
```
def print_it():
print('inside print_it: ', a)
a = 5
print_it()
print('a = ', a)
```
Here there is no variable passed into the function, so Python uses the variable from the outer scope. Be careful with this second paradigm as it can be dangerous. The danger lies in the fact that the output of the function depends upon the overall state of the program (namely the value of `a`), as opposed to `print_this`, which depends only on the input of the function. Functions like `print_this` are much easier to reason about, test, and use; they should be preferred in many contexts.
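To make the danger concrete: two identical calls to `print_it` can report different values, because the result depends on what the surrounding code has done to `a` in the meantime:
```
a = 5
print_it()    # reports the current value of a, which is 5
a = 100
print_it()    # the identical call now reports 100
```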
That said, there is a very powerful technique called a `function closure` which makes use of this ability. Let's say we want a function which will raise a number to some exponent, but we don't know which exponent ahead of runtime. We can define such a function like this.
```
def some_exponent(exponent):
def func(x):
return x**exponent
return func
some_exponent(2)(2), some_exponent(3)(2)
```
Now that we understand how normal arguments work, let's look at a few conveniences Python provides for making functions easier to create. The first is default arguments. Let's suppose we have a function with a bunch of arguments, most of which have sane defaults, for example:
```
def print_todo(watch_tv, read, eat, sleep):
print('I need to:')
if watch_tv:
print(' watch_tv')
if read:
print(' read')
if eat:
print(' eat')
if sleep:
print(' sleep')
print_todo(True, True, True, True)
```
I know that I almost always need to eat and sleep, so I can use a default argument for these instead. This means I don't need to define the value of `eat` and `sleep` unless they are different than the default.
```
def print_todo_default(watch_tv, read, eat=True, sleep=True):
print('I need to:')
if watch_tv:
print(' watch_tv')
if read:
print(' read')
if eat:
print(' eat')
if sleep:
print(' sleep')
print_todo_default(True, True)
```
These default arguments allow us to create complex functions with many inputs while also maintaining ease of use by setting sane defaults.
Another thing we might want to do is take a variable list of arguments. Let's write a similar `todo` function as before, but this time we will allow it to accept any number of arguments. Here we will make use of the `*args` syntax. The `*` tells Python to gather the rest of the positional arguments into the tuple `args`.
```
def print_todo_args(*args):
print('I need to:')
for arg in args:
print(' ' + arg)
print_todo_args('watch_tv', 'read', 'eat', 'sleep')
```
This sort of syntax can be very useful in large programs where abstract functions may call a variety of different functions with different arguments.
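For instance, a small wrapper can accept arbitrary positional and keyword arguments and simply forward them to another function. The sketch below is only an illustration; the `log_call` and `add` names are made up for this example:
```
def add(x, y, z=0):
    return x + y + z

def log_call(func, *args, **kwargs):
    # gather whatever was passed in and hand it on unchanged
    print('calling', func.__name__, 'with', args, kwargs)
    return func(*args, **kwargs)

print(log_call(add, 1, 2))        # positional arguments are forwarded, prints 3
print(log_call(add, 1, 2, z=10))  # keyword arguments are forwarded too, prints 13
```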
### Some topics we haven't discussed, but have used:
- [String formatting](https://pyformat.info/)
- Exceptions (e.g. `TypeError`)
*Copyright © 2021 WorldQuant University. This content is licensed solely for personal use. Redistribution or publication of this material is strictly prohibited.*
# Lab 1 - Search Algorithms - Reasoning and Automated Planning
Authors:
- Manuel Pasieka
- David
- Oscar Piqueras
- Gonzalo Molina
- Samuel Bermejo
The exercise has been solved using 4 different algorithms:
- Breadth-first search
- Depth-first search
- A* search
- Hill Climbing
```
import time
class State:
def __init__(self,stack,table,action):
self.stack = stack
self.table = table
self.action = action
def compare(self,state):
if (len(self.stack) != len(state.stack) | len(self.table) != len(state.table) ):
return False
for i in range(len(state.stack)):
if self.stack[i] != state.stack[i]:
return False
return len(set(self.table) & set(state.table)) == len(state.table)
def heuristic(self,state):
state_len = len(state.stack)
self_len = len(self.stack)
for i in range(self_len):
if self.stack[i] != state.stack[i]:
return (state_len - i) + (self_len - i)
return state_len - self_len
def pop(self):
piece = self.stack.pop()
self.table.append(piece)
return piece
def push(self,piece):
self.table.remove(piece)
self.stack.append(piece)
def copy(self,action):
return State(self.stack.copy(),self.table.copy(),action)
def expand(self):
expand_list = []
for piece in self.table:
state = self.copy("push {0} in stack".format(piece))
state.push(piece)
expand_list.append(state)
if (len(self.stack) != 0):
state = self.copy(None)
state.action = "pop {0} from stack".format(state.pop())
expand_list.append(state)
return expand_list
def print_state(self):
print('Acción ' + str(self.action) + ': Stack ' + str(self.stack) + ' Table ' + str(self.table))
class Node:
def __init__(self,state,father_node, cost = 0, heuristic = 9999999):
self.state = state
self.father_node = father_node
self.cost = cost
self.heuristic = heuristic
def printNode(self):
self.state.print_state()
def print_family(self):
l = []
n = self
while n != None:
l.append(n)
n = n.father_node
l.reverse()
for n in l:
n.state.print_state()
def isBucle(selft,state):
n = selft
while n != None:
if n.state.compare(state):
return True
n = n.father_node
return False
class general_search:
def __init__(self,state,goal_state):
self.state = state
self.goal_state = goal_state
self.open_list = []
self.visited_nodes = 0
self.start = 0
def print_performance(self):
print(f'Nodos en abierta = {len(self.open_list)}\nNodos visitados {self.visited_nodes}\n')
def print_runtime(self):
end = time.time()
time_elapsed=(end-self.start)
time_elapsed =round(time_elapsed,9)
print("Time elapsed: ", time_elapsed, "seconds")
return time_elapsed
def profundidad(self):
node = Node(self.state,None)
self.open_list = [node]
self.visited_nodes = 0
self.start = time.time()
while True:
if len(self.open_list) == 0:
return None
print("Evaluamos nodo...")
node = self.open_list.pop(0)
node.printNode()
if node.state.compare(self.goal_state):
return node
print("No es meta, luego expandimos.")
self.visited_nodes += 1
print("Posibles nodos a explorar:")
succesors = node.state.expand()
for state in succesors:
if not node.isBucle(state):
n = Node(state,node)
n.printNode()
self.open_list.insert(0, n)
def search_breadth_first(self):
node = Node(self.state,None)
self.open_list = [node]
self.visited_nodes = 0
self.start = time.time()
while True:
if len(self.open_list) == 0:
return None
print("Evaluamos nodo...")
node = self.open_list.pop(0)
node.printNode()
if node.state.compare(self.goal_state):
return node
print("No es meta, luego expandimos.")
self.visited_nodes += 1
print("Posibles nodos a explorar:")
succesors = node.state.expand()
for state in succesors:
if not node.isBucle(state):
n = Node(state,node)
n.printNode()
self.open_list.append(n)
def search_A_star(self):
node = Node(self.state,None)
self.open_list = [node]
self.visited_nodes = 0
self.start = time.time()
while True:
if len(self.open_list) == 0:
return None
self.open_list = sorted(self.open_list,key= lambda x: x.heuristic)
node = self.open_list.pop(0)
if node.state.compare(self.goal_state):
return node
self.visited_nodes += 1
succesors = node.state.expand()
for state in succesors:
if not node.isBucle(state):
h = state.heuristic(self.goal_state)
g = node.cost + 1
f = g + h
n = Node(state,node,node.cost,f)
self.open_list.append(n)
def search_hill_climbing(self):
node = Node(self.state,None)
self.open_list = [node]
self.visited_nodes = 0
self.start = time.time()
while True:
if len(self.open_list) == 0:
return None
self.open_list = sorted(self.open_list,key= lambda x: x.heuristic)
node = self.open_list.pop(0)
if node.state.compare(self.goal_state):
return node
self.visited_nodes += 1
succesors = node.state.expand()
succ = []
for state in succesors:
h = state.heuristic(self.goal_state)
g = node.cost + 1
f = g + h
n = Node(state,node,g,h)
succ.append(n)
succ = sorted(succ,key= lambda x: x.heuristic)
self.open_list.append(succ.pop(0))
# Breadth-first search, A* search and Hill Climbing with the blocks from the problem statement
initial_state = State(['E','D','A'],['C','B'],'Inicio')
goal_state = State(['E','D','C','B','A'],[],'Estado Final')
#initial_state = State(['A','D','E'],['C','B','G','H'],'Inicio')
#goal_state = State(['H','G','E','D','C','B','A'],[],'Estado Final')
s = general_search(initial_state,goal_state)
for algorithm in [s.search_breadth_first, s.search_A_star, s.search_hill_climbing]:
print(f'Running {algorithm.__name__} ...')
result = algorithm()
s.print_runtime()
s.print_performance()
result.print_family()
## A* search and Hill Climbing with the blocks from the problem statement
# Breadth-first search with these blocks takes too long, so it has not been included.
initial_state = State(['A','D','E'],['C','B','G','H'],'Inicio')
goal_state = State(['H','G','E','D','C','B','A'],[],'Estado Final')
s = general_search(initial_state,goal_state)
for algorithm in [s.search_A_star, s.search_hill_climbing]:
print(f'Running {algorithm.__name__} ...')
result = algorithm()
s.print_runtime()
s.print_performance()
result.print_family()
## Depth-first search with the blocks from the problem statement
## Note: takes a little over 1 minute to complete.
initial_state = State(['E','D','A'],['C','B'],'Inicio')
goal_state = State(['E','D','C','B','A'],[],'Estado Final')
s = general_search(initial_state,goal_state)
for algorithm in [s.profundidad]:
print(f'Running {algorithm.__name__} ...')
result = algorithm()
s.print_runtime()
s.print_performance()
result.print_family()
```
# Learning Objectives
- [ ] 3.3.6 *Understand how NoSQL database management system addresses the shortcomings of relational database management system (SQL).
- [ ] 3.3.7 *Explain the applications of SQL and NoSQL.
- [ ] 3.3.8 *Use a programming language to work with both SQL and NoSQL databases.
# References
1. Leadbetter, C., Blackford, R., & Piper, T. (2012). Cambridge international AS and A level computing coursebook. Cambridge: Cambridge University Press.
2. https://www.mongodb.com/compare/mongodb-dynamodb
Recall that a **database** is a collection of related data where all records have the same structure or collection of data stored in an organised or logical manner.
# 13.1 NoSQL databases
Relational databases (commonly referred to as SQL databases) work well with structured data since each table's **schema** (the precise description of the data to be stored and the relationships between them) is always clearly defined. However, with the increasing number of ways to gather and generate data, we often need to deal with unstructured data.
For example, a convenience store that frequently refreshes the services it provides may sell both mobile phones as well as groceries. To run the store, information about both mobile phones (e.g., model names and prices) and groceries (e.g., prices and descriptions) needs to be stored. In the future, the store may also start selling mobile phone subscription plans as well. Storing all this data in the same relational database may not be easy. In this case, non-relational databases, also referred to as NoSQL databases, can offer a better choice.
There are four main types of NoSQL databases (a short sketch contrasting the first two types follows this list):
- key-value databases. In this database, data is stored as a collection of key-value pairs in which a key serves as a unique identifier, e.g., Amazon DynamoDB. In this database, your query is limited to the key **only**, and the values retrieved by the key are opaque to the database.
- document databases. Document databases work like a hash table, but each key can point to an embedded key-value structure, also known as a **document**, instead of just a single value. (Recall that in a hash table, each key points to a single value or data item.) E.g., MongoDB.
- wide column databases. Data is stored in terms of columns instead of rows.
- graph databases. These databases use graph structures for semantic queries, with nodes, edges, and properties to represent and store data. E.g., neo4j.
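This sketch uses plain Python (not any particular database's API) to contrast the first two types: in a key-value store the value is opaque and can only be fetched by its key, while in a document store the fields inside each record can be matched individually. The sample records are invented for illustration:
```
import json

# key-value style: the whole record is an opaque string behind a single key
kv_store = {"phone:1001": json.dumps({"model": "X200", "price": 499})}
record = json.loads(kv_store["phone:1001"])   # must know the key to get anything back
print(record["price"])

# document style: records keep their structure, so individual fields can be queried
documents = [
    {"_id": 1, "type": "phone", "model": "X200", "price": 499},
    {"_id": 2, "type": "grocery", "name": "rice", "price": 3},
]
print([doc for doc in documents if doc["price"] < 100])
```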
# 13.2 Differences between SQL and Document Database
- Relational databases have a **fixed, predefined schema** that their tables follow, but NoSQL databases usually have **no predefined schema**; their structure is dynamic and can change easily.
- Relational databases contain tables while document databases like MongoDB contain collections. The data type of each field in a table is fixed for relational databases, but it is flexible for document databases like MongoDB.
- Relational databases represent data in tables and rows while document databases store data as collections of documents.
- For relational databases, joins are usually used to get data across tables, while for document databases like MongoDB there are usually no such joins. Thus it is easier to use relational databases for complex queries than NoSQL databases.
# 13.3 MongoDB
MongoDB is a very popular NoSQL document database, which uses `JSON` (JavaScript Object Notation)-like **documents** to store records. JSON has the format
>```python
> {
> <attribute_name_1>: <attribute_values_1>,
> <attribute_name_2>: <attribute_values_2>
> ....
> }
>```
which looks like a Python `dict` object.
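For example, a single JSON-like document can contain embedded key-value structures and lists, just like a nested Python `dict` (the field names here are invented for illustration):
```
# a document with an embedded sub-document and a list-valued field
order = {
    "order_id": 1001,
    "customer": {"name": "John Lim", "class": "18S01"},  # embedded document
    "items": ["notebook", "pen"],
}
print(order["customer"]["name"])   # nested fields are reached key by key
```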
The terminology used for MongoDB is a little different from SQL. Below is a table of terms in MongoDB with the corresponding terms in SQL.
<center>
| **SQL Term** | **MongoDB Term** |
|-|-|
| `Database` | `Database` |
| `Table` | `Collection` |
| `Row/Record` | `Document` |
| `Column/Field/Attribute` | `Field` |
</center>
## 13.3.1 Running MongoDB
After installation, open a command prompt and type `mongo` to run the MongoDB shell. To maintain access to the MongoDB databases, you need to **make sure that MongoDB is running**, i.e., do not close the MongoDB shell.
<center>
<img src="images/database_create.gif" width="1080" align="center"/>
</center>
> If you encounter an error, MongoDB folder might not have been added to the PATH environment variable. Click <a href = 'https://dangphongvanthanh.wordpress.com/2017/06/12/add-mongos-bin-folder-to-the-path-environment-variable/' >here</a> for troubleshooting.
Some useful commands to run on MongoDB shell
- `help` : get the available shell commands
- `show dbs` : show the currently available databases in MongoDB
- `use <db_name>` : set current database to `<db_name>`
- `db.createCollection(<collection_name>)` : create collection named `<collection_name>` in the database
- after you have set your current database, you can insert documents into the database by running `db.<collection_name>.insert(<json_obj>)`
- `show collections` : show the available collections in the current database
> Instead of creating a collection explicitly with `db.createCollection(<collection_name>)`, `db.<collection_name>.insert(<json_obj>)` will automatically create the collection when the document is added.
### Exercise
On MongoDB shell, create a database called `test_info` and insert the following JSON object as a document in the collection `Person` in the database.
>```python
>{
> 'name':'John Lim',
> 'class': '18S01',
> 'hobbies': ['running','kayaking','gaming']
>}
>```
```
#YOUR_CODE_HERE
```
## 13.3.2 Interacting with MongoDB with `pymongo`
Similar to relational databases, we need to know how to execute the important database operations (CRUD) with MongoDB as well. However, for MongoDB, we will skip the MongoDB shell commands and go straight to the commands in `pymongo`, which is a Python library for interacting with MongoDB databases (as warned earlier, keep MongoDB running or you will encounter errors).
## 13.8.1 Connecting to MongoDB database with `pymongo`
Roughly speaking, to work with the database,
1. We first **establish a connection** to the MongoDB server by creating a `pymongo.MongoClient` object pointing to `localhost` with the default port `27017`
2. Access the database through the client.
3. Access the collection through the database.
4. Do your query, insertion, updating and deletion.
### Example 26
The code below illustrates the process of connecting to the database `test_info` and accessing the collection `Person` with `pymongo`.
```
# We can actually do
# import pymongo
# but this means that at line 8, we'll have client = pymongo.MongoClient('localhost', 27017)
from pymongo import MongoClient
try:
client = MongoClient('localhost', 27017) #localhost is your local computer address 127.0.0.1
print("Connected successfully!!!")
except:
print("Could not connect to MongoDB")
db = client['test_info']
coll = db['Person']
# Note that for pymongo, we don't need to close the connection as it's done automatically for us.
```
# 13.8.2 CRUD operations with `pymongo`
Unlike `sqlite3`, which does CRUD operations by passing SQL statements into the `execute` command, the CRUD operations with `pymongo` are done through various methods on the objects found in `pymongo`. Some of these methods act on `pymongo.collection.Collection` objects; they include (a short example of the update methods follows this list):
- `insert_one()` : insert one document into a collection
- `insert_many()` : insert more than one document into a collection
- `find()` : query documents from the collection
- `update_one()` : update a document in the collection
- `update_many()` : update more than one document in the collection
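As a quick, hedged illustration of the update methods, the sketch below assumes a running local MongoDB server and the `test_info` database and `Person` collection used elsewhere in this chapter:
```
from pymongo import MongoClient

client = MongoClient('localhost', 27017)
coll = client['test_info']['Person']

# update_one() takes a filter document and an update document;
# the $set operator changes only the listed fields of the first matching document
result = coll.update_one({'name': 'Ben'}, {'$set': {'address': 'Park Lane 40'}})
print(result.modified_count)   # how many documents were actually changed

# update_many() applies the same update to every matching document
coll.update_many({'name': 'Ben'}, {'$set': {'address': 'Park Lane 40'}})
```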
## 13.8.2.1 Creating Database and Collection
Creating databases and collections in MongoDB with `pymongo` is a simple task. We just need to
- connect to a **running** MongoDB server,
- create a connection through `MongoClient` object
- access the database through the connection object by treating it like a Python `dict` object
- access the collection through the database object also by treating it like a Python `dict` object.
So the boilerplate code is as follows:
```
from pymongo import MongoClient
try:
client = MongoClient('localhost', 27017) #localhost is your local computer address 127.0.0.1
print("Connected successfully!!!")
except:
print("Could not connect to MongoDB")
db = client['<DATABASE_NAME>'] # where <DATABASE_NAME> should be replaced with appropriate string
coll = db['<COLLECTION_NAME>']
print(type(coll))
```
# 13.8.2.2 Creating Documents in a collection
There are two ways to insert documents into a MongoDB database with `pymongo`, and they follow these steps:
1. Access the collection that you want to insert document(s) to
2. Use either,
    - `insert_one(doc)` : where `doc` is a JSON-like object (think Python `dict`). Note that the keys in the key-value pairs are string objects.
    - `insert_many(docs)` : where `docs` is a Python `list` object containing the JSON-like objects.
> When inserting documents in MongoDB, there is a special field called `_id` that acts like a primary key; it is automatically created and inserted as an attribute of the document when it is not supplied. In the event we need to specify the `_id` attribute, we should include it in the JSON objects we're passing into the methods.
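A minimal sketch of both methods, assuming the same connection boilerplate as above (the sample documents are invented and unrelated to the exercise below):
```
from pymongo import MongoClient

client = MongoClient('localhost', 27017)
coll = client['test_info']['Person']

# insert a single document; MongoDB generates the _id automatically
result = coll.insert_one({'name': 'Mary', 'address': 'Highway 37'})
print(result.inserted_id)

# insert several documents at once from a list of dicts
results = coll.insert_many([{'name': 'Peter', 'address': 'Lowstreet 27'},
                            {'name': 'Susan', 'address': 'One way 98'}])
print(results.inserted_ids)
```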
### Exercise
Add the following documents into `Person` collection in the `test_info` database using `insert_one` and `insert_many` respectively.
1. `{ "name": "Ben", "address": "Park Lane 38"}`,
2. `{ "name": "Amy", "address": "Apple st 652"}`, `{ "name": "Sandy", "address": "Ocean blvd 2"}`, `{ "name": "Ben", "address": "Park Lane 38"}`.
```
#YOUR_CODE_HERE
```
# 13.8.2.3 Query Documents in a collection
Querying for documents in a collection is done through the `find()` method of a `pymongo.collection.Collection` object. It acts like the `SELECT` statement in SQL.
- when no parameter is passed into `find()`, it will return a `Cursor` object that contains **all** the documents in the collection.
- the interesting thing happens when you have specific attributes that you want to match in the collection, and this is where things could get a little complicated. To do this, we pass `{<attribute_name_1>:<value_1>, <attribute_name_2>:<value_2>, ...}` as an argument, where `...` represents more attribute-value pairs (see the sketch below).
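A minimal sketch of both uses of `find()`, again assuming the `test_info` database and `Person` collection from the earlier examples:
```
from pymongo import MongoClient

client = MongoClient('localhost', 27017)
coll = client['test_info']['Person']

# no argument: the Cursor iterates over every document in the collection
for doc in coll.find():
    print(doc)

# with a filter document: only documents whose fields match are returned
for doc in coll.find({'name': 'Ben'}):
    print(doc['address'])
```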
### Exercise
Using `find()`, query the `Person` collection in the `test_info` database to retrieve:
1. all documents in the collection,
2. only the documents where `"name"` is `"Ben"`.
# 13.4 Situations to use SQL or NoSQL
The choice of whether to use a SQL or NoSQL database depends on the type of data being stored as well as the nature of tasks that the database is required to perform.
SQL databases should be used if:
- The data being stored has a fixed schema.
- Complex and varied queries will be frequently performed.
- The atomicity, consistency, isolation and durability (ACID) properties are critical to the database.
- There will be a high number of simultaneous transactions.
NoSQL databases should be used if:
- The data being stored has a dynamic schema (i.e., unstructured data with flexible data types).
- Data storage needs to be performed quickly.
- There will be an extremely large amount of data (i.e., Big Data).
```
import sqlite3
connection = sqlite3.connect("./resources/library_copy.db")
connection.execute("INSERT INTO Borrower(FirstName, Surname, Contact) VALUES('Alex', 'Ong', 98765432)")
#Sometimes it can be clearer if you split your SQL statements into multiple lines and you can use `\` in python for this purpose
connection.execute( "UPDATE Borrower SET Surname = 'Lim' " +\
"WHERE FirstName = 'Alex'")
connection.close()
#YOUR_ANSWER_HERE
```
2. Try the following code and check the database again. What do you observe?
```
import sqlite3
connection = sqlite3.connect("./resources/library_copy.db")
connection.execute("INSERT INTO Borrower(FirstName, Surname, Contact) VALUES('Alex', 'Ong', 98765432)")
connection.execute( "UPDATE Borrower SET Surname = 'Lim' " +\
"WHERE FirstName = 'Alex'")
#addded the following method
connection.commit()
connection.close()
#YOUR_ANSWER_HERE
```
> It is important to run the commit() method to save the changes to the database. This is equivalent to the action `Write Changes` we used in DB4S.
As an alternative to saving the database manually using the `commit()` method, similar to file I/O, we can also utilize the `with` statement in Python.
```
import sqlite3
connection = sqlite3.connect("./resources/library_copy.db")
with connection:
connection.execute("INSERT INTO Borrower(FirstName, Surname, Contact) VALUES('Alex', 'Ong', 98765432)")
connection.execute( "UPDATE Borrower SET Surname = 'Lim' " +\
"WHERE FirstName = 'Alex'")
#commit() method to save the changes is no longer required.
#connection to database still need to be closed
connection.close()
```
### Exercise 28 [`INSERT`, `DELETE`, `UPDATE`]
1. Try out the following code and check the database with DB4S.
- What changes do you expect from executing the code?
- What do you observe about the database?
```
import sqlite3
connection = sqlite3.connect("./resources/library_copy.db")
connection.execute("INSERT INTO Borrower(FirstName, Surname, Contact) VALUES('AlexA', 'Ong', 98765432)")
connection.execute("INSERT INTO Borrower(FirstName, Surname, Contact) VALUES('VijayA', 'Singh', 91919191)")
connection.execute("INSERT INTO Borrower(FirstName, Surname, Contact) VALUES('AlexB', 'Ong', 98765432)")
connection.execute("INSERT INTO Borrower(FirstName, Surname, Contact) VALUES('VijayB', 'Singh', 91919191)")
connection.commit()
connection.close()
```
2. Try out the following code and check the database again with DB4S.
- What changes do you expect from executing the code?
- What do you observe about the database?
# 13.3 Advantages of NoSQL Databases over Relational Databases
- Relational databases have a predefined schema that is difficult to change. Even if you wish to add a field to a small number of records, you still need to include the field for the entire table. Therefore, it can be difficult to support the processing of unstructured data using relational databases compared to NoSQL databases.
- Unlike NoSQL databases, relational databases do not usually support hierarchical data storage, where less frequently-used data is moved to cheaper, slower storage devices. This means that the cost of storing data in a relational database is more expensive than storing the same amount of data in a NoSQL database.
- Relational databases are mainly vertically scalable while NoSQL databases are mainly horizontally scalable. Vertically scalable means that improving the performance of a relational database server usually requires upgrading an existing server with faster processors and more memory. Such high-performance components can be expensive and upgrades are limited by the capacity of a single machine. On the other hand, horizontally scalable means that the performance of a NoSQL database can be improved by simply increasing the number of servers. This is relatively cheaper as mass-produced average-performance computers are easily available at low prices.
- Relational databases are stored in a server, which makes the database unavailable when the server fails. NoSQL databases are designed to take advantage of multiple servers so that if one server fails, the other servers can continue to support applications.
```
import sqlite3
connection = sqlite3.connect("./resources/library_copy.db")
connection.execute("INSERT INTO Borrower(FirstName, Surname, Contact) VALUES('AlexC', 'Ong', 98765432)")
connection.execute("INSERT INTO Borrower(FirstName, Surname, Contact) VALUES('VijayC', 'Singh', 91919191)")
#the following line is added
connection.rollback()
connection.execute("INSERT INTO Borrower(FirstName, Surname, Contact) VALUES('AlexD', 'Ong', 98765432)")
connection.execute("INSERT INTO Borrower(FirstName, Surname, Contact) VALUES('VijayD', 'Singh', 91919191)")
connection.commit()
connection.close()
```
> The `rollback()` method undoes the changes to the database. This is equivalent to the `Revert Changes` action we used in DB4S.
### Example 29 [`CREATE TABLE, DROP TABLE`]
Try out the following code blocks and check the database with DB4S. What do you expect the code blocks do?
```
import sqlite3
connection = sqlite3.connect("./resources/newfile.db")
connection.execute("CREATE TABLE Book(" +\
"ID INTEGER PRIMARY KEY,
"Title TEXT" +\
")"
)
connection.execute("CREATE TABLE BookToo(" +\
"ID INTEGER PRIMARY KEY,
"Title TEXT" +\
")"
)
connection.commit()
connection.close()
import sqlite3
connection = sqlite3.connect("./resources/newfile.db")
connection.execute("DROP TABLE Book")
connection.commit()
connection.close()
```
The last of the CRUD operations we will discuss is the Read/Retrieve operation. We will show 4 ways to do this with the `sqlite3` module.
1. iterate the `Cursor` object, which is also created when we run `execute()` method on `Connection` object,
2. use the `fetchone()` method of the `Cursor` object.
3. use the `fetchall()` method of the `Cursor` object.
4. setting the `row_factory` attribute of the `Connection` object to `sqlite3.Row`. `Row` provides both index-based and case-insensitive name-based access to columns and is most useful when we want name-based access to columns.
### Example 30 [`SELECT`]
Try out the following code block 3 times with appropriate commenting and uncommenting of the relevant parts of the code. What can you observe about the type of the output given by each approach?
```
import sqlite3
connection = sqlite3.connect("./resources/library_copy.db")
cursor = connection.execute("SELECT ID, FirstName FROM Borrower")
#Approach 1
for row in cursor:
print(row)
#Approach 2
row = cursor.fetchone()
while row is not None:
print(row)
row = cursor.fetchone()
#Approach 3
rows = cursor.fetchall()
print(rows)
connection.close()
#YOUR_ANSWER_HERE
```
### Example 31 [`SELECT`]
Try out the following code block 5 times with appropriate commenting and uncommenting of the relevant parts of the code. What can you observe about each output?
```
import sqlite3
connection = sqlite3.connect("./resources/library_copy.db")
#setting the `row_factory` attribute of `Connection` object
connection.row_factory = sqlite3.Row
cursor = connection.execute("SELECT ID, FirstName FROM Borrower")
#Try 1
for row in cursor:
print(row)
#Try 2
row = cursor.fetchone()
while row is not None:
print(row)
row = cursor.fetchone()
#Try 3
rows = cursor.fetchall()
print(rows)
#Try 4
for row in cursor:
    print(row['ID'])
    print(row['FirstName'])
#Try 5
row = cursor.fetchone()
while row is not None:
print(row['ID'])
print(row['FirstName'])
row = cursor.fetchone()
connection.close()
```
The main advantage of using `Row` objects is that they are more flexible as they behave like `dict` objects; we can index values by column name instead of relying on the order of columns in the original `SELECT` statement.
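A small sketch of this flexibility, reusing the same database file as the examples above:
```
import sqlite3

connection = sqlite3.connect("./resources/library_copy.db")
connection.row_factory = sqlite3.Row

row = connection.execute("SELECT ID, FirstName FROM Borrower").fetchone()
if row is not None:
    print(row["FirstName"])   # name-based access, case-insensitive
    print(row[1])             # the same column accessed by position
    print(row.keys())         # column names carried by the Row object
connection.close()
```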
# 13.8.3 SQL Injection Protection with `sqlite3`
SQL injection is a web security vulnerability that allows an attacker to interfere with the queries that an application makes to its SQL database. It generally allows an attacker to view data that they are not normally able to retrieve. This might include data belonging to other users, or any other data that the application itself is able to access. In many cases, an attacker can modify or delete this data, causing persistent changes to the application's content or behavior.
Consider a shopping application that displays products in different categories. When the user clicks on the Gifts category, their browser requests the URL:
>```
>
>https://insecure-website.com/products?category=Gifts
>
>```
This causes the application to make an SQL query to retrieve details of the relevant products from the database:
>```
>
>SELECT * FROM products WHERE category = 'Gifts' AND released = 1
>
>```
Note that we can presume that:
- This SQL query asks the database to return all details (`*`) from the `products` table where the category is `"Gifts"` and `released` is `1`.
- The restriction `released = 1` is being used to hide products that are not released. For unreleased products, presumably `released = 0`.
The application doesn't implement any defenses against SQL injection attacks, so an attacker can construct an attack like:
>```
>
>https://insecure-website.com/products?category=Gifts'--
>
>```
This results in the SQL query:
>```
>
>SELECT * FROM products WHERE category = 'Gifts'--' AND released = 1
>
>```
The key thing here is that the double-dash sequence `--` is a comment indicator in SQL, which means that the rest of the query is interpreted as a comment. This effectively removes the remainder of the query, so it no longer includes `AND released = 1`. This means that all products have the potential to be displayed, including unreleased products where `released = 0`.
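In Python terms, the vulnerability appears whenever the user-supplied value is pasted directly into the SQL text, for example with string concatenation. The sketch below only builds and prints the resulting query string (the table and column names follow the example above); the parameter substitution technique in section 13.8.3.1 avoids this, because the input is never spliced into the SQL text:
```
# UNSAFE: the attacker-controlled value becomes part of the SQL statement itself
category = "Gifts'--"
query = "SELECT * FROM products WHERE category = '" + category + "' AND released = 1"
print(query)
# SELECT * FROM products WHERE category = 'Gifts'--' AND released = 1
# everything after -- is a comment, so the released = 1 restriction disappears
```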
Here's another example from xkcd.
<center>
<img src="https://imgs.xkcd.com/comics/exploits_of_a_mom.png" width="800" align="center"/>
</center>
## 13.8.3.1 Parameter Substitution
From the SQL injection example above, we see that user input should not be taken wholesale, and it is a good idea to first run a validity check on it before it is passed to the SQL statement. As such, we can use **parameter substitution**, which makes use of the `?` symbol and the optional arguments of the `execute()` method in `sqlite3`.
### Example 32
Consider the following snippet of Python code to delete records in the SQL database where `ID` is between 2 and 4.
```
# The symbols `?` are placeholders for user inputs
# the second argument in the execute() method is a tuple of user inputs to use for substitution
# Parameter substitution follows the same order in which the placeholders appear in the SQL
execute("DELETE FROM Book WHERE ID > ? AND ID < ?", (2, 4))
```
### Example 33
The following program can be used to enter new borrowers into the `library.db` database:
```
import sqlite3
connection = sqlite3.connect("library.db")
while True:
first = input("Enter first name: ")
surname = input("Enter surname: ")
contact = int(input("Enter contact number: "))
#Note that at this point in the code, we can run the validation checks on the values first, surname and contact
    #before we pass them to the SQL statement below
connection.execute("INSERT INTO Borrower(FirstName, Surname, Contact) VALUES(?, ?, ?)", (first, surname, contact))
connection.commit()
if input("Continue (Y/N)?").upper() != 'Y':
break
connection.close()
```
```
import numpy as np
from astropy.table import Table
import pandas as pd
import h5py
from matplotlib import pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()
ra_lims = (89.0,91.0)
dec_lims = (-26.0, -28.0)
f = h5py.File('/nfs/farm/g/desc/u2/data/imsim_deep/DC1_coadd_catalogs/GalaxyTruth.hdf', "r")
galaxy_data_keys = f.keys()
del f
f = h5py.File('/nfs/farm/g/desc/u2/data/imsim_deep/DC1_coadd_catalogs/StarTruth.hdf', "r")
star_data_keys = f.keys()
del f
galaxy_datas = []
#star_datas = []
for key in galaxy_data_keys:
g = pd.read_hdf('/nfs/farm/g/desc/u2/data/imsim_deep/DC1_coadd_catalogs/GalaxyTruth.hdf',\
key=key)
cut_g = g[np.logical_and(np.logical_and(g['raICRS']> np.min(ra_lims),g['raICRS']< np.max(ra_lims) ),\
np.logical_and(g['decICRS']> np.min(dec_lims),g['decICRS']< np.max(dec_lims) ))]
if len(cut_g)>0:
galaxy_datas.append(cut_g)
galaxy_data = pd.concat(galaxy_datas)
#galaxy_data = pd.read_hdf('/nfs/farm/g/desc/u2/data/imsim_deep/DC1_coadd_catalogs/GalaxyTruth.hdf', key = 'truth20')
for key in star_data_keys:
s = pd.read_hdf('/nfs/farm/g/desc/u2/data/imsim_deep/DC1_coadd_catalogs/StarTruth.hdf', key = key)
cut_s = s[np.logical_and(np.logical_and(s['raICRS']> np.min(ra_lims),s['raICRS']< np.max(ra_lims) ),\
np.logical_and(s['decICRS']> np.min(dec_lims),s['decICRS']< np.max(dec_lims) ))]
if len(cut_s)>0:
galaxy_datas.append(cut_s)
galaxy_data = pd.concat(galaxy_datas)
f = h5py.File("/nfs/farm/g/desc/u2/data/imsim_deep/DC1_coadd_catalogs/coadd-DC1-imsim-dithered.hdf", "r")
catalog_keys = f.keys()
del f
catalogs = []
ras = []
decs = []
for key in catalog_keys:
c = pd.read_hdf('/nfs/farm/g/desc/u2/data/imsim_deep/DC1_coadd_catalogs/coadd-DC1-imsim-dithered.hdf',\
key=key)
cut_c = c[np.logical_and(np.logical_and(c['coord_ra']*180/np.pi> np.min(ra_lims),c['coord_ra']*180/np.pi< np.max(ra_lims) ),\
np.logical_and(c['coord_dec']*180/np.pi> np.min(dec_lims),c['coord_dec']*180/np.pi< np.max(dec_lims) ))]
if len(cut_c)>0:
catalogs.append(cut_c)
catalog = pd.concat(catalogs)
#ras = np.array(ras)
#decs = np.array(decs)
```
```
plt.scatter(galaxy_data['raICRS'], galaxy_data['decICRS'])
```
```
#plt.scatter(star_data['raICRS'], star_data['decICRS'], color = 'r', alpha = 0.2)
plt.scatter(catalog['coord_ra']*180/np.pi, catalog['coord_dec']*180/np.pi, alpha = 0.2)
plt.scatter(galaxy_data['raICRS'], galaxy_data['decICRS'],color = 'g')
#plt.scatter(star_data['raICRS'], star_data['decICRS'], color = 'r', alpha = 0.2)
plt.scatter(catalog['coord_ra']*180/np.pi, catalog['coord_dec']*180/np.pi)
plt.scatter(galaxy_data['raICRS'], galaxy_data['decICRS'],color = 'g', alpha = 0.2, s= 100-3*galaxy_data['r_mag'])
plt.xlim([90.25, 90.3])
plt.ylim([-27.25, -27.3])
len(catalog)
len(galaxy_data)
catalog.to_hdf('small_output_data.hdf5', key = 'output1')
galaxy_data.to_hdf('small_input_data.hdf5', key = 'input1')
mag_limit = 25
catalog = catalog[np.logical_and(~np.isnan(catalog['cmodelMag']), catalog['cmodelMag']< mag_limit)]
galaxy_data = galaxy_data[np.logical_and(~np.isnan(galaxy_data['r_mag']), galaxy_data['r_mag']<mag_limit)]
bins = np.linspace(13, 40, 101)
plt.hist(catalog['cmodelMag'][~np.isnan(catalog['cmodelMag'])], bins = bins)
plt.hist(galaxy_data['r_mag'][~np.isnan(galaxy_data['r_mag'])], alpha = 0.3, bins = bins)
plt.yscale('log')
#plt.scatter(star_data['raICRS'], star_data['decICRS'], color = 'r', alpha = 0.2)
plt.scatter(catalog['coord_ra']*180/np.pi, catalog['coord_dec']*180/np.pi, s = 10+(30-catalog['cmodelMag'])**2)
plt.scatter(galaxy_data['raICRS'], galaxy_data['decICRS'],color = 'g', alpha = 0.5, s= 10+(30-galaxy_data['r_mag'])**2)
plt.xlim([90.25, 90.3])
plt.ylim([-27.25, -27.3])
```
```
import pandas as pd
from bokeh.plotting import figure
from bokeh.layouts import layout, widgetbox
from bokeh.models import ColumnDataSource, HoverTool, BoxZoomTool, ResetTool, PanTool
from bokeh.models.widgets import Slider, Select, TextInput, Div
from bokeh.models import WheelZoomTool, SaveTool, LassoSelectTool
from bokeh.io import curdoc
from functools import lru_cache
@lru_cache()
def load_data():
df = pd.read_csv('strava_data.csv', index_col=0)
return df
run_data_df = load_data()
# all_weeks = list(set(list(load_data()['week'])))
all_weeks = list(load_data()['week'].unique())
X_AXIS = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
desc = Div(text="Weekly runs", width=800)
weeks_runs = Select(title="Runs", options=all_weeks, value="All")
source = ColumnDataSource(data=load_data())
hover = HoverTool(tooltips=[
("Week", "@week"),
("Kilometers", "@kms"),
])
TOOLS = [
hover, BoxZoomTool(), LassoSelectTool(), WheelZoomTool(), PanTool(),
ResetTool(), SaveTool()
]
p = figure(
plot_height=600,
plot_width=700,
title="Weekly running",
tools=TOOLS,
    x_axis_label="day of the week",
    y_axis_label="kms",
toolbar_location="above",
x_range=X_AXIS,
x_minor_ticks=2, y_range=(0, 15),)
p.vbar(x='day_of_week', bottom=0, top='kms',
color='blue', width=0.75,
legend='Actual', source=source)
def select_weeks():
""" Use the current selections to determine which filters to apply to the
data. Return a dataframe of the selected data
"""
df = load_data()
    # Determine what has been selected for each widget
# cumulative_week_val = weeks_runs.value
week_val = weeks_runs.value
# kilometers_val = kms.value
# Filter by week and weekly_actual_cumulative
if week_val == "week 01":
selected = df[df.week == 'week 01']
else:
selected = df[(df.week == week_val)]
# Further filter by string in title if it is provided
# if title_val != "":
# selected = selected[selected.title.str.contains(title_val, case=False) == True]
# # Example showing how to update the description
desc.text = f"Week: {week_val}"
return selected
def update():
""" Get the selected data and update the data in the source
"""
df_active = select_weeks()
source.data = ColumnDataSource(data=df_active).data
def selection_change(attrname, old, new):
""" Function will be called when the poly select (or other selection tool)
is used. Determine which items are selected and show the details below
the graph
"""
selected = source.selected["1d"]["indices"]
df_active = select_weeks()
if selected:
data = df_active.iloc[selected, :]
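        # NOTE: `col_order` and `details` are used below but are not defined in
        # this snippet; they are assumed to be a column-ordering list and a Div
        # widget created elsewhere in the app.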
temp = data.set_index("week").T.reindex(index=col_order)
details.text = temp.style.render()
else:
details.text = "Selection Details"
controls = [weeks_runs]
for control in controls:
control.on_change("value", lambda attr, old, new: update())
source.on_change("selected", selection_change)
inputs = widgetbox(*controls, sizing_mode="fixed")
l = layout([[desc], [weeks_runs, p]], sizing_mode="fixed")
update()
curdoc().add_root(l)
curdoc().title = "Yearly run analysis"
run_data_df.head(10)
```
```
import pathlib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from imblearn.under_sampling import EditedNearestNeighbours
def make_data(sep):
"""
`make_data` creates artificial data using sklearn `make_classification` function.
    It returns a pandas DataFrame that consists of 2 features, and a pandas Series
    with the 2-class target.
sep [float]: The factor multiplying the hypercube size. Larger values spread out the
clusters/classes and make the classification task easier.
"""
X, y = make_classification(
n_samples=1000,
n_features=2,
n_redundant=0,
n_clusters_per_class=1,
weights=[0.99],
class_sep=sep,
random_state=42
)
X = pd.DataFrame(X, columns=["varA", "varB"])
y = pd.Series(y)
return X, y
# Visualization of data used in this notebook
for sep in np.arange(0, 3):
X, y = make_data(sep)
print(f"\n {y.value_counts()}")
sns.scatterplot(data=X, x="varA", y="varB", hue=y)
plt.title(f"Separation {sep}")
plt.show()
```
## Undersampling with edited nearest neighbours
### Well-separated classes
```
X, y = make_data(sep=2)
# auto undersamples every class but the minority class
# kind_sel specifies the mode of agreeing of the method
enn = EditedNearestNeighbours(sampling_strategy="auto", n_neighbors=3, kind_sel="all")
X_resampled, y_resampled = enn.fit_resample(X, y)
X.shape, y.shape
y.value_counts()
X_resampled.shape, y_resampled.shape
y_resampled.value_counts()
# Original dataset
sns.scatterplot(data=X, x="varA", y="varB", hue=y)
plt.title("Original dataset")
sns.scatterplot(data=X_resampled, x="varA", y="varB", hue=y_resampled)
plt.title("Undersampled dataset")
```
### Less separable dataset
In a less separable dataset, CNN will yield a less-condensed dataset, but Tomek links will remove some of the "noisy" points
```
X, y = make_data(sep=0.3)
enn = EditedNearestNeighbours(sampling_strategy="auto", n_neighbors=3, kind_sel="all")
X_resampled, y_resampled = enn.fit_resample(X, y)
print(f"Original shape: {X.shape, y.shape}\n")
print(f"Undersampled dataset shape: {X_resampled.shape, y_resampled.shape}")
sns.scatterplot(data=X, x="varA", y="varB", hue=y)
plt.title("Original dataset")
sns.scatterplot(data=X_resampled, x="varA", y="varB", hue=y_resampled)
plt.title("Undersampled dataset")
```
# KDD 2004
## Condensed dataset
```
# Dataset path
dataset_folder = pathlib.Path("../../../Datasets")
dataset_file = "kdd2004.csv"
dataset_path = dataset_folder/dataset_file
if not dataset_path.exists():
raise FileExistsError(f"This path doesn't correspond to any file: {dataset_path.resolve()}")
# We only take a chunk of the original
# dataset to speed up computations as this is just an example
df = pd.read_csv(dataset_path).sample(10000)
df["target"] = df["target"].map({-1:0, 1:1})
print(f"The shape of the dataset: {df.shape}\n")
print(f"The distribution of the target column:\n {df.target.value_counts(normalize=True)}")
X_train, X_test, y_train, y_test = train_test_split(df.drop(labels=["target"], axis=1),
df["target"],
train_size=0.7)
X_train.shape, X_test.shape
enn = EditedNearestNeighbours(sampling_strategy="auto", n_neighbors=3, kind_sel="all")
X_resampled, y_resampled = enn.fit_resample(X_train, y_train)
print(f"Original train dataset shape: {X_train.shape, y_train.shape}\n")
print(f"Orignal train dataset target distribution:\n {y_train.value_counts()}\n")
print(30 * "-")
print(f"\nResampled train dataset shape: {X_resampled.shape, y_resampled.shape}\n")
print(f"Resampled train dataset target distribution:\n {y_resampled.value_counts()}")
sns.scatterplot(data=X_train, x="0", y="1", hue=y_train, alpha=0.5)
plt.title("Original dataset")
sns.scatterplot(data=X_resampled, x="0", y="1", hue=y_resampled, alpha=0.5)
plt.title("Condensed dataset")
```
## Models performance comparison
Practically, there isn't much difference between the two datasets; the comparison is not really worth it
```
def build_run_rf(X_train, X_test, y_train, y_test):
rf = RandomForestClassifier(n_estimators=200, random_state=42, max_depth=4)
rf.fit(X_train, y_train)
print("Train set\n")
train_pred = rf.predict_proba(X_train)
print(f"Random forest ROC-AUC: {roc_auc_score(y_train, train_pred[:, 1])}\n")
print("Test set\n")
test_pred = rf.predict_proba(X_test)
print(f"Random forest ROC-AUC: {roc_auc_score(y_test, test_pred[:, 1])}")
# Original dataset
build_run_rf(X_train, X_test, y_train, y_test)
# Resampled dataset
build_run_rf(X_resampled, X_test, y_resampled, y_test)
```
# Transformer
<div class="alert alert-info">
This tutorial is available as an IPython notebook at [Malaya/example/transformer](https://github.com/huseinzol05/Malaya/tree/master/example/transformer).
</div>
Malaya provides a basic interface for pretrained Transformer encoder models specific to Malay, local social media slang and Manglish; we call it Transformer-Bahasa. Below is the list of datasets we pretrained on,
Standard Bahasa dataset,
1. [Malay-dataset/dumping](https://github.com/huseinzol05/Malay-Dataset/tree/master/dumping).
2. [Malay-dataset/pure-text](https://github.com/huseinzol05/Malay-Dataset/tree/master/pure-text).
Bahasa social media,
1. [Malay-dataset/dumping/instagram](https://github.com/huseinzol05/Malay-Dataset/tree/master/dumping/instagram).
2. [Malay-dataset/dumping/twitter](https://github.com/huseinzol05/Malay-Dataset/tree/master/dumping/twitter).
Singlish / Manglish,
1. [Malay-dataset/dumping/singlish](https://github.com/huseinzol05/Malay-Dataset/tree/master/dumping/singlish-text).
2. [Malay-dataset/dumping/singapore-news](https://github.com/huseinzol05/Malay-Dataset/tree/master/dumping/singapore-news).
**This interface does not allow us to do custom training**.
If you want to download a pretrained Transformer-Bahasa model and use it for custom transfer learning, you can download it here, https://github.com/huseinzol05/Malaya/tree/master/pretrained-model/, along with some notebooks to help you get started.
Or you can simply use [hugging-face transformers](https://huggingface.co/models?filter=ms) to try transformer models from Malaya; the available models are listed at https://huggingface.co/models?filter=ms
```
from IPython.core.display import Image, display
display(Image('huggingface.png', width=500))
%%time
import malaya
```
### list Transformer-Bahasa available
```
malaya.transformer.available_transformer()
strings = ['Kerajaan galakkan rakyat naik public transport tapi parking kat lrt ada 15. Reserved utk staff rapid je dah berpuluh. Park kereta tepi jalan kang kene saman dgn majlis perbandaran. Kereta pulak senang kene curi. Cctv pun tak ada. Naik grab dah 5-10 ringgit tiap hari. Gampang juga',
'Alaa Tun lek ahhh npe muka masam cmni kn agong kata usaha kerajaan terdahulu sejak selepas merdeka',
"Orang ramai cakap nurse kerajaan garang. So i tell u this. Most of our local ppl will treat us as hamba abdi and they don't respect us as a nurse"]
```
### Load XLNET-Bahasa
```
xlnet = malaya.transformer.load(model = 'xlnet')
```
I have some random sentences copied from Twitter, found by searching with the `kerajaan` keyword.
#### Vectorization
Change a string or batch of strings to latent space / vectors representation.
```python
def vectorize(self, strings: List[str]):
"""
Vectorize string inputs.
Parameters
----------
strings : List[str]
Returns
-------
result: np.array
"""
```
```
v = xlnet.vectorize(strings)
v.shape
```
#### Attention
```python
def attention(self, strings: List[str], method: str = 'last', **kwargs):
"""
Get attention string inputs from bert attention.
Parameters
----------
strings : List[str]
method : str, optional (default='last')
Attention layer supported. Allowed values:
* ``'last'`` - attention from last layer.
* ``'first'`` - attention from first layer.
* ``'mean'`` - average attentions from all layers.
Returns
-------
result : List[List[Tuple[str, float]]]
"""
```
You can give a list of strings or a single string to get the attention; in this documentation, I just want to use a single string.
```
xlnet.attention([strings[1]], method = 'last')
xlnet.attention([strings[1]], method = 'first')
xlnet.attention([strings[1]], method = 'mean')
```
#### Visualize Attention
Before using attention visualization, we need to load D3 into our Jupyter notebook first. This visualization is borrowed from https://github.com/jessevig/bertviz .
```python
def visualize_attention(self, string: str):
"""
Visualize attention.
Parameters
----------
string : str
"""
```
```
%%javascript
require.config({
paths: {
d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.8/d3.min',
jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min',
}
});
xlnet.visualize_attention('nak makan ayam dgn husein')
```
_I attached a screenshot, since readthedocs cannot render the javascript._
```
from IPython.core.display import Image, display
display(Image('xlnet-attention.png', width=300))
```
**All attention models able to use these interfaces.**
### Load ELECTRA-Bahasa
Feel free to use other models.
```
electra = malaya.transformer.load(model = 'electra')
electra.attention([strings[1]], method = 'last')
```
## Importing the libraries
```
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
```
## Data Preprocessing
The dataset has 10 features. Each row corresponds to a wine brand.
Customer Segment (the target column) --> corresponds to a group of customers (clusters) that have similar preferences for a similar group of wines.
**Recommender System**: predict which customer segment a wine belongs to, so we can recommend that wine to that particular customer segment.
### Importing the Dataset
```
data = pd.read_csv("Wine.csv")
X = data.iloc[:,:-1].values
y = data.iloc[:,-1].values
```
### Splitting the dataset into the Training set and Test set
```
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
```
### Feature Scaling
```
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
X_train.shape
```
## Applying Kernel PCA
```
# There are too many features(dimensions) on our dataset, so lets reduce some
from sklearn.decomposition import KernelPCA
k = 2
kpca = KernelPCA(n_components=k, kernel='rbf') # kernel='rbf' - gaussian kernel (radial basis function)
# n_components (principal components) --> no. of features to keep
# start from k=2 --> 2 extracted features (z1,z2) then visualize the new dataset. If visualization is poor, increase the value of k
# feature scaling and dimensionality reduction --> both must be applied on cross-validation set and test set (only applied on features --> X)
X_train = kpca.fit_transform(X_train)
X_test = kpca.transform(X_test)
X_train.shape
```
## Training the Logistic Regression model on the Training set
```
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train, y_train)
```
## Confusion Matrix
Find no. of incorrect predictions and correct predictions
```
from sklearn.metrics import confusion_matrix, accuracy_score
y_pred = classifier.predict(X_test) # Predict results on new observations
confuse_matrix = confusion_matrix(y_test,y_pred) # compare actual labels (test) with predicted labels
print(confuse_matrix)
accuracy_score(y_test, y_pred)
# confusion matrix of 3 rows and 3 columns --> because we have 3 classes here
# Function to plot confusion matrix using Seaborn's heatmap()
def plot_confusion_matrix(confuse_matrix):
fig,ax = plt.subplots(figsize=(8,6))
# Set the font scale
sns.set(font_scale=1.5)
ax = sns.heatmap(
confuse_matrix,
        annot=True, # Annotate the boxes
cbar=False
)
    plt.ylabel("True label")
    plt.xlabel("Predicted label")
plot_confusion_matrix(confuse_matrix)
```
## Visualising the Training set results
```
from matplotlib.colors import ListedColormap
plt.figure(figsize=(8,6))
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green', 'blue')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green', 'blue'))(i), label = j)
plt.title('Logistic Regression (Training set)')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.legend()
plt.show()
# Kernel PCA has different extracted features than PCA
```
PC1 --> z1
PC2 --> z2
The plot axes are the two extracted features, which separate the customer-segment clusters.
## Visualising the Test set results
```
from matplotlib.colors import ListedColormap
plt.figure(figsize=(8,6))
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green', 'blue')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green', 'blue'))(i), label = j)
plt.title('Logistic Regression (Test set)')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.legend()
plt.show()
```
```
import pandas as pd
import os
os.chdir('../')
from kernel import utils, data_processing, modelling
```
# 0. ad hoc Preprocessing
## 1. Load Training Data
```
df = pd.read_parquet('./data/train_transaction.parquet').pipe(utils.detect_id_col)
df = pd.read_parquet('./data/train_transaction.parquet').pipe(utils.detect_id_col)
df = pd.concat([df[df.isFraud == 1], df[df.isFraud == 0].sample(frac=0.1)], axis=0)
df_fea, labels = utils.split_feature_target(df, 'isFraud')
df_fea.head()
cat_cols = [f'card{i}' for i in range(1, 7)] + [f'M{i}' for i in range(1, 10)]
cat_cols += ['P_emaildomain', 'R_emaildomain', 'ProductCD', 'addr1', 'addr2']
```
# 1. Build LR Meta Feature
```
dp = data_processing.GenericDataProcessor(df_fea, 'fraud', True, 15, cat_cols=cat_cols)
clf, params_lr, est_lr = modelling.train_lr_classifier(
dp.data.values, labels.values, n_iter=30)
lr = est_lr(**params_lr).fit(dp.data.values, labels.values)
lr.predict_proba(dp.data.values)[:, 1]
df_fea['lr_meta'] = lr.predict_proba(dp.data.values)[:, 1]
lr_meta_train = df_fea.lr_meta.to_dict()
df_fea = None
df['lr_meta'] = df.index.map(lr_meta_train).tolist()
train_data_path = './data/train_transactions_lr_meta.parquet'
df.reset_index().to_parquet(train_data_path)
df = None
df_inf = pd.read_csv('./data/test_transaction.csv')
df_inf['lr_meta'] = lr.predict_proba(dp.transform(df_inf))[:, 1]
lr_meta_test = df_inf.lr_meta.to_dict()
df_inf['lr_meta'] = df_inf.index.map(lr_meta_test).tolist()
inf_data_path = './data/test_transactions_lr_meta.parquet'
df_inf.to_parquet(inf_data_path)
df_inf = None
```
# 2. Catboost
```
df = pd.read_parquet(train_data_path)
df_fea, labels = utils.split_feature_target(df, 'isFraud')
df_fea.head()
df_fea, _, _ = data_processing.catboost_preprocessing(df_fea, cat_cols=cat_cols)
df_inf = pd.read_parquet(inf_data_path)
df_inf.set_index('TransactionID', inplace=True)
df_inf, _, _ = data_processing.catboost_preprocessing(df_inf, cat_cols=cat_cols)
params_m = {'iterations':5000,
'learning_rate':0.02,
'depth':5,
'eval_metric':'AUC',
'verbose':200,
'od_type':"Iter", # overfit detector
'od_wait':500, # most recent best iteration to wait before stopping
'random_seed': 1
}
cat_model, cr = modelling.train_catboost_classifier(
df_fea, labels, cat_cols, params=params_m, plot=True)
df_inf['isFraud'] = cat_model.predict(df_inf[cr])
df_inf = df_inf.reset_index()[['TransactionID', 'isFraud']]
df_inf.to_csv('./data/inf.csv', index=False)
```
```
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import eegraph
import os
#1) Read EEG files and transform into NetworkX graphs with EEGRAPH
#============================================================================================================================================================================
def search(values, searchFor):
for k in values:
if (searchFor == k):
return (values[k])
def search_key(values, searchFor):
for key, value in values.items():
if value[0] == searchFor:
return key
def test_empty(graphs, conn_empty_values, conn):
conn_empty_aux = [0] * len(conn_empty_values)
for i in range(len(graphs)):
if(nx.is_empty(graphs[i])):
conn_empty_aux[conn] += 1
conn_empty_values[conn] = conn_empty_values[conn] + conn_empty_aux[conn]
print('Empty:',conn_empty_values,'\n')
return conn_empty_values
def modelate_with_different_connectivity(window_size, label, connectivity_number_total, G, conn_empty_values):
total_graphs_class_0, total_graphs_class_1 = [], []
for i in range(connectivity_number_total):
conn = search_key(connectivity_measures, i)
bands = search(connectivity_measures, conn)[1]
# The threshold can be omited to use the default one
graphs, _ = G.modelate(window_size = window_size, connectivity = conn, bands = bands, threshold = 0.6)
conn_empty_values = test_empty(graphs, conn_empty_values, i)
if(int(label)):
total_graphs_class_1 = total_graphs_class_1 + list(graphs.values())
else:
total_graphs_class_0 = total_graphs_class_0 + list(graphs.values())
return total_graphs_class_0, total_graphs_class_1, conn_empty_values
def open_data_directories(path, window_size_class_0, window_size_class_1, connectivity_number_total, exclude=[None]):
conn_empty_values = [0] * connectivity_number_total
graphs_class_0, graphs_class_1 = [], []
class_files = os.listdir(path)
for entry in class_files:
eeg_files = os.listdir(path + '/' + entry)
for eeg in eeg_files:
eeg_path = (path + '/' + entry + '/' + eeg)
print(eeg_path, entry)
G = eegraph.Graph()
G.load_data(path= eeg_path, exclude = exclude)
if(entry == '1'): # Number 1 corresponds to "espasmo" data
window_size = window_size_class_1
elif (entry == '0'): # Number 0 corresponds to "presalva" data
window_size = window_size_class_0
print('\n=========================================')
final_graphs_class_0, final_graphs_class_1 , conn_empty_values = modelate_with_different_connectivity(window_size=window_size,
label=entry,
connectivity_number_total=con_number_total,
G=G,
conn_empty_values=conn_empty_values)
graphs_class_0 = graphs_class_0 + final_graphs_class_0
graphs_class_1 = graphs_class_1 + final_graphs_class_1
return graphs_class_0, graphs_class_1
"""
connectivity_measures = {'cross_correlation': (0, [None]), 'pearson_correlation': (1, [None]), 'squared_coherence': (2, ['delta', 'theta', 'alpha', 'beta']),
'imag_coherence': (3, ['delta', 'theta', 'alpha', 'beta']), 'corr_cross_correlation': (4, [None]), 'wpli': (5, ['delta', 'theta', 'alpha', 'beta']),
'plv': (6, ['delta', 'theta', 'alpha', 'beta']), 'pli': (7, [None]),
'power_spectrum': (8, ['delta', 'theta', 'alpha', 'beta']), 'spectral_entropy': (9, ['delta', 'theta', 'alpha', 'beta']),
'shannon_entropy': (10, [None])}
"""
connectivity_measures = {'pearson_correlation': (0, [None])} #CONNECTIVITY MEASURES USED
path = 'data' #<--------------- PATH TO FOLDER CONTAINING EEGs
window_size_class_0 = 1 #<--------------- CLASS 0 WINDOW SIZE
window_size_class_1 = 1 #<--------------- CLASS 1 WINDOW SIZE
con_number_total = 1 #<--------------- NUMBER OF CONNECTIVITY MEASURES USED, MUST BE THE SAME AS LENGTH OF DICTIONARY 'connectivity_measures'
graphs_class_0, graphs_class_1 = open_data_directories(path, window_size_class_0, window_size_class_1, con_number_total)
print('\n=========================================')
print('Total graphs Generated for class 0: ', len(graphs_class_0))
print('Total graphs Generated for class 1: ', len(graphs_class_1))
graphs = [graphs_class_0, graphs_class_1]
#2) Visualize graphs
#============================================================================================================================================================================
def visualize_graphs(graphs, selected):
G = eegraph.Graph()
for i in range(selected[0], selected[1]+1):
G.visualize(graphs[i])
wanted = [0, 0] # Graph position
visualize_graphs(graphs_class_1, wanted)
#3)Histogram
#============================================================================================================================================================================
def edges_histogram(graphs, label):
total_edges, edges_dict = [], {}
for i in range(len(graphs)):
edges = [e for e in graphs[i].edges]
edges_dict[str(i+1)] = len(edges)
keys = edges_dict.keys()
values = edges_dict.values()
plt.figure(figsize=(30,15))
plt.title('Histogram: Edges per Graph. Class ' + str(label), fontsize=20)
plt.hist(values, bins=max(values)+1-min(values))
plt.xlabel('Number of edges')
plt.ylabel('Count')
#plt.bar(keys, values, align='center')
plt.show()
print('\n=====================================================================')
for j in range(2):
edges_histogram(graphs[j], j)
#4)Empty graphs
#============================================================================================================================================================================
def empty_graphs(graphs):
empty_graphs, empty_dict = 0, {}
for i in range(len(graphs)):
if(nx.is_empty(graphs[i])):
empty_dict[i] = True
empty_graphs += 1
else:
empty_dict[i] = False
return empty_graphs, empty_dict
print('\n=====================================================================')
empty_amount, graphs_dict = [None]*2, [None]*2
for j in range(2):
empty_amount[j], graphs_dict[j] = empty_graphs(graphs[j])
print('\nNumber of Empty graphs. Class ' + str(j) + ': ' , empty_amount[j])
print('Empty graphs (True).', graphs_dict[j])
#5)Erase Empty Graphs
#============================================================================================================================================================================
def delete_graphs(graphs, graphs_dict):
for key,value in reversed(graphs_dict.items()):
if(value):
print('Deleting graph in index:', str(key))
del graphs[key]
return graphs
print('\n=====================================================================')
print('Deleting empty graphs.')
for j in range(2):
if (empty_amount[j]):
print('\nGraphs in Class', j, ':')
graphs[j] = delete_graphs(graphs[j], graphs_dict[j])
print('\nTotal graphs for class 0: ', len(graphs[0]))
print('Total graphs for class 1: ', len(graphs[1]))
#6)Mean value and Standard Deviation for graphs
#============================================================================================================================================================================
def mean_std(graphs):
edges_weights, edges_dict = [], {}
for i in range(len(graphs)):
edges = [d.get('weight') for e1,e2,d in graphs[i].edges(data=True)]
edges_weights = edges_weights + edges
print('Mean:', round(np.mean(edges_weights),5))
print('STD:', round(np.std(edges_weights),5))
print('\n=====================================================================')
print('Mean values and Standard Deviation for edges in the graphs.')
for j in range(2):
print('\nClass', j, ':')
mean_std(graphs[j])
```
<h1>Conclusion</h1>
<p>In conclusion, as we can see in the graph and from the mean values, class 0 has a lower mean than class 1. This, confirmed by the graph, means that the brain has a higher number of connections during the epileptic seizure, since there are more zones with equal activation.</p>
## When do you start developing a pipeline?
A typical research plan that a student might present looks like
Order supplies --> Build a device --> Collect some data --> Analyze said data --> ~~Profit~~ Publish
This process is a pipeline! But it's out of order. Let's find out how with a real-world example.
### A rat behavior experiment
A post-doc wants to do a rat behavior experiment. For the experiment to work, it's critical to know whether or not the rat is moving at any given point in time. Each experiment runs for over an hour, but our post-doc reasons that she can set up a video camera synchronized to the rest of the experimental equipment, record the rat moving around its habitat, and determine when motion happened later, perhaps by doing some image processing. A labmate suggests she attach a bright red light to the rat's head to make it easy to track by computer - just find the reddest part in the red channel at each frame! Our post-doc does this, then collects more than 30 hours worth of data over two weeks.
```
%matplotlib inline
```
To work with video data, we have a new import today: `moviepy`. This is a library that helps you interact with movie files. Video file formats use lossy compression of some kind, but for something like a rat moving around, where we're not trying to make quantitative measurements of photon counts, it's preferable to save space and accept the compression losses. Even for display in a paper's supplement, video formats like mp4 and mpeg are useful.
```
from moviepy import editor as mpy
vid_file_name = "../data/rattrack.mp4"
vid = mpy.VideoFileClip(vid_file_name)
mpy.ipython_display(vid, width=480)
```
How would you begin processing this data to track the rat? What's our strategy going to be? What's different about this data compared to what we've seen before?
Let's look at the data in detail.
```
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('dark')
```
To extract the frames in the video, we will use the `VideoFileClip.get_frame(t)` function or `VideoFileClip.iter_frames()`.
```
vid.get_frame?
vid.iter_frames?
```
Let's start with the very first frame at time $t = 0$.
```
frame = vid.get_frame(0)
```
This is a video in color. How many dimensions do we expect this frame to have?
```
frame.shape
```
As a first step, let's see if we can in fact find a red dot in a still frame.
```
plt.imshow(frame)
```
And just the red channel?
```
red = frame[:,:,0]
plt.imshow(red)
```
This looks tractable! How might we grab the brightest point?
```
index = np.argmax(red)
print(index)
```
What does this number mean? It turns out that for multidimensional arrays, `argmax` just tells you the position of the maximum value _as if the array had first been flattened_. The brilliant minds behind Numpy know this probably isn't what you want, so they've provided you with `numpy.unravel_index`. It just needs to know the original array shape.
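For instance, here is a minimal illustration of that flattened-index behaviour on a made-up 2×3 array (not our video data):
```
import numpy as np

toy = np.array([[1, 9, 2],
                [3, 4, 5]])
flat_index = np.argmax(toy)  # 1: position of the max in the flattened array [1, 9, 2, 3, 4, 5]
print(np.unravel_index(flat_index, toy.shape))  # (0, 1): row 0, column 1
```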
```
row_index, column_index = np.unravel_index(index, red.shape)
```
It turns out we can help ourselves visualize that point by plotting directly on top of an image we made with imshow. `plt.plot` takes a list of $x$, $y$ values, and a style parameter (e.g. 'o' makes the points little circles) - it works exactly like Matlab's `plot` if you're familiar with that.
**Exercise** how would you change the color of this pixel to make it stand out in the original color image?
```
plt.imshow(frame)
plt.plot(column_index, row_index, 'o')
# Fix up the margins - plotting expands the plot area
plt.xlim([0, frame.shape[1]])
plt.ylim([frame.shape[0], 0])
# smooth
# argmax
```
Now what?
We need to do this for every frame in the video.
```
num_frames = int(vid.duration * vid.fps)  # cast to int so it can be used as an array dimension
# a time x dim table. dim = 2 since we have 2D frames
dot_position = np.zeros((num_frames, 2))
for frame_num, frame in enumerate(vid.iter_frames()):
red = frame[:,:,0]
    smooth = red  # smoothing placeholder (no smoothing applied yet)
peak = np.argmax(smooth)
position = np.unravel_index(peak, red.shape)
dot_position[frame_num, :] = position # set the whole row of the table
```
**Exercise** How might you visualize how the rat is moving with time?
There are two approaches to solving this: look at x and y position independently against time, or look at the path the animal takes through time. Both are shown below.
```
plt.plot(dot_position)
plt.plot(dot_position[:,0], dot_position[:,1])
```
Look at those jumps! We have a teleporting rat! Let's look at a time where the rat seems to teleport. Surely we are zeroing in on a major scientific discovery.
```
jump_size = np.zeros(num_frames - 1)
for t in range(0, int(num_frames - 1)):
jump_size[t] = np.sqrt(np.sum(np.square(dot_position[t+1,:] - dot_position[t,:])))
biggest_jump_t = np.argmax(jump_size) + 1
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
before = biggest_jump_t - 1
after = biggest_jump_t
ax[0].plot(dot_position[before,1], dot_position[before,0], 'o', fillstyle='none', markersize=20, markeredgewidth=5)
ax[0].imshow(vid.get_frame((before)/ vid.fps))
ax[1].plot(dot_position[after,1], dot_position[after,0], 'o', fillstyle='none', markersize=20, markeredgewidth=5)
ax[1].imshow(vid.get_frame(after / vid.fps))
print("Time of biggest jump: {} seconds".format(biggest_jump_t / vid.fps))
```
So what is happening at that time?
```
toi = biggest_jump_t / vid.fps # TOI = time of interest
short_clip = vid.subclip(toi - 1, toi + 2)
mpy.ipython_display(short_clip, width=480, loop=1)
```
The problem seems to be that the animal has room to turn its head and occlude the light with its head or the wires it's attached to. In the period where the light is occluded we need to either guess that it doesn't move until we find it again, or use some other tracking mechanism. It's also potentially hard to tell when we lose the light - right now we just look for the brightest red pixel, which will always exist. In fact, a white pixel looks pretty bright in the red channel.
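One possible improvement - only a sketch here, with a hand-picked brightness threshold that would need tuning against the real frames - is to declare the light "lost" whenever the brightest red pixel is too dim, and to hold the last known position until the light reappears:
```
# Sketch: hold the last known position when the red light appears to be occluded.
# BRIGHTNESS_THRESHOLD is a guess and would need tuning on the actual video.
BRIGHTNESS_THRESHOLD = 200

robust_position = np.zeros((num_frames, 2))
last_position = (0, 0)
for frame_num, frame in enumerate(vid.iter_frames()):
    red = frame[:, :, 0]
    peak = np.argmax(red)
    if red.flat[peak] >= BRIGHTNESS_THRESHOLD:
        # The light is bright enough to trust - update the position estimate
        last_position = np.unravel_index(peak, red.shape)
    # Otherwise keep the previous position: we assume the rat hasn't moved
    robust_position[frame_num, :] = last_position
```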
Although there are some improvements we can make, it turns out that this problem went from very easy to very hard.
That's because the real problem occurred weeks ago, here: "Collect some data --> Analyze said data". Collecting and analyzing data are not separable steps. You should be building your data processing pipeline while you're building your experiment, and iterating on it as you're collecting data.
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.decomposition import PCA
df = pd.read_csv("wat-all.csv")
df
dff = pd.DataFrame(df['packet_address'], columns=['packet_address'])
le = LabelEncoder()
encode = dff[dff.columns[:]].apply(le.fit_transform)
df['packet_address_id'] = encode
df
df.corr()
plt.figure(figsize=(15,15))
sns.heatmap(df.corr(), annot = True)
plt.show()
train_X = df.drop(columns=['target','time','packet_address','packet_address_id'])
sns.distplot(df['router'], kde = False, bins=30, color='blue')
sns.distplot(df['src_router'], kde = False, bins=30, color='orange')
sns.distplot(df['dst_router'], kde = False, bins=30, color='red')
sns.distplot(df['inport'], kde = False, bins=30, color='green')
direction = {'Local': 0,'North': 1, 'East': 2, 'South':3,'West':4}
sns.distplot(df['outport'], kde = False, bins=30, color='black')
direction = {'Local': 0,'North': 1, 'East': 2, 'South':3,'West':4}
sns.distplot(df['packet_type'], kde = False, bins=30, color='grey')
data = {'GETX': 0,'DATA': 1, 'PUTX': 2, 'WB_ACK':3}
# scatter plot
fig, ax = plt.subplots()
ax.scatter(df['router'], df['time'])
# set a title and labels
ax.set_xlabel('router')
ax.set_ylabel('time')
df_500 = pd.read_csv('wat-all.csv',nrows=500)
# scatter plot 500
fig, ax = plt.subplots()
ax.scatter(df_500['router'], df_500['time'])
# set a title and labels
ax.set_xlabel('router')
ax.set_ylabel('time')
# bar chart by router
fig, ax = plt.subplots()
# count the occurrence of each class
data = df_500['router'].value_counts()
# get x and y data
points = data.index
frequency = data.values
# create bar chart
ax.bar(points, frequency)
# set title and labels
ax.set_xlabel('Routers')
ax.set_ylabel('Frequency')
# bar chart by time
fig, ax = plt.subplots()
# count the occurrence of each class
data = df_500['time'].value_counts()
# get x and y data
points = data.index
frequency = data.values
# create bar chart
ax.bar(points, frequency)
# set title and labels
ax.set_xlabel('Time')
ax.set_ylabel('Frequency')
#standardization
x = train_X.values
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
train_X = pd.DataFrame(x_scaled)
train_X
corr_df = pd.concat([train_X, df[['target']]], axis = 1)
corr_df.corr()
train_Y = corr_df['target']
train_Y.value_counts()
```
#### machine learning models
```
X_train, X_test, y_train, y_test = train_test_split(train_X, train_Y, test_size=0.3, random_state=0)
#logistic regression
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import statsmodels.api as sm
from sklearn import metrics
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import accuracy_score
logit_model=sm.Logit(train_Y,train_X)
result=logit_model.fit()
print(result.summary2())
logreg = LogisticRegression(C=1,penalty='l2',random_state=42)
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
print('Accuracy {:.2f}'.format(accuracy_score(y_test,y_pred)))
logreg_score_train = logreg.score(X_train,y_train)
print("Train Prediction Score",logreg_score_train*100)
logreg_score_test = accuracy_score(y_test,y_pred)
print("Test Prediction ",logreg_score_test*100)
cm = confusion_matrix(y_test, y_pred)
print(cm)
print(classification_report(y_test, y_pred))
logit_roc_auc = roc_auc_score(y_test, logreg.predict(X_test))
fpr, tpr, thresholds = roc_curve(y_test, logreg.predict_proba(X_test)[:,1])
plt.figure()
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig('Log_ROC')
plt.show()
#KNeighborsClassifier
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(X_train,y_train)
y_pred_knn= knn.predict(X_test)
knn_score_train = knn.score(X_train,y_train)
print("Train Prediction Score",knn_score_train*100)
knn_score_test = accuracy_score(y_test,y_pred_knn)
print("Test Prediction ",knn_score_test*100)
cm = confusion_matrix(y_test, y_pred_knn)
print(cm)
print(classification_report(y_test,y_pred_knn))
logit_roc_auc = roc_auc_score(y_test, y_pred_knn)
fpr, tpr, thresholds = roc_curve(y_test, knn.predict_proba(X_test)[:,1])
plt.figure()
plt.plot(fpr, tpr, label='KNeighbors (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig('Log_ROC')
plt.show()
#supportvectormachines
from sklearn.svm import SVC
ksvc = SVC(kernel = 'rbf',random_state = 42,probability=True)
ksvc.fit(X_train,y_train)
y_pred_ksvc= ksvc.predict(X_test)
ksvc_score_train = ksvc.score(X_train,y_train)
print("Train Prediction Score",ksvc_score_train*100)
ksvc_score_test = accuracy_score(y_test,y_pred_ksvc)
print("Test Prediction Score",ksvc_score_test*100)
cm = confusion_matrix(y_test, y_pred_ksvc)
print(cm)
print(classification_report(y_test,y_pred_ksvc))
#naive_bayes
from sklearn.naive_bayes import GaussianNB
nb = GaussianNB()
nb.fit(X_train,y_train)
y_pred_nb= nb.predict(X_test)
nb_score_train = nb.score(X_train,y_train)
print("Train Prediction Score",nb_score_train*100)
nb_score_test = accuracy_score(y_test,y_pred_nb)
print("Test Prediction Score",nb_score_test*100)
cm = confusion_matrix(y_test, y_pred_nb)
print(cm)
print(classification_report(y_test,y_pred_nb))
#neuralnetwork
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping
#2layer
model = Sequential()
n_cols = X_train.shape[1]
n_cols
model.add(Dense(2, activation='relu', input_shape=(n_cols,)))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
early_stopping_monitor = EarlyStopping(patience=20)
model.fit(X_train, y_train, epochs=10, validation_split=0.4, callbacks=[early_stopping_monitor])
#3layer
model = Sequential()
n_cols = X_train.shape[1]
n_cols
model.add(Dense(4, activation='relu', input_shape=(n_cols,)))
model.add(Dense(2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='sgd', loss='mean_squared_error', metrics=['accuracy'])
early_stopping_monitor = EarlyStopping(patience=20)
model.fit(X_train, y_train, epochs=20, validation_split=0.4, callbacks=[early_stopping_monitor])
#4layer
model = Sequential()
n_cols = X_train.shape[1]
n_cols
model.add(Dense(8, activation='relu', input_shape=(n_cols,)))
model.add(Dense(4, activation='relu'))
model.add(Dense(2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='sgd', loss='mean_squared_error', metrics=['accuracy'])
early_stopping_monitor = EarlyStopping(patience=20)
model.fit(X_train, y_train, epochs=20, validation_split=0.2, callbacks=[early_stopping_monitor])
#4layer
model = Sequential()
n_cols = X_train.shape[1]
n_cols
model.add(Dense(16, activation='relu', input_shape=(n_cols,)))
model.add(Dense(8, activation='relu'))
model.add(Dense(4, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='sgd', loss='mean_squared_error', metrics=['accuracy'])
early_stopping_monitor = EarlyStopping(patience=20)
model.fit(X_train, y_train, epochs=20, validation_split=0.4, callbacks=[early_stopping_monitor])
```
```
from sklearn.datasets import make_blobs, make_moons
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score, adjusted_mutual_info_score
from scipy.stats import pearsonr, spearmanr
def interval_transform(x, a, b):
m = x.min()
ma = x.max()
alpha_inv = (1 - m/ma)*ma/(a - b)
alpha = 1/alpha_inv
beta = b - alpha*m
f = lambda x: alpha*x + beta
return f(x)
def make_noise_feature(x):
n_features = x.shape[1]
n_samples = x.shape[0]
weights = np.random.uniform(1e-4, 1e-2, n_features)
noise = np.random.normal(1, 5, n_samples)
signal = np.sum(weights*x, -1)
return signal + noise
def calculate_pvalues(df,
method = spearmanr
):
"""
Assumes df with only numeric entries clean of null entries.
"""
dfcols = pd.DataFrame(columns=df.columns)
pvalues = dfcols.transpose().join(dfcols, how='outer')
for r in df.columns:
for c in df.columns:
pvalues[r][c] = round(method(df[r], df[c])[1], 4)
return pvalues
def correlation_matrix(df,
method = "pearson",
annot_bool = False,
annot_size = 20
):
# Compute the correlation matrix
corr = df.corr(method = method)
if annot_bool:
annot = corr.copy()
if method == "pearson":
sig_meth = pearsonr
else:
sig_meth = spearmanr
pval = calculate_pvalues(df, sig_meth)
        # create annotation strings for the different significance levels
r0 = corr.applymap(lambda x: '{:.2f}'.format(x))
r1 = corr.applymap(lambda x: '{:.2f}*'.format(x))
r2 = corr.applymap(lambda x: '{:.2f}**'.format(x))
r3 = corr.applymap(lambda x: '{:.2f}***'.format(x))
# apply them where appropriate --this could be a single liner
annot = annot.where(pval>0.1,r0)
annot = annot.where(pval<=0.1,r1)
annot = annot.where(pval<=0.05,r2)
annot = annot.mask(pval<=0.01,r3)
# Generate a mask for the upper triangle
    mask = np.zeros_like(corr, dtype=bool)  # np.bool is removed in recent NumPy versions
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 11))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5},
annot = annot,
fmt = "",
annot_kws={"size": annot_size},
vmin = -1,
vmax = 1,
)
n_info = 3
n_redu = 0
n_samples=2000
#making nonlinear decision boundaries requires multiple blob like features
X1, y1 = make_blobs(
n_samples=n_samples,
n_features=2,
centers=np.array([[42, 39], [39.5, 38.3]]),
shuffle=False,
random_state=42,
#difficulty,
cluster_std=1.4,
)
X2, y2 = make_blobs(
n_samples=n_samples,
n_features=2,
centers=np.array([[44, 39.8], [38, 37.9]]),
cluster_std=1.2,
shuffle=False,
random_state=6,
#difficulty,
)
X3, y3 = make_moons(n_samples=2*n_samples, noise=1, random_state=42)
X = np.concatenate([X1, X2], axis=0)
y = np.concatenate([y1, y2], axis=0)
data = np.concatenate([X, np.expand_dims(y, -1)], -1)
data = pd.DataFrame(data)
print(X.shape)
data.head()
correlation_matrix(data, annot_bool=True, annot_size=15)
plt.hist(y)
```
To educationify the data we scale and transform the dataset:
For the informative features we'll use:
* GPA [0, 4] unit:grade
* Attendance [0, 100] unit:percent
* Passed percent of classes [0, 100] unit:percent
For the redundant features we'll use:
* Sex [0, 1] unit:integer class
* Ethnicity [0, 1, 2] unit:integer class
* HSGPA [0, 4] unit:grade
```
fig, axs = plt.subplots(nrows=n_info, figsize=(5, 10 ))
for i in range(n_info):
ax = axs[i]
data[i].plot(kind="hist", ax=ax)
gpa_column = interval_transform(data[0], 1, 4)
plt.hist(gpa_column)
passed_column = interval_transform(data[1], 0, 100)
plt.hist(passed_column)
gpa_column.shape
full_data = np.concatenate(
[
np.expand_dims(gpa_column, axis=-1),
np.expand_dims(passed_column, axis=-1),
np.expand_dims(y, axis=-1)
],
axis=1
)
columns = [
"cGPA",
"passed_percent",
"degree",
]
df_full = pd.DataFrame(full_data,
columns=columns)
df_full.head()
comb = [(1, 0),]
fig, axs = plt.subplots(nrows=len(comb), figsize=(10, 7))
for i in range(len(comb)):
sns.scatterplot(full_data[:,comb[i][0]], full_data[:,comb[i][1]], hue=y, ax=axs)
correlation_matrix(df_full, annot_bool=True, annot_size=15)
t_X = X.copy()
fd = full_data[:, :-1].copy()
for i in range(t_X.shape[1]):
t_X[:,i] = (t_X[:,i] - t_X[:,i].mean())/t_X[:,i].std()
for i in range(fd.shape[1]):  # standardize each feature column once
fd[:,i] = (fd[:,i] - fd[:,i].mean())/fd[:,i].std()
#x_train, x_test, y_train, y_test = train_test_split(t_X, y, shuffle=True)
x_train, x_test, y_train, y_test = train_test_split(fd, full_data[:,-1], shuffle=True)
model = KMeans(n_clusters=2)
model.fit(fd)
def kmeans_plot(df,labels,centers):
x_min, x_max = df[:, 1].min() - 1, df[:, 1].max() + 1
y_min, y_max = df[:, 0].min() - 1, df[:, 0].max() + 1
plt.scatter(df[:, 1], df[:, 0], c=labels, cmap = 'viridis')
plt.scatter(centers[:, 1], centers[:, 0], marker='x', c = 'k')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
title = "KMeans Clustering of an education-like dataset with "+str(len(centers))+" clusters"
plt.title(title)
plt.show()
kmeans_plot(x_test, model.predict(x_test), model.cluster_centers_)
print("KM model",
adjusted_rand_score(y_test, model.predict(x_test)),
adjusted_mutual_info_score(y_test, model.predict(x_test)))
pd.to_pickle(df_full, "clustering_data.pkl")
```
## Probabilistic tractography
Probabilistic fiber tracking is a way of reconstructing the white matter
structural connectivity using diffusion MRI data. Much like deterministic fiber
tracking, the probabilistic approach follows the trajectory of a possible
pathway in a step-wise fashion, propagating streamlines based on the local
orientations reconstructed at each voxel.
In probabilistic tracking, however, the tracking direction at each point along
the path is chosen at random from a distribution of possible directions, and
thus is no longer deterministic. The distribution at each point is different and
depends on the observed diffusion data at that point. The distribution of
tracking directions at each point can be represented as a probability mass
function (PMF) if the possible tracking directions are restricted to a set of
points distributed on a sphere.
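To make the idea of sampling a tracking direction from a PMF concrete, here is a toy sketch with a handful of made-up candidate directions and probabilities (this only illustrates the sampling step; it is not DIPY code):
```
import numpy as np

# Four made-up candidate unit directions for a single voxel (one per row)
directions = np.array([[1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0],
                       [0.0, 0.0, 1.0],
                       [0.577, 0.577, 0.577]])
# A made-up PMF over those directions (non-negative, sums to 1)
pmf = np.array([0.5, 0.2, 0.2, 0.1])

rng = np.random.default_rng(0)
# At each propagation step, the next direction is drawn at random from the PMF
step_direction = directions[rng.choice(len(directions), p=pmf)]
print(step_direction)
```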
Like their deterministic counterparts, probabilistic tracking methods start
propagating streamlines from a *seed map*, which contains a number of
coordinates per voxel to initiate the procedure. The higher the number of seeds
per voxel (i.e. the seed density), the larger will be the number of potentially
recovered long-range connections. However, this comes at the cost of a longer
running time.
This episode builds on top of the results of the CSD local orientation
reconstruction method presented in a previous episode.
We will first get the necessary diffusion data, and compute the local
orientation information using the CSD method:
```
import os
import nibabel as nib
import numpy as np
from bids.layout import BIDSLayout
from dipy.core.gradients import gradient_table
from dipy.io.gradients import read_bvals_bvecs
# Get the diffusion files
dwi_layout = BIDSLayout(
'../data/ds000221/derivatives/uncorrected_topup_eddy/', validate=False)
gradient_layout = BIDSLayout(
'../data/ds000221/sub-010006/ses-01/dwi/', validate=False)
subj = '010006'
dwi_fname = dwi_layout.get(subject=subj, suffix='dwi',
extension='.nii.gz', return_type='file')[0]
bval_fname = gradient_layout.get(
subject=subj, suffix='dwi', extension='.bval', return_type='file')[0]
bvec_fname = dwi_layout.get(
subject=subj, extension='.eddy_rotated_bvecs', return_type='file')[0]
dwi_img = nib.load(dwi_fname)
affine = dwi_img.affine
bvals, bvecs = read_bvals_bvecs(bval_fname, bvec_fname)
gtab = gradient_table(bvals, bvecs)
```
We will now create the seeding mask and the seeds using an estimate of the
white matter tissue based on the FA values obtained from the diffusion tensor:
```
from dipy.reconst import dti
from dipy.segment.mask import median_otsu
from dipy.tracking import utils
dwi_data = dwi_img.get_fdata()
# Specify the volume index to the b0 volumes
dwi_data, dwi_mask = median_otsu(dwi_data, vol_idx=[0], numpass=1)
dti_model = dti.TensorModel(gtab)
# This step may take a while
dti_fit = dti_model.fit(dwi_data, mask=dwi_mask)
# Create the seeding mask
fa_img = dti_fit.fa
seed_mask = fa_img.copy()
seed_mask[seed_mask >= 0.2] = 1
seed_mask[seed_mask < 0.2] = 0
# Create the seeds
seeds = utils.seeds_from_mask(seed_mask, affine=affine, density=1)
```
We will now estimate the FRF and set the CSD model to feed the local orientation
information to the streamline propagation object:
```
from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel,
auto_response_ssst)
response, ratio = auto_response_ssst(gtab, dwi_data, roi_radii=10, fa_thr=0.7)
sh_order = 2
csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=sh_order)
csd_fit = csd_model.fit(dwi_data, mask=seed_mask)
```
Tracking methods are provided with a criterion to stop propagating streamlines
beyond non-white matter tissues. One way to do this is to use the Generalized
Fractional Anisotropy (GFA). Much like the Fractional Anisotropy issued by the
DTI model measures anisotropy, the GFA uses samples of the ODF to quantify the
anisotropy of tissues, and hence, it provides an estimation of the underlying
tissue type.
```
from scipy import ndimage # To rotate image for visualization purposes
import matplotlib.pyplot as plt
from dipy.reconst.shm import CsaOdfModel
from dipy.tracking.stopping_criterion import ThresholdStoppingCriterion
csa_model = CsaOdfModel(gtab, sh_order=sh_order)
gfa = csa_model.fit(dwi_data, mask=seed_mask).gfa
stopping_criterion = ThresholdStoppingCriterion(gfa, .25)
# Create the directory to save the results
out_dir = '../data/ds000221/derivatives/dwi/tractography/sub-%s/ses-01/dwi/' % subj
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# Save the GFA
gfa_img = nib.Nifti1Image(gfa.astype(np.float32), affine)
nib.save(gfa_img, os.path.join(out_dir, 'gfa.nii.gz'))
# Plot the GFA
%matplotlib inline
fig, ax = plt.subplots(1, 3, figsize=(10, 10))
ax[0].imshow(ndimage.rotate(gfa[:, gfa.shape[1]//2, :], 90, reshape=False))
ax[1].imshow(ndimage.rotate(gfa[gfa.shape[0]//2, :, :], 90, reshape=False))
ax[2].imshow(ndimage.rotate(gfa[:, :, gfa.shape[-1]//2], 90, reshape=False))
fig.savefig(os.path.join(out_dir, "gfa.png"), dpi=300, bbox_inches="tight")
plt.show()
```
The GFA threshold stopping criterion value must be adjusted to the data in
order to avoid creating a mask that will exclude white matter areas (which
would result in streamlines being unable to propagate to other white matter
areas). Visually inspecting the GFA map can provide a sufficient check that the
chosen value is reasonable.
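A simple numerical complement to that visual check - a sketch only, not part of the original workflow - is to look at how a candidate threshold partitions the GFA values inside the seed mask:
```
# Sketch: inspect how a candidate GFA threshold partitions the masked voxels.
# `gfa`, `seed_mask`, `np` and `plt` are reused from the cells above.
candidate_threshold = 0.25
masked_gfa = gfa[seed_mask > 0]
print("Fraction of seed-mask voxels above threshold:",
      np.mean(masked_gfa > candidate_threshold))

plt.hist(masked_gfa, bins=50)
plt.axvline(candidate_threshold, color='r')
plt.xlabel('GFA')
plt.ylabel('Voxel count')
plt.show()
```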
The Fiber Orientation Distribution (FOD) of the CSD model estimates the
distribution of small fiber bundles within each voxel. We can use this
distribution for probabilistic fiber tracking. One way to do this is to
represent the FOD using a discrete sphere. This discrete FOD can be used by the
``ProbabilisticDirectionGetter`` as a PMF for sampling tracking directions. We
need to clip the FOD to use it as a PMF because the latter cannot have negative
values. Ideally, the FOD should be strictly positive, but because of noise
and/or model failures sometimes it can have negative values.
The set of possible directions to choose to propagate a streamline is restricted
by a cone angle $\theta$, named `max_angle` in `DIPY`'s
`ProbabilisticDirectionGetter::from_pmf` method.
Another relevant parameter of the propagation is the step size, which dictates
how much the propagation will advance to the next point. Note that it is a real
number, since the tracking procedure operates in physical coordinates.
Note that the `LocalTracking` class accepts a `StoppingCriterion` class instance
as its second argument, and thus a different criterion can be used if the GFA
criterion does not fit into our framework, or if different data is available in
our workflow.
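For instance - purely as a sketch - a binary white-matter mask could be turned into a stopping criterion with `BinaryStoppingCriterion`, assuming the FA-based `seed_mask` from above is an acceptable white-matter estimate:
```
# Sketch: an alternative stopping criterion built from a binary mask instead of the GFA map.
# Not used in the rest of this episode.
from dipy.tracking.stopping_criterion import BinaryStoppingCriterion

binary_stopping_criterion = BinaryStoppingCriterion(seed_mask.astype(bool))
# This object could then be passed to LocalTracking in place of `stopping_criterion`.
```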
```
from dipy.direction import ProbabilisticDirectionGetter
from dipy.data import small_sphere
from dipy.io.stateful_tractogram import Space, StatefulTractogram
from dipy.io.streamline import save_tractogram
from dipy.tracking.local_tracking import LocalTracking
from dipy.tracking.streamline import Streamlines
fod = csd_fit.odf(small_sphere)
pmf = fod.clip(min=0)
prob_dg = ProbabilisticDirectionGetter.from_pmf(pmf, max_angle=30.,
sphere=small_sphere)
streamline_generator = LocalTracking(prob_dg, stopping_criterion, seeds,
affine, step_size=.5)
streamlines = Streamlines(streamline_generator)
sft = StatefulTractogram(streamlines, dwi_img, Space.RASMM)
# Save the tractogram
save_tractogram(sft, os.path.join(
out_dir, 'tractogram_probabilistic_dg_pmf.trk'))
```
We can easily generate the anatomical views of the resulting tractogram using the `generate_anatomical_volume_figure` helper function:
```
from fury import actor, colormap
from utils.visualization_utils import generate_anatomical_volume_figure
# Plot the tractogram
# Build the representation of the data
streamlines_actor = actor.line(streamlines, colormap.line_colors(streamlines))
# Compute the slices to be shown
slices = tuple(elem // 2 for elem in dwi_data.shape[:-1])
# Generate the figure
fig = generate_anatomical_volume_figure(streamlines_actor)
fig.savefig(os.path.join(out_dir, "tractogram_probabilistic_dg_pmf.png"),
dpi=300, bbox_inches="tight")
plt.show()
```
One disadvantage of using a discrete PMF to represent possible tracking
directions is that it tends to take up a lot of RAM memory. The size of the
PMF, the FOD in this case, must be equal to the number of possible tracking
directions on the hemisphere, and every voxel has a unique PMF. In this case
the data is ``(81, 106, 76)`` and ``small_sphere`` has 181 directions so the
FOD is ``(81, 106, 76, 181)``. One way to avoid sampling the PMF and holding it
in memory is to build the direction getter directly from the spherical harmonic
(SH) representation of the FOD. By using this approach, we can also use a
larger sphere, like ``default_sphere`` which has 362 directions on the
hemisphere, without having to worry about memory limitations.
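As a rough back-of-the-envelope check of that memory cost (assuming the FOD is held as 8-byte floats):
```
# Approximate size of the discrete FOD held as a PMF, assuming float64 values
n_values = 81 * 106 * 76 * 181
print("Approx. memory for the discrete FOD: %.1f GB" % (n_values * 8 / 1024**3))
# -> roughly 0.9 GB for this dataset alone
```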
```
from dipy.data import default_sphere
prob_dg = ProbabilisticDirectionGetter.from_shcoeff(csd_fit.shm_coeff,
max_angle=30.,
sphere=default_sphere)
streamline_generator = LocalTracking(prob_dg, stopping_criterion, seeds,
affine, step_size=.5)
streamlines = Streamlines(streamline_generator)
sft = StatefulTractogram(streamlines, dwi_img, Space.RASMM)
# Save the tractogram
save_tractogram(sft, os.path.join(
out_dir, 'tractogram_probabilistic_dg_sh.trk'))
```
We will visualize the tractogram using the three usual anatomical views:
```
# Plot the tractogram
# Build the representation of the data
streamlines_actor = actor.line(streamlines, colormap.line_colors(streamlines))
# Generate the figure
fig = generate_anatomical_volume_figure(streamlines_actor)
fig.savefig(os.path.join(out_dir, "tractogram_probabilistic_dg_sh.png"),
dpi=300, bbox_inches="tight")
plt.show()
```
Not all model fits have the ``shm_coeff`` attribute because not all models use
this basis to represent the data internally. However we can fit the ODF of any
model to the spherical harmonic basis using the ``peaks_from_model`` function.
```
from dipy.direction import peaks_from_model
peaks = peaks_from_model(csd_model, dwi_data, default_sphere, .5, 25,
mask=seed_mask, return_sh=True, parallel=True)
```
It is always good practice to save and visualize the peaks as a check that the orientation information looks as expected before starting the tracking process.
```
# Save the peaks
from dipy.io.peaks import reshape_peaks_for_visualization
nib.save(nib.Nifti1Image(reshape_peaks_for_visualization(peaks),
affine), os.path.join(out_dir, 'peaks.nii.gz'))
```
As usual, we will use `FURY` to visualize the peaks:
```
from utils.visualization_utils import generate_anatomical_slice_figure
# Visualize the peaks
# Build the representation of the data
peaks_actor = actor.peak_slicer(peaks.peak_dirs, peaks.peak_values)
# Compute the slices to be shown
slices = tuple(elem // 2 for elem in dwi_data.shape[:-1])
# Generate the figure
fig = generate_anatomical_slice_figure(slices, peaks_actor)
fig.savefig(os.path.join(out_dir, "peaks.png"), dpi=300, bbox_inches="tight")
plt.show()
fod_coeff = peaks.shm_coeff
prob_dg = ProbabilisticDirectionGetter.from_shcoeff(fod_coeff, max_angle=30.,
sphere=default_sphere)
streamline_generator = LocalTracking(prob_dg, stopping_criterion, seeds,
affine, step_size=.5)
streamlines = Streamlines(streamline_generator)
sft = StatefulTractogram(streamlines, dwi_img, Space.RASMM)
# Save the tractogram
save_tractogram(sft, os.path.join(
out_dir, "tractogram_probabilistic_dg_sh_pmf.trk"))
```
We will again visualize the tractogram using the three usual anatomical views:
```
# Plot the tractogram
# Build the representation of the data
streamlines_actor = actor.line(streamlines, colormap.line_colors(streamlines))
# Generate the figure
fig = generate_anatomical_volume_figure(streamlines_actor)
fig.savefig(os.path.join(
out_dir, "tractogram_probabilistic_dg_sh_pmf.png"), dpi=300, bbox_inches="tight")
plt.show()
```
## Tip: Making sure your tractogram is well aligned with the data
If for whatever reason the anatomical and diffusion images were not correctly aligned, you may find that your tractogram is not well aligned with the anatomical data. This may also happen as a result of the different formats in which a tractogram is saved/loaded, with some conventions specifying the origin at the voxel corner and others specifying it at the center of the voxel. Visualizing the computed features is always recommended. There are tools that allow you to check that the matrices specifying the orientation and positioning of the data are correct.
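From within Python, a quick first check (reusing the `nibabel` image loaded earlier) is simply to print the affine and the axis orientation codes and compare them against those of the anatomical image:
```
# Quick sanity check of the spatial metadata from within Python
print(dwi_img.affine)                    # voxel-to-world matrix of the diffusion image
print(nib.aff2axcodes(dwi_img.affine))   # axis orientation codes, e.g. ('R', 'A', 'S')
```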
`MRtrix`'s `mrinfo` command can be used to visualize the affine matrix of a `NIfTI` file as:
`mrinfo dwi.nii.gz`
which would output something like:
```
************************************************
Image: "/data/dwi.nii.gz"
************************************************
Dimensions: 90 x 108 x 90 x 33
Voxel size: 2 x 2 x 2 x 1
Data strides: [ -1 -2 3 4 ]
Format: NIfTI-1.1 (GZip compressed)
Data type: signed 16 bit integer (little endian)
Intensity scaling: offset = 0, multiplier = 1
Transform: 1 -0 0 -178
-0 1 0 -214
-0 -0 1 -0
```
Similarly, for your tractograms, you may use the `track_info` command from `TrackVis`' `Diffusion Toolkit` set of command-line tools:
`track_info tractogram.trk`
which would output something like:
```
ID string: TRACK
Version: 2
Dimension: 180 216 180
Voxel size: 1 1 1
Voxel order: LPS
Voxel order original: LPS
Voxel to RAS matrix:
-1.0000 0.0000 0.0000 0.5000
0.0000 -1.0000 0.0000 0.5000
0.0000 0.0000 1.0000 -0.5000
0.0000 0.0000 0.0000 1.0000
Image Orientation: 1.0000/0.0000/0.0000/0.0000/1.0000/0.0000
Orientation patches: none
Number of scalars: 0
Number of properties: 0
Number of tracks: 200433
```
Note that a `TRK` file contains orientational and positional information. If you choose to store your tractograms using the `TCK` format, this information will not be contained in the file. To see the file header information you may use the `MRtrix` `tckinfo` command:
`tckinfo tractogram.tck`
which would output something like:
```
***********************************
Tracks file: "/data/tractogram.tck"
count: 0000200433
dimensions: (180, 216, 180)
voxel_order: LPS
voxel_sizes: (1.0, 1.0, 1.0)
```
<a href="https://colab.research.google.com/github/lmcanavals/data_mining/blob/main/notebooks/aa_preprocessing.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
from sklearn import preprocessing
import numpy as np
```
## Standardization
Bring the data in each column to the same distribution (zero mean, unit variance).
```
X_train = np.array([[1, -1, 2], [2, 0, 0], [0, 1, -1]])
X_scaled = preprocessing.scale(X_train)
print(X_scaled)
print(X_scaled.mean(axis=0))
print(X_scaled.std(axis=0))
```
When we want to apply the same scaling to other data, it is better to create a scaler object.
```
scaler = preprocessing.StandardScaler().fit(X_train)
print(scaler)
print(scaler.mean_)
print(scaler.scale_)
scaler.transform(X_train)
X_test = [[-1, 1, 0]]
scaler.transform(X_test)
```
## Normalization
```
X_normalized = preprocessing.normalize(X_train, norm='l2')
print(X_normalized)
normalizer = preprocessing.Normalizer().fit(X_train)
print(normalizer)
normalizer.transform(X_train)
normalizer.transform(X_test)
```
## Missing values
```
from sklearn.impute import SimpleImputer
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
imp.fit([[ 1, 2],
[np.nan, 3],
[ 7, 6]])
X = [[np.nan, 2],
[ 6, np.nan],
[ 7, 6],
[np.nan, 4],
[ 1, 1],
[np.nan, np.nan]]
imp.transform(X)
import scipy.sparse as sp
X = sp.csc_matrix([[1, 2], [0, -1], [7, 6]])
imp = SimpleImputer(missing_values=-1, strategy ='mean')
imp.fit(X)
X_test = sp.csc_matrix ([[-1, 2], [6, -1], [7, 6]])
imp.transform(X_test).toarray()
```
## Rescaling
```
import pandas as pd
import scipy
from sklearn.preprocessing import MinMaxScaler
url="https://gist.githubusercontent.com/ktisha/c21e73a1bd1700294ef790c56c8aec1f/raw/819b69b5736821ccee93d05b51de0510bea00294/pima-indians-diabetes.csv"
names=['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
dataframe = pd.read_csv(url, names=names, comment="#")
array = dataframe.values
X = array[:, :-1]
Y = array[:, -1]
scaler = MinMaxScaler(feature_range=(0, 1))
X_rescaled = scaler.fit_transform(X)
np.set_printoptions(precision=3)
X_rescaled[:5]
```
## Binarization
```
from sklearn.preprocessing import Binarizer
binarizer = Binarizer(threshold=0.5).fit(X_rescaled)
X_binarized = binarizer.transform(X_rescaled)
X_binarized[:5]
```
## Standardization
```
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(X)
rescaledX = scaler.transform(X)
rescaledX[:5]
```
# PID Controller
Self-Driving Car Engineer Nanodegree Program
In this project I implemented a PID controller to steer an autonomous driving car around a track in a simulator. The code is written in C++. This work is part of the Self-Driving Car Engineer Nanodegree Program at Udacity.
---
[//]: # (Image References)
[image1]: ./output_images/image.png
[image2]: ./output_images/image.png
## Dependencies
* cmake >= 3.5
* All OSes: [click here for installation instructions](https://cmake.org/install/)
* make >= 4.1(mac, linux), 3.81(Windows)
* Linux: make is installed by default on most Linux distros
* Mac: [install Xcode command line tools to get make](https://developer.apple.com/xcode/features/)
* Windows: [Click here for installation instructions](http://gnuwin32.sourceforge.net/packages/make.htm)
* gcc/g++ >= 5.4
* Linux: gcc / g++ is installed by default on most Linux distros
* Mac: same deal as make - [install Xcode command line tools](https://developer.apple.com/xcode/features/)
* Windows: recommend using [MinGW](http://www.mingw.org/)
* [uWebSockets](https://github.com/uWebSockets/uWebSockets)
* Run either `./install-mac.sh` or `./install-ubuntu.sh`.
* If you install from source, checkout to commit `e94b6e1`, i.e.
```
git clone https://github.com/uWebSockets/uWebSockets
cd uWebSockets
git checkout e94b6e1
```
Some function signatures have changed in v0.14.x. See [this PR](https://github.com/udacity/CarND-MPC-Project/pull/3) for more details.
* Simulator. You can download these from the [project intro page](https://github.com/udacity/self-driving-car-sim/releases) in the classroom.
Fellow students have put together a guide to Windows set-up for the project [here](https://s3-us-west-1.amazonaws.com/udacity-selfdrivingcar/files/Kidnapped_Vehicle_Windows_Setup.pdf) if the environment you have set up for the Sensor Fusion projects does not work for this project. There's also an experimental patch for windows in this [PR](https://github.com/udacity/CarND-PID-Control-Project/pull/3).
## Basic Build Instructions
1. Clone this repo.
2. Make a build directory: `mkdir build && cd build`
3. Compile: `cmake .. && make`
4. Run it: `./pid`.
Tips for setting up your environment can be found [here](https://classroom.udacity.com/nanodegrees/nd013/parts/40f38239-66b6-46ec-ae68-03afd8a601c8/modules/0949fca6-b379-42af-a919-ee50aa304e6a/lessons/f758c44c-5e40-4e01-93b5-1a82aa4e044f/concepts/23d376c7-0195-4276-bdf0-e02f1f3c665d)
## Implementation
### Understanding of PID controller:
* The overall control function
\begin{equation}
u(t) = k_p e(t) + k_i \int_{0}^{t} e(t')\,dt' + k_d \frac{de(t)}{dt}
\end{equation}
In our project we take the negative value of the cross track error (CTE) as the error $e(t)$. The proportional part of the control law makes the vehicle turn back towards the center of the road whenever it drifts away from it. The derivative term provides damping so that the car steers gracefully, while the integral term helps to reduce the steady-state error of the system. The parameters $k_p$, $k_i$ and $k_d$ need to be tuned, either manually or automatically. I implemented a PID controller for both the steering angle and the speed of the car in the simulator, and chose the manual method, following the idea of the twiddle algorithm (a sketch of that idea follows the table below). The following values worked well.
|    | steering angle | speed |
| -- | -------------- | ----- |
| kp | 0.12           | 0.1   |
| kd | 1.0            | 0.0   |
| ki | 0.0001         | 0.002 |
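The gains above were found by nudging one parameter at a time in the spirit of twiddle. The snippet below is only an illustrative Python sketch of that idea (the project code itself is C++); `run_track` is a hypothetical helper that runs one simulator lap with the given gains and returns the accumulated cross track error.

```python
def twiddle(run_track, tol=0.01):
    """Coordinate-ascent style tuning of [Kp, Ki, Kd] (the twiddle idea)."""
    params = [0.0, 0.0, 0.0]      # gains to tune: Kp, Ki, Kd
    dparams = [1.0, 1.0, 1.0]     # current step size for each gain
    best_error = run_track(params)
    while sum(dparams) > tol:
        for i in range(len(params)):
            params[i] += dparams[i]
            error = run_track(params)
            if error < best_error:           # improvement: keep it, grow the step
                best_error = error
                dparams[i] *= 1.1
            else:
                params[i] -= 2 * dparams[i]  # try the opposite direction
                error = run_track(params)
                if error < best_error:
                    best_error = error
                    dparams[i] *= 1.1
                else:                        # no improvement: restore, shrink the step
                    params[i] += dparams[i]
                    dparams[i] *= 0.9
    return params
```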
* Implementation of the above equation
```cpp
void PID::Init(double Kp, double Ki, double Kd) {
  /* Set Kp, Ki and Kd to the initial values passed in from main. */
  this->Kp = Kp;
  this->Ki = Ki;
  this->Kd = Kd;

  /* Set the initial p, i and d errors to zero. */
  p_error = 0;
  i_error = 0;
  d_error = 0;
}

void PID::UpdateError(double cte) {
  d_error = cte - p_error;
  p_error = cte;
  i_error += cte;
}

double PID::TotalError() {
  return (-(Kp * p_error) - (Ki * i_error) - (Kd * d_error));
}
```
* Implementation in the simulator
![alt text][image1]
<!-- dom:TITLE: Week 34: Introduction to the course, Logistics and Practicalities -->
# Week 34: Introduction to the course, Logistics and Practicalities
<!-- dom:AUTHOR: Morten Hjorth-Jensen at Department of Physics, University of Oslo & Department of Physics and Astronomy and National Superconducting Cyclotron Laboratory, Michigan State University -->
<!-- Author: -->
**Morten Hjorth-Jensen**, Department of Physics, University of Oslo and Department of Physics and Astronomy and National Superconducting Cyclotron Laboratory, Michigan State University
Date: **Sep 16, 2020**
Copyright 1999-2020, Morten Hjorth-Jensen. Released under CC Attribution-NonCommercial 4.0 license
## Overview of first week
* Thursday August 20: First lecture: Presentation of the course, aims and content
* Thursday: Second Lecture: Start with simple linear regression and repetition of linear algebra and elements of statistics
* Friday August 21: Linear regression
* Computer lab: Wednesdays, 8am-6pm. First time: Wednesday August 26.
## Thursday August 20
[Video of Lecture](https://www.uio.no/studier/emner/matnat/fys/FYS-STK4155/h20/forelesningsvideoer/zoom_0.mp4?vrtx=view-as-webpage).
## Lectures and ComputerLab
* Lectures: Thursday (12.15pm-2pm and Friday (12.15pm-2pm). Due to the present COVID-19 situation all lectures will be online. They will be recorded and posted online at the official UiO [website](https://www.uio.no/studier/emner/matnat/fys/FYS-STK4155/h20/index.html).
* Weekly reading assignments and videos needed to solve projects and exercises.
* Weekly exercises when not working on projects. You can hand in exercises if you want.
* Detailed lecture notes, exercises, all programs presented, projects etc can be found at the homepage of the course.
* Weekly plans and all other information are on the official webpage.
* No final exam, three projects that are graded and have to be approved.
## Course Format
* Three compulsory projects. Electronic reports only using [Canvas](https://www.uio.no/english/services/it/education/canvas/) to hand in projects and [git](https://git-scm.com/) as version control software and [GitHub](https://github.com/) for repository (or [GitLab](https://about.gitlab.com/)) of all your material.
* Evaluation and grading: The three projects are graded and each counts 1/3 of the final mark. No final written or oral exam.
a. For the last project each group/participant submits a proposal or works with suggested (by us) proposals for the project.
b. If possible, we would like to organize the last project as a workshop where each group makes a poster and presents this to all other participants of the course
c. Poster session where all participants can study and discuss the other proposals.
d. Based on feedback etc, each group finalizes the report and submits for grading.
* Python is the default programming language, but feel free to use C/C++ and/or Fortran or other programming languages. All source codes discussed during the lectures can be found at the webpage and [github address](https://github.com/CompPhysics/MachineLearning/tree/master/doc/Programs) of the course.
## Teachers
**Teachers :**
* Morten Hjorth-Jensen, [email protected]
* **Phone**: +47-48257387
* **Office**: Department of Physics, University of Oslo, Eastern wing, room FØ470
* **Office hours**: *Anytime*! In Fall Semester 2020 (FS20), as a rule of thumb office hours are planned via computer or telephone. Individual or group office hours will be performed via zoom. Feel free to send an email for planning. In person meetings may also be possible if allowed by the University of Oslo's COVID-19 instructions.
* Øyvind Sigmundson Schøyen, [email protected]
* **Office**: Department of Physics, University of Oslo, Eastern wing, room FØ452
* Michael Bitney, [email protected]
* Kristian Wold, [email protected]
* Nicolai Haug, [email protected]
* Per-Dimitri Sønsteland, [email protected]
## Deadlines for projects (tentative)
1. Project 1: September 28 (graded with feedback)
2. Project 2: November 2 (graded with feedback)
3. Project 3: December 7 (graded with feedback)
Projects are handed in using **Canvas**. We use Github as repository for codes, benchmark calculations etc. Comments and feedback on projects only via **Canvas**.
## Recommended textbooks
* [Trevor Hastie, Robert Tibshirani, Jerome H. Friedman, The Elements of Statistical Learning, Springer](https://www.springer.com/gp/book/9780387848570)
* [Aurelien Geron, Hands-On Machine Learning with Scikit-Learn, Keras, and TensorFlow, 2nd Edition](https://www.oreilly.com/library/view/hands-on-machine-learning/9781492032632/)
## Prerequisites
Basic knowledge in programming and mathematics, with an emphasis on
linear algebra. Knowledge of Python or/and C++ as programming
languages is strongly recommended and experience with Jupyter notebooks
is recommended. Required courses are the equivalents to the University
of Oslo mathematics courses MAT1100, MAT1110, MAT1120 and at least one
of the corresponding computing and programming courses INF1000/INF1110
or MAT-INF1100/MAT-INF1100L/BIOS1100/KJM-INF1100. Most universities
offer nowadays a basic programming course (often compulsory) where
Python is the recurring programming language.
## Learning outcomes
This course aims at giving you insights and knowledge about many of the central algorithms used in Data Analysis and Machine Learning. The course is project based and through various numerical projects, normally three, you will be exposed to fundamental research problems in these fields, with the aim to reproduce state of the art scientific results. Both supervised and unsupervised methods will be covered. The emphasis is on a frequentist approach, although we will try to link it with a Bayesian approach as well. You will learn to develop and structure large codes for studying different cases where Machine Learning is applied to, get acquainted with computing facilities and learn to handle large scientific projects. A good scientific and ethical conduct is emphasized throughout the course. More specifically, after this course you will
* Learn about basic data analysis, statistical analysis, Bayesian statistics, Monte Carlo sampling, data optimization and machine learning;
* Be capable of extending the acquired knowledge to other systems and cases;
* Have an understanding of central algorithms used in data analysis and machine learning;
* Understand linear methods for regression and classification, from ordinary least squares, via Lasso and Ridge to Logistic regression;
* Learn about neural networks and deep learning methods for supervised and unsupervised learning. Emphasis on feed forward neural networks, convolutional and recurrent neural networks;
* Learn about about decision trees, random forests, bagging and boosting methods;
* Learn about support vector machines and kernel transformations;
* Reduction of data sets, from PCA to clustering;
* Autoencoders and Reinforcement Learning;
* Work on numerical projects to illustrate the theory. The projects play a central role and you are expected to know modern programming languages like Python or C++ and/or Fortran (Fortran2003 or later).
## Topics covered in this course: Statistical analysis and optimization of data
The course has two central parts
1. Statistical analysis and optimization of data
2. Machine learning
These topics will be scattered throughout the course and may not necessarily be taught separately. Rather, we will often take an approach (during the lectures and project/exercise sessions) where, say, elements from statistical data analysis are mixed with specific Machine Learning algorithms.
**Statistical analysis and optimization of data.**
The following topics will be covered
* Basic concepts, expectation values, variance, covariance, correlation functions and errors;
* Simpler models, binomial distribution, the Poisson distribution, simple and multivariate normal distributions;
* Central elements of Bayesian statistics and modeling;
* Gradient methods for data optimization,
* Monte Carlo methods, Markov chains, Gibbs sampling and Metropolis-Hastings sampling;
* Estimation of errors and resampling techniques such as the cross-validation, blocking, bootstrapping and jackknife methods;
* Principal Component Analysis (PCA) and its mathematical foundation
## Topics covered in this course: Machine Learning
The following topics will be covered
* Linear Regression and Logistic Regression;
* Neural networks and deep learning, including convolutional and recurrent neural networks
* Decisions trees, Random Forests, Bagging and Boosting
* Support vector machines
* Bayesian linear and logistic regression
* Boltzmann Machines
* Unsupervised learning Dimensionality reduction, from PCA to cluster models
Hands-on demonstrations, exercises and projects aim at deepening your understanding of these topics.
## Extremely useful tools, strongly recommended
**and discussed at the lab sessions.**
* GIT for version control, and GitHub or GitLab as repositories, highly recommended. This will be discussed during the first exercise session
* Anaconda and other Python environments, see intro slides and first exercise session
## Other courses on Data science and Machine Learning at UiO
The link here <https://www.mn.uio.no/english/research/about/centre-focus/innovation/data-science/studies/> gives an excellent overview of courses on Machine learning at UiO.
1. [STK2100 Machine learning and statistical methods for prediction and classification](http://www.uio.no/studier/emner/matnat/math/STK2100/index-eng.html).
2. [IN3050 Introduction to Artificial Intelligence and Machine Learning](https://www.uio.no/studier/emner/matnat/ifi/IN3050/index-eng.html). Introductory course in machine learning and AI with an algorithmic approach.
3. [STK-INF3000/4000 Selected Topics in Data Science](http://www.uio.no/studier/emner/matnat/math/STK-INF3000/index-eng.html). The course provides insight into selected contemporary relevant topics within Data Science.
4. [IN4080 Natural Language Processing](https://www.uio.no/studier/emner/matnat/ifi/IN4080/index.html). Probabilistic and machine learning techniques applied to natural language processing.
5. [STK-IN4300 Statistical learning methods in Data Science](https://www.uio.no/studier/emner/matnat/math/STK-IN4300/index-eng.html). An advanced introduction to statistical and machine learning. For students with a good mathematics and statistics background.
6. [INF4490 Biologically Inspired Computing](http://www.uio.no/studier/emner/matnat/ifi/INF4490/). An introduction to self-adapting methods also called artificial intelligence or machine learning.
7. [IN-STK5000 Adaptive Methods for Data-Based Decision Making](https://www.uio.no/studier/emner/matnat/ifi/IN-STK5000/index-eng.html). Methods for adaptive collection and processing of data based on machine learning techniques.
8. [IN5400/INF5860 Machine Learning for Image Analysis](https://www.uio.no/studier/emner/matnat/ifi/IN5400/). An introduction to deep learning with particular emphasis on applications within Image analysis, but useful for other application areas too.
9. [TEK5040 Deep learning for autonomous systems](https://www.uio.no/studier/emner/matnat/its/TEK5040/). The course addresses advanced algorithms and architectures for deep learning with neural networks. The course provides an introduction to how deep-learning techniques can be used in the construction of key parts of advanced autonomous systems that exist in physical environments and cyber environments.
10. [STK4051 Computational Statistics](https://www.uio.no/studier/emner/matnat/math/STK4051/index-eng.html)
11. [STK4021 Applied Bayesian Analysis and Numerical Methods](https://www.uio.no/studier/emner/matnat/math/STK4021/index-eng.html)
## Introduction
Our emphasis throughout this series of lectures
is on understanding the mathematical aspects of
different algorithms used in the fields of data analysis and machine learning.
However, where possible we will emphasize the
importance of using available software. We start thus with a hands-on
and top-down approach to machine learning. The aim is thus to start with
relevant data or data we have produced
and use these to introduce statistical data analysis
concepts and machine learning algorithms before we delve into the
algorithms themselves. The examples we will use in the beginning, start with simple
polynomials with random noise added. We will use the Python
software package [Scikit-Learn](http://scikit-learn.org/stable/) and
introduce various machine learning algorithms to make fits of
the data and predictions. We move thereafter to more interesting
cases such as data from say experiments (below we will look at experimental nuclear binding energies as an example).
These are examples where we can easily set up the data and
then use machine learning algorithms included in for example
**Scikit-Learn**.
These examples will serve us the purpose of getting
started. Furthermore, they allow us to catch more than two birds with
a stone. They will allow us to bring in some programming specific
topics and tools as well as showing the power of various Python
libraries for machine learning and statistical data analysis.
Here, we will mainly focus on two
specific Python packages for Machine Learning, Scikit-Learn and
Tensorflow (see below for links etc). Moreover, the examples we
introduce will serve as inputs to many of our discussions later, as
well as allowing you to set up models and produce your own data and
get started with programming.
## What is Machine Learning?
Statistics, data science and machine learning form important fields of
research in modern science. They describe how to learn and make
predictions from data, as well as allowing us to extract important
correlations about physical process and the underlying laws of motion
in large data sets. The latter, big data sets, appear frequently in
essentially all disciplines, from the traditional Science, Technology,
Mathematics and Engineering fields to Life Science, Law, education
research, the Humanities and the Social Sciences.
It has become more
and more common to see research projects on big data in for example
the Social Sciences where extracting patterns from complicated survey
data is one of many research directions. Having a solid grasp of data
analysis and machine learning is thus becoming central to scientific
computing in many fields, and competences and skills within the fields
of machine learning and scientific computing are nowadays strongly
requested by many potential employers. The latter cannot be
overstated, familiarity with machine learning has almost become a
prerequisite for many of the most exciting employment opportunities,
whether they are in bioinformatics, life science, physics or finance,
in the private or the public sector. This author has had several
students or met students who have been hired recently based on their
skills and competences in scientific computing and data science, often
with marginal knowledge of machine learning.
Machine learning is a subfield of computer science, and is closely
related to computational statistics. It evolved from the study of
pattern recognition in artificial intelligence (AI) research, and has
made contributions to AI tasks like computer vision, natural language
processing and speech recognition. Many of the methods we will study are also
strongly rooted in basic mathematics and physics research.
Ideally, machine learning represents the science of giving computers
the ability to learn without being explicitly programmed. The idea is
that there exist generic algorithms which can be used to find patterns
in a broad class of data sets without having to write code
specifically for each problem. The algorithm will build its own logic
based on the data. You should however always keep in mind that
machines and algorithms are to a large extent developed by humans. The
insights and knowledge we have about a specific system, play a central
role when we develop a specific machine learning algorithm.
Machine learning is an extremely rich field, in spite of its young
age. The increases we have seen during the last three decades in
computational capabilities have been followed by developments of
methods and techniques for analyzing and handling large data sets,
relying heavily on statistics, computer science and mathematics. The
field is rather new and developing rapidly. Popular software packages
written in Python for machine learning like
[Scikit-learn](http://scikit-learn.org/stable/),
[Tensorflow](https://www.tensorflow.org/),
[PyTorch](http://pytorch.org/) and [Keras](https://keras.io/), all
freely available at their respective GitHub sites, encompass
communities of developers in the thousands or more. And the number of
code developers and contributors keeps increasing. Not all the
algorithms and methods can be given a rigorous mathematical
justification, opening up thereby large rooms for experimenting and
trial and error and thereby exciting new developments. However, a
solid command of linear algebra, multivariate theory, probability
theory, statistical data analysis, understanding errors and Monte
Carlo methods are central elements in a proper understanding of many
of the algorithms and methods we will discuss.
## Types of Machine Learning
The approaches to machine learning are many, but are often split into
two main categories. In *supervised learning* we know the answer to a
problem, and let the computer deduce the logic behind it. On the other
hand, *unsupervised learning* is a method for finding patterns and
relationship in data sets without any prior knowledge of the system.
Some authors also operate with a third category, namely
*reinforcement learning*. This is a paradigm of learning inspired by
behavioral psychology, where learning is achieved by trial-and-error,
solely from rewards and punishment.
Another way to categorize machine learning tasks is to consider the
desired output of a system. Some of the most common tasks are:
* Classification: Outputs are divided into two or more classes. The goal is to produce a model that assigns inputs into one of these classes. An example is to identify digits based on pictures of hand-written ones. Classification is typically supervised learning.
* Regression: Finding a functional relationship between an input data set and a reference data set. The goal is to construct a function that maps input data to continuous output values.
* Clustering: Data are divided into groups with certain common traits, without knowing the different groups beforehand. It is thus a form of unsupervised learning.
The methods we cover have three main topics in common, irrespective of
whether we deal with supervised or unsupervised learning. The first
ingredient is normally our data set (which can be subdivided into
training and test data), the second item is a model which is normally a
function of some parameters. The model reflects our knowledge of the system (or lack thereof). As an example, if we know that our data show a behavior similar to what would be predicted by a polynomial, fitting our data to a polynomial of some degree would then determine our model.
The last ingredient is a so-called **cost**
function which allows us to present an estimate on how good our model
is in reproducing the data it is supposed to train.
At the heart of basically all ML algorithms there are so-called minimization algorithms; often we end up with various variants of **gradient** methods.
## Software and needed installations
We will make extensive use of Python as programming language and its
myriad of available libraries. You will find
Jupyter notebooks invaluable in your work. You can run **R**
codes in the Jupyter/IPython notebooks, with the immediate benefit of
visualizing your data. You can also use compiled languages like C++,
Rust, Julia, Fortran etc if you prefer. The focus in these lectures will be
on Python.
If you have Python installed (we strongly recommend Python3) and you feel
pretty familiar with installing different packages, we recommend that
you install the following Python packages via **pip** as
1. pip install numpy scipy matplotlib ipython scikit-learn mglearn sympy pandas pillow
For Python3, replace **pip** with **pip3**.
For OSX users we recommend, after having installed Xcode, to
install **brew**. Brew allows for a seamless installation of additional
software via for example
1. brew install python3
For Linux users, with its variety of distributions like for example the widely popular Ubuntu distribution,
you can use **pip** as well and simply install Python as
1. sudo apt-get install python3 (or python for Python 2.7)
etc etc.
## Python installers
If you don't want to perform these operations separately and venture
into the hassle of exploring how to set up dependencies and paths, we
recommend two widely used distributions which set up all relevant
dependencies for Python, namely
* [Anaconda](https://docs.anaconda.com/),
which is an open source
distribution of the Python and R programming languages for large-scale
data processing, predictive analytics, and scientific computing, that
aims to simplify package management and deployment. Package versions
are managed by the package management system **conda**.
* [Enthought canopy](https://www.enthought.com/product/canopy/)
is a Python
distribution for scientific and analytic computing and
analysis environment, available for free and under a commercial
license.
Furthermore, [Google's Colab](https://colab.research.google.com/notebooks/welcome.ipynb) is a free Jupyter notebook environment that requires
no setup and runs entirely in the cloud. Try it out!
## Useful Python libraries
Here we list several useful Python libraries we strongly recommend (if you use anaconda many of these are already there)
* [NumPy](https://www.numpy.org/) is a highly popular library for large, multi-dimensional arrays and matrices, along with a large collection of high-level mathematical functions to operate on these arrays
* [The pandas](https://pandas.pydata.org/) library provides high-performance, easy-to-use data structures and data analysis tools
* [Xarray](http://xarray.pydata.org/en/stable/) is a Python package that makes working with labelled multi-dimensional arrays simple, efficient, and fun!
* [Scipy](https://www.scipy.org/) (pronounced “Sigh Pie”) is a Python-based ecosystem of open-source software for mathematics, science, and engineering.
* [Matplotlib](https://matplotlib.org/) is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms.
* [Autograd](https://github.com/HIPS/autograd) can automatically differentiate native Python and Numpy code. It can handle a large subset of Python's features, including loops, ifs, recursion and closures, and it can even take derivatives of derivatives of derivatives
* [SymPy](https://www.sympy.org/en/index.html) is a Python library for symbolic mathematics.
* [scikit-learn](https://scikit-learn.org/stable/) has simple and efficient tools for machine learning, data mining and data analysis
* [TensorFlow](https://www.tensorflow.org/) is a Python library for fast numerical computing created and released by Google
* [Keras](https://keras.io/) is a high-level neural networks API, written in Python and capable of running on top of TensorFlow, CNTK, or Theano
* And many more such as [pytorch](https://pytorch.org/), [Theano](https://pypi.org/project/Theano/) etc
## Installing R, C++, cython or Julia
You will also find it convenient to utilize **R**. We will mainly
use Python during our lectures and in various projects and exercises.
Those of you
already familiar with **R** should feel free to continue using **R**, keeping
however an eye on the parallel Python set ups. Similarly, if you are a
Python aficionado, feel free to explore **R** as well. Jupyter/IPython
notebook allows you to run **R** codes interactively in your
browser. The software library **R** is really tailored for statistical data analysis
and allows for an easy usage of the tools and algorithms we will discuss in these
lectures.
To install **R** with Jupyter notebook
[follow the link here](https://mpacer.org/maths/r-kernel-for-ipython-notebook)
## Installing R, C++, cython, Numba etc
For the C++ aficionados, Jupyter/IPython notebook allows you also to
install C++ and run codes written in this language interactively in
the browser. Since we will emphasize writing many of the algorithms
yourself, you can thus opt for either Python or C++ (or Fortran or other compiled languages) as programming
languages.
To add more entropy, **cython** can also be used when running your
notebooks. It means that Python with the jupyter notebook
setup allows you to integrate widely popular software and tools for
scientific computing. Similarly, the
[Numba Python package](https://numba.pydata.org/) delivers increased performance
capabilities with minimal rewrites of your codes. With its
versatility, including symbolic operations, Python offers a unique
computational environment. Your jupyter notebook can easily be
converted into a nicely rendered **PDF** file or a Latex file for
further processing. For example, convert to latex as
`jupyter nbconvert filename.ipynb --to latex`
And to add more versatility, the Python package [SymPy](http://www.sympy.org/en/index.html) is a Python library for symbolic mathematics. It aims to become a full-featured computer algebra system (CAS) and is entirely written in Python.
Finally, if you wish to use the light mark-up language
[doconce](https://github.com/hplgit/doconce) you can convert a standard ascii text file into various HTML
formats, ipython notebooks, latex files, pdf files etc with minimal edits. These lectures were generated using **doconce**.
## Numpy examples and Important Matrix and vector handling packages
There are several central software libraries for linear algebra and eigenvalue problems. Several of the more
popular ones have been wrapped into other software packages, like those from the widely used text **Numerical Recipes**. The original source codes in many of the available packages are often taken from the widely used
software package LAPACK, which follows two other popular packages
developed in the 1970s, namely EISPACK and LINPACK. We describe them shortly here.
* LINPACK: package for linear equations and least square problems.
* LAPACK: package for solving symmetric, unsymmetric and generalized eigenvalue problems. From LAPACK's website <http://www.netlib.org> it is possible to download for free all source codes from this library. Both C/C++ and Fortran versions are available.
* BLAS (I, II and III): (Basic Linear Algebra Subprograms) are routines that provide standard building blocks for performing basic vector and matrix operations. Blas I is vector operations, II vector-matrix operations and III matrix-matrix operations. Highly parallelized and efficient codes, all available for download from <http://www.netlib.org>.
## Basic Matrix Features
**Matrix properties reminder.**
$$
\mathbf{A} =
\begin{bmatrix} a_{11} & a_{12} & a_{13} & a_{14} \\
a_{21} & a_{22} & a_{23} & a_{24} \\
a_{31} & a_{32} & a_{33} & a_{34} \\
a_{41} & a_{42} & a_{43} & a_{44}
\end{bmatrix}\qquad
\mathbf{I} =
\begin{bmatrix} 1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & 1 & 0 \\
0 & 0 & 0 & 1
\end{bmatrix}
$$
The inverse of a matrix is defined by
$$
\mathbf{A}^{-1} \cdot \mathbf{A} = I
$$
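As a small numerical aside, this property is easy to verify with Numpy (introduced in more detail below); a minimal sketch:

```
import numpy as np

# verify that the numerical inverse satisfies A^{-1} A = I for a small test matrix
A = np.array([[4.0, 7.0], [2.0, 6.0]])
Ainv = np.linalg.inv(A)
print(Ainv @ A)                          # close to the 2 x 2 identity, up to round-off
print(np.allclose(Ainv @ A, np.eye(2)))  # True
```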
<table border="1">
<thead>
<tr><th align="center"> Relations </th> <th align="center"> Name </th> <th align="center"> matrix elements </th> </tr>
</thead>
<tbody>
<tr><td align="center"> $A = A^{T}$ </td> <td align="center"> symmetric </td> <td align="center"> $a_{ij} = a_{ji}$ </td> </tr>
<tr><td align="center"> $A = \left (A^{T} \right )^{-1}$ </td> <td align="center"> real orthogonal </td> <td align="center"> $\sum_k a_{ik} a_{jk} = \sum_k a_{ki} a_{kj} = \delta_{ij}$ </td> </tr>
<tr><td align="center"> $A = A^{ * }$ </td> <td align="center"> real matrix </td> <td align="center"> $a_{ij} = a_{ij}^{ * }$ </td> </tr>
<tr><td align="center"> $A = A^{\dagger}$ </td> <td align="center"> hermitian </td> <td align="center"> $a_{ij} = a_{ji}^{ * }$ </td> </tr>
<tr><td align="center"> $A = \left (A^{\dagger} \right )^{-1}$ </td> <td align="center"> unitary </td> <td align="center"> $\sum_k a_{ik} a_{jk}^{ * } = \sum_k a_{ki}^{ * } a_{kj} = \delta_{ij}$ </td> </tr>
</tbody>
</table>
### Some famous Matrices
* Diagonal if $a_{ij}=0$ for $i\ne j$
* Upper triangular if $a_{ij}=0$ for $i > j$
* Lower triangular if $a_{ij}=0$ for $i < j$
* Upper Hessenberg if $a_{ij}=0$ for $i > j+1$
* Lower Hessenberg if $a_{ij}=0$ for $i < j+1$
* Tridiagonal if $a_{ij}=0$ for $|i -j| > 1$
* Lower banded with bandwidth $p$: $a_{ij}=0$ for $i > j+p$
* Upper banded with bandwidth $p$: $a_{ij}=0$ for $i < j+p$
* Banded, block upper triangular, block lower triangular....
### More Basic Matrix Features
**Some Equivalent Statements.**
For an $N\times N$ matrix $\mathbf{A}$ the following properties are all equivalent
* If the inverse of $\mathbf{A}$ exists, $\mathbf{A}$ is nonsingular.
* The equation $\mathbf{Ax}=0$ implies $\mathbf{x}=0$.
* The rows of $\mathbf{A}$ form a basis of $R^N$.
* The columns of $\mathbf{A}$ form a basis of $R^N$.
* $\mathbf{A}$ is a product of elementary matrices.
* $0$ is not an eigenvalue of $\mathbf{A}$.
## Numpy and arrays
[Numpy](http://www.numpy.org/) provides an easy way to handle arrays in Python. The standard way to import this library is as
```
import numpy as np
```
Here follows a simple example where we set up an array of ten elements, all determined by random numbers drawn according to the normal distribution,
```
n = 10
x = np.random.normal(size=n)
print(x)
```
We defined a vector $x$ with $n=10$ elements with its values given by the Normal distribution $N(0,1)$.
Another alternative is to declare a vector as follows
```
import numpy as np
x = np.array([1, 2, 3])
print(x)
```
Here we have defined a vector with three elements, with $x_0=1$, $x_1=2$ and $x_2=3$. Note that both Python and C++
start numbering array elements from $0$ and on. This means that a vector with $n$ elements has a sequence of entities $x_0, x_1, x_2, \dots, x_{n-1}$. We could also let Numpy (recommended) compute the logarithms of a specific array as
```
import numpy as np
x = np.log(np.array([4, 7, 8]))
print(x)
```
In the last example we used Numpy's unary function **np.log**. This function is
highly tuned to compute array elements since the code is vectorized
and does not require looping. We normally recommend that you use the
Numpy intrinsic functions instead of the corresponding **log** function
from Python's **math** module. The looping over the array elements is done
internally by the **np.log** function. The alternative, and slower, way to compute the
logarithms of a vector would be to write
```
import numpy as np
from math import log
x = np.array([4, 7, 8])
for i in range(0, len(x)):
x[i] = log(x[i])
print(x)
```
We note that our code is much longer already and we need to import the **log** function from the **math** module.
The attentive reader will also notice that the output is $[1, 1, 2]$. Python automagically interprets our numbers as integers (much like the **auto** keyword in C++). To change this we could define our array elements to be double precision numbers as
```
import numpy as np
x = np.log(np.array([4, 7, 8], dtype = np.float64))
print(x)
```
or simply write them as double precision numbers (Python uses 64 bits as default for floating point type variables), that is
```
import numpy as np
x = np.log(np.array([4.0, 7.0, 8.0]))
print(x)
```
To check the number of bytes (remember that one byte contains eight bits, so a double precision number occupies eight bytes), you can simply use the **itemsize** attribute (the array $x$ is actually an object which inherits the functionalities defined in Numpy) as
```
import numpy as np
x = np.log(np.array([4.0, 7.0, 8.0]))
print(x.itemsize)
```
## Matrices in Python
Having defined vectors, we are now ready to try out matrices. We can
define a $3 \times 3$ real matrix $\hat{A}$ as (recall that we use
lowercase letters for vectors and uppercase letters for matrices)
```
import numpy as np
A = np.log(np.array([ [4.0, 7.0, 8.0], [3.0, 10.0, 11.0], [4.0, 5.0, 7.0] ]))
print(A)
```
If we use the **shape** function we would get $(3, 3)$ as output, that is, verifying that our matrix is a $3\times 3$ matrix. We can slice the matrix and print, for example, the first column (Numpy organizes matrix elements in row-major order, see below) as
```
import numpy as np
A = np.log(np.array([ [4.0, 7.0, 8.0], [3.0, 10.0, 11.0], [4.0, 5.0, 7.0] ]))
# print the first column, row-major order and elements start with 0
print(A[:,0])
```
We can continue this way by printing out other columns or rows. The example here prints out the second row
```
import numpy as np
A = np.log(np.array([ [4.0, 7.0, 8.0], [3.0, 10.0, 11.0], [4.0, 5.0, 7.0] ]))
# print the second row, row-major order and elements start with 0
print(A[1,:])
```
Numpy contains many other functionalities that allow us to slice, subdivide etc etc arrays. We strongly recommend that you look up the [Numpy website for more details](http://www.numpy.org/). Useful functions when defining a matrix are the **np.zeros** function which declares a matrix of a given dimension and sets all elements to zero
```
import numpy as np
n = 10
# define a matrix of dimension 10 x 10 and set all elements to zero
A = np.zeros( (n, n) )
print(A)
```
or initializing all elements to one
```
import numpy as np
n = 10
# define a matrix of dimension 10 x 10 and set all elements to one
A = np.ones( (n, n) )
print(A)
```
or as uniformly distributed random numbers (see the material on random number generators in the statistics part)
```
import numpy as np
n = 10
# define a matrix of dimension 10 x 10 and set all elements to random numbers with x \in [0, 1]
A = np.random.rand(n, n)
print(A)
```
As we will see throughout these lectures, there are several extremely useful functionalities in Numpy.
As an example, consider the discussion of the covariance matrix. Suppose we have defined three vectors
$\hat{x}, \hat{y}, \hat{z}$ with $n$ elements each. The covariance matrix is defined as
$$
\hat{\Sigma} = \begin{bmatrix} \sigma_{xx} & \sigma_{xy} & \sigma_{xz} \\
\sigma_{yx} & \sigma_{yy} & \sigma_{yz} \\
\sigma_{zx} & \sigma_{zy} & \sigma_{zz}
\end{bmatrix},
$$
where for example
$$
\sigma_{xy} =\frac{1}{n} \sum_{i=0}^{n-1}(x_i- \overline{x})(y_i- \overline{y}).
$$
The Numpy function **np.cov** calculates the covariance elements using the factor $1/(n-1)$ instead of $1/n$ since it assumes we do not have the exact mean values.
The following code uses the **np.vstack** function, which takes each vector of dimension $1\times n$ and stacks them into a $3\times n$ matrix $\hat{W}$

$$
\hat{W} = \begin{bmatrix} x_0 & x_1 & x_2 & \dots & x_{n-2} & x_{n-1} \\
                          y_0 & y_1 & y_2 & \dots & y_{n-2} & y_{n-1} \\
                          z_0 & z_1 & z_2 & \dots & z_{n-2} & z_{n-1}
\end{bmatrix},
$$
which in turn is converted into the $3\times 3$ covariance matrix
$\hat{\Sigma}$ via the Numpy function **np.cov()**. We note that we can also calculate
the mean value of each set of samples $\hat{x}$ etc using the Numpy
function **np.mean(x)**. We can also extract the eigenvalues of the
covariance matrix through the **np.linalg.eig()** function.
```
# Importing various packages
import numpy as np
n = 100
x = np.random.normal(size=n)
print(np.mean(x))
y = 4+3*x+np.random.normal(size=n)
print(np.mean(y))
z = x**3+np.random.normal(size=n)
print(np.mean(z))
W = np.vstack((x, y, z))
Sigma = np.cov(W)
print(Sigma)
Eigvals, Eigvecs = np.linalg.eig(Sigma)
print(Eigvals)
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse
eye = np.eye(4)
print(eye)
sparse_mtx = sparse.csr_matrix(eye)
print(sparse_mtx)
x = np.linspace(-10,10,100)
y = np.sin(x)
plt.plot(x,y,marker='x')
plt.show()
```
## Meet the Pandas
<!-- dom:FIGURE: [fig/pandas.jpg, width=600 frac=0.8] -->
<!-- begin figure -->
<p></p>
<img src="fig/pandas.jpg" width=600>
<!-- end figure -->
Another useful Python package is
[pandas](https://pandas.pydata.org/), which is an open source library
providing high-performance, easy-to-use data structures and data
analysis tools for Python. **pandas** stands for panel data, a term borrowed from econometrics and is an efficient library for data analysis with an emphasis on tabular data.
**pandas** has two major classes, the **DataFrame** class with two-dimensional data objects and tabular data organized in columns and the class **Series** with a focus on one-dimensional data objects. Both classes allow you to index data easily as we will see in the examples below.
**pandas** allows you also to perform mathematical operations on the data, spanning from simple reshapings of vectors and matrices to statistical operations.
The following simple example shows how we can, in an easy way make tables of our data. Here we define a data set which includes names, place of birth and date of birth, and displays the data in an easy to read way. We will see repeated use of **pandas**, in particular in connection with classification of data.
```
import pandas as pd
from IPython.display import display
data = {'First Name': ["Frodo", "Bilbo", "Aragorn II", "Samwise"],
'Last Name': ["Baggins", "Baggins","Elessar","Gamgee"],
'Place of birth': ["Shire", "Shire", "Eriador", "Shire"],
'Date of Birth T.A.': [2968, 2890, 2931, 2980]
}
data_pandas = pd.DataFrame(data)
display(data_pandas)
```
In the above we have imported **pandas** with the shorthand **pd**, which has become the standard way to import **pandas**. We then make a list of various variables
and reorganize the above lists into a **DataFrame**, and then print out a neat table with specific column labels such as *Name*, *place of birth* and *date of birth*.
Displaying these results, we see that the indices are given by the default numbers from zero to three.
**pandas** is extremely flexible and we can easily change the above indices by defining a new type of indexing as
```
data_pandas = pd.DataFrame(data,index=['Frodo','Bilbo','Aragorn','Sam'])
display(data_pandas)
```
Thereafter we display the content of the row which begins with the index **Aragorn**
```
display(data_pandas.loc['Aragorn'])
```
We can easily append data to this, for example
```
new_hobbit = {'First Name': ["Peregrin"],
'Last Name': ["Took"],
'Place of birth': ["Shire"],
'Date of Birth T.A.': [2990]
}
data_pandas=data_pandas.append(pd.DataFrame(new_hobbit, index=['Pippin']))
display(data_pandas)
```
Here are other examples where we use the **DataFrame** functionality to handle arrays, now with more interesting features for us, namely numbers. We set up a matrix
of dimensionality $10\times 5$ and compute the mean value and standard deviation of each column. Similarly, we can perform mathematical operations like squaring the matrix elements and many other operations.
```
import numpy as np
import pandas as pd
from IPython.display import display
np.random.seed(100)
# setting up a 10 x 5 matrix
rows = 10
cols = 5
a = np.random.randn(rows,cols)
df = pd.DataFrame(a)
display(df)
print(df.mean())
print(df.std())
display(df**2)
```
Thereafter we can select specific columns only and plot final results
```
df.columns = ['First', 'Second', 'Third', 'Fourth', 'Fifth']
df.index = np.arange(10)
display(df)
print(df['Second'].mean() )
print(df.info())
print(df.describe())
from pylab import plt, mpl
plt.style.use('seaborn')
mpl.rcParams['font.family'] = 'serif'
df.cumsum().plot(lw=2.0, figsize=(10,6))
plt.show()
df.plot.bar(figsize=(10,6), rot=15)
plt.show()
```
We can produce a $4\times 4$ matrix
```
b = np.arange(16).reshape((4,4))
print(b)
df1 = pd.DataFrame(b)
print(df1)
```
and many other operations.
The **Series** class is another important class included in
**pandas**. You can view it as a specialization of **DataFrame** but where
we have just a single column of data. It shares many of the same features as **DataFrame**. As with **DataFrame**,
most operations are vectorized, achieving thereby a high performance when dealing with computations of arrays, in particular labeled arrays.
As we will see below, it also leads to very concise code, close to the mathematical operations we may be interested in.
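A minimal illustration of a **Series** with labeled entries and vectorized operations could look like this.

```
import pandas as pd

# a Series is a labeled one-dimensional array; operations act element-wise
s = pd.Series([1.0, 2.0, 3.0, 4.0], index=['a', 'b', 'c', 'd'])
print(s['b'])               # access by label
print(s.mean(), s.std())    # simple statistics
print(s**2)                 # element-wise operations keep the labels
```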
For multidimensional arrays, we recommend strongly [xarray](http://xarray.pydata.org/en/stable/). **xarray** has much of the same flexibility as **pandas**, but allows for the extension to higher dimensions than two. We will see examples later of the usage of both **pandas** and **xarray**.
## Friday August 21
[Video of Lecture](https://www.uio.no/studier/emner/matnat/fys/FYS-STK3155/h20/forelesningsvideoer/LectureAug21.mp4?vrtx=view-as-webpage) and [Handwritten notes](https://github.com/CompPhysics/MachineLearning/blob/master/doc/HandWrittenNotes/NotesAugust21.pdf)
## Reading Data and fitting
In order to study various Machine Learning algorithms, we need to
access data. Accessing data is an essential step in all machine
learning algorithms. In particular, setting up the so-called **design
matrix** (to be defined below) is often the first element we need in
order to perform our calculations. To set up the design matrix means
reading (and later, when the calculations are done, writing) data
in various formats. The formats span from reading files from disk,
loading data from databases and interacting with online sources
like web application programming interfaces (APIs).
In handling various input formats, as discussed above, we will mainly stay with **pandas**,
a Python package which allows us, in a seamless and painless way, to
deal with a multitude of formats, from standard **csv** (comma separated
values) files, via **excel**, **html** to **hdf5** formats. With **pandas**
and the **DataFrame** and **Series** functionalities we are able to convert text data
into the calculational formats we need for a specific algorithm. And our code is going to be
pretty close to the basic mathematical expressions.
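As a small sketch of this workflow (the file name and column names here are hypothetical), reading a **csv** file and extracting the columns we need as Numpy arrays could look like this.

```
import pandas as pd

# hypothetical csv file with columns 'x' and 'y'; adapt the name and columns to your data
df = pd.read_csv('mydata.csv')
print(df.head())        # quick look at the first rows
x = df['x'].values      # columns as Numpy arrays, ready for building a design matrix
y = df['y'].values
```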
Our first data set is going to be a classic from nuclear physics, namely all
available data on binding energies. Don't be intimidated if you are not familiar with nuclear physics. It serves simply as an example here of a data set.
We will show some of the
strengths of packages like **Scikit-Learn** in fitting nuclear binding energies to
specific functions using linear regression first. Then, as a teaser, we will show you how
you can easily implement other algorithms like decision trees and random forests and neural networks.
But before we really start with nuclear physics data, let's just look at some simpler polynomial fitting cases, such as,
(don't be offended) fitting straight lines!
## Friday August 21
### Simple linear regression model using **scikit-learn**
We start with perhaps our simplest possible example, using **Scikit-Learn** to perform linear regression analysis on a data set produced by us.
What follows is a simple Python code where we have defined a function
$y$ in terms of the variable $x$. Both are defined as vectors with $100$ entries.
The numbers in the vector $\hat{x}$ are given
by random numbers generated with a uniform distribution with entries
$x_i \in [0,1]$ (more about probability distribution functions
later). These values are then used to define a function $y(x)$
(tabulated again as a vector) with a linear dependence on $x$ plus a
random noise added via the normal distribution.
The Numpy functions are imported using the **import numpy as np**
statement and the random number generator for the uniform distribution
is called using the function **np.random.rand()**, where we specify
that we want $100$ random variables. Using Numpy we define
automatically an array with the specified number of elements, $100$ in
our case. With the Numpy function **randn()** we can compute random
numbers with the normal distribution (mean value $\mu$ equal to zero and
variance $\sigma^2$ set to one) and produce the values of $y$ assuming a linear
dependence as function of $x$
$$
y = 2x+N(0,1),
$$
where $N(0,1)$ represents random numbers generated by the normal
distribution. From **Scikit-Learn** we import then the
**LinearRegression** functionality and make a prediction $\tilde{y} =
\alpha + \beta x$ using the function **fit(x,y)**. We call the set of
data $(\hat{x},\hat{y})$ our training data. The Python package
**scikit-learn** has also a functionality which extracts the above
fitting parameters $\alpha$ and $\beta$ (see below). Later we will
distinguish between training data and test data.
For plotting we use the Python package
[matplotlib](https://matplotlib.org/) which produces publication
quality figures. Feel free to explore the extensive
[gallery](https://matplotlib.org/gallery/index.html) of examples. In
this example we plot our original values of $x$ and $y$ as well as the
prediction **ypredict** ($\tilde{y}$), which attempts at fitting our
data with a straight line.
The Python code follows here.
```
# Importing various packages
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
x = np.random.rand(100,1)
y = 2*x+np.random.randn(100,1)
linreg = LinearRegression()
linreg.fit(x,y)
xnew = np.array([[0],[1]])
ypredict = linreg.predict(xnew)
plt.plot(xnew, ypredict, "r-")
plt.plot(x, y ,'ro')
plt.axis([0,1.0,0, 5.0])
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.title(r'Simple Linear Regression')
plt.show()
```
This example serves several aims. It allows us to demonstrate several
aspects of data analysis and later machine learning algorithms. The
immediate visualization shows that our linear fit is not
impressive. It goes through the data points, but there are many
outliers which are not reproduced by our linear regression. We could
now play around with this small program and change for example the
factor in front of $x$ and the normal distribution. Try to change the
function $y$ to
$$
y = 10x+0.01 \times N(0,1),
$$
where $x$ is defined as before. Does the fit look better? Indeed, by
reducing the role of the noise given by the normal distribution we see immediately that
our linear prediction seemingly reproduces better the training
set. However, this testing 'by the eye' is obviously not satisfactory in the
long run. Here we have only defined the training data and our model, and
have not discussed a more rigorous approach to the **cost** function.
We need more rigorous criteria in defining whether we have succeeded or
not in modeling our training data. You will be surprised to see that
many scientists seldom venture beyond this 'by the eye' approach. A
standard approach for the *cost* function is the so-called $\chi^2$
function (a variant of the mean-squared error (MSE))
$$
\chi^2 = \frac{1}{n}
\sum_{i=0}^{n-1}\frac{(y_i-\tilde{y}_i)^2}{\sigma_i^2},
$$
where $\sigma_i^2$ is the variance (to be defined later) of the entry
$y_i$. We may not know the explicit value of $\sigma_i^2$, it serves
however the aim of scaling the equations and make the cost function
dimensionless.
Minimizing the cost function is a central aspect of
our discussions to come. Finding its minima as function of the model
parameters ($\alpha$ and $\beta$ in our case) will be a recurring
theme in these series of lectures. Essentially all machine learning
algorithms we will discuss center around the minimization of the
chosen cost function. This depends in turn on our specific
model for describing the data, a typical situation in supervised
learning. Automatizing the search for the minima of the cost function is a
central ingredient in all algorithms. Typical methods which are
employed are various variants of **gradient** methods. These will be
discussed in more detail later. Again, you'll be surprised to hear that
many practitioners minimize the above function 'by the eye', popularly dubbed
'chi by the eye'. That is, change a parameter and see (visually and numerically) that
the $\chi^2$ function becomes smaller.
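As a first taste of such gradient methods, here is a minimal sketch of plain gradient descent on the MSE for the simple model $\tilde{y} = \alpha + \beta x$; the learning rate and the number of iterations are chosen ad hoc.

```
import numpy as np

np.random.seed(2020)
x = np.random.rand(100, 1)
y = 2.0 + 5 * x + 0.5 * np.random.randn(100, 1)

alpha, beta = 0.0, 0.0   # parameters to be learned
eta = 0.1                # learning rate, chosen ad hoc
n = len(x)
for iteration in range(1000):
    ytilde = alpha + beta * x
    # gradients of the MSE with respect to alpha and beta
    grad_alpha = -2.0 / n * np.sum(y - ytilde)
    grad_beta = -2.0 / n * np.sum((y - ytilde) * x)
    alpha -= eta * grad_alpha
    beta -= eta * grad_beta
print(alpha, beta)       # should approach the values 2 and 5 used to generate the data
```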
There are many ways to define the cost function. A simpler approach is to look at the relative difference between the training data and the predicted data, that is we define
the relative error (why would we prefer the MSE instead of the relative error?) as
$$
\epsilon_{\mathrm{relative}}= \frac{\vert \hat{y} -\hat{\tilde{y}}\vert}{\vert \hat{y}\vert}.
$$
The squared cost function results in an arithmetic mean-unbiased
estimator, and the absolute-value cost function results in a
median-unbiased estimator (in the one-dimensional case, and a
geometric median-unbiased estimator for the multi-dimensional
case). The squared cost function has the disadvantage that it has the tendency
to be dominated by outliers.
We can modify easily the above Python code and plot the relative error instead
```
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
x = np.random.rand(100,1)
y = 5*x+0.01*np.random.randn(100,1)
linreg = LinearRegression()
linreg.fit(x,y)
ypredict = linreg.predict(x)
plt.plot(x, np.abs(ypredict-y)/abs(y), "ro")
plt.axis([0,1.0,0.0, 0.5])
plt.xlabel(r'$x$')
plt.ylabel(r'$\epsilon_{\mathrm{relative}}$')
plt.title(r'Relative error')
plt.show()
```
Depending on the parameter in front of the normal distribution, we may
have a small or larger relative error. Try to play around with
different training data sets and study (graphically) the value of the
relative error.
As mentioned above, **Scikit-Learn** has an impressive functionality.
We can for example extract the values of $\alpha$ and $\beta$ and
their error estimates, or the variance and standard deviation and many
other properties from the statistical data analysis.
Here we show an
example of the functionality of **Scikit-Learn**.
```
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score, mean_squared_log_error, mean_absolute_error
x = np.random.rand(100,1)
y = 2.0+ 5*x+0.5*np.random.randn(100,1)
linreg = LinearRegression()
linreg.fit(x,y)
ypredict = linreg.predict(x)
print('The intercept alpha: \n', linreg.intercept_)
print('Coefficient beta : \n', linreg.coef_)
# The mean squared error
print("Mean squared error: %.2f" % mean_squared_error(y, ypredict))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(y, ypredict))
# Mean squared log error
print('Mean squared log error: %.2f' % mean_squared_log_error(y, ypredict) )
# Mean absolute error
print('Mean absolute error: %.2f' % mean_absolute_error(y, ypredict))
plt.plot(x, ypredict, "r-")
plt.plot(x, y ,'ro')
plt.axis([0.0,1.0,1.5, 7.0])
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.title(r'Linear Regression fit ')
plt.show()
```
The attribute **coef_** gives us the parameter $\beta$ of our fit while **intercept_** yields
$\alpha$. Depending on the constant in front of the normal distribution, we get values near or far from $\alpha =2$ and $\beta =5$. Try to play around with different parameters in front of the normal distribution. The function **mean_squared_error** gives us the mean squared error, a risk metric corresponding to the expected value of the squared (quadratic) error or loss, defined as
$$
MSE(\hat{y},\hat{\tilde{y}}) = \frac{1}{n}
\sum_{i=0}^{n-1}(y_i-\tilde{y}_i)^2,
$$
The smaller the value, the better the fit. Ideally we would like to
have an MSE equal zero. The attentive reader has probably recognized
this function as being similar to the $\chi^2$ function defined above.
The **r2_score** function computes $R^2$, the coefficient of
determination. It provides a measure of how well future samples are
likely to be predicted by the model. Best possible score is 1.0 and it
can be negative (because the model can be arbitrarily worse). A
constant model that always predicts the expected value of $\hat{y}$,
disregarding the input features, would get a $R^2$ score of $0.0$.
If $\tilde{\hat{y}}_i$ is the predicted value of the $i$-th sample and $y_i$ is the corresponding true value, then the score $R^2$ is defined as
$$
R^2(\hat{y}, \tilde{\hat{y}}) = 1 - \frac{\sum_{i=0}^{n - 1} (y_i - \tilde{y}_i)^2}{\sum_{i=0}^{n - 1} (y_i - \bar{y})^2},
$$
where we have defined the mean value of $\hat{y}$ as
$$
\bar{y} = \frac{1}{n} \sum_{i=0}^{n - 1} y_i.
$$
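As a sanity check, here is a minimal sketch that computes $R^2$ directly from this definition and compares it with the value returned by **Scikit-Learn** (the small arrays are arbitrary illustrative numbers).
```
import numpy as np
from sklearn.metrics import r2_score

y = np.array([3.0, 1.0, 4.0, 1.0, 5.0])
ytilde = np.array([2.5, 0.5, 4.0, 2.0, 4.5])

ybar = np.mean(y)
R2_manual = 1.0 - np.sum((y-ytilde)**2)/np.sum((y-ybar)**2)
print(R2_manual, r2_score(y, ytilde))   # the two numbers should agree
```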
Another quantity that we will meet again in our discussions of regression analysis is
the mean absolute error (MAE), a risk metric corresponding to the expected value of the absolute error loss or what we call the $l1$-norm loss. In our discussion above we presented the relative error.
The MAE is defined as follows
$$
\text{MAE}(\hat{y}, \hat{\tilde{y}}) = \frac{1}{n} \sum_{i=0}^{n-1} \left| y_i - \tilde{y}_i \right|.
$$
We also present the mean
squared logarithmic (quadratic) error
$$
\text{MSLE}(\hat{y}, \hat{\tilde{y}}) = \frac{1}{n} \sum_{i=0}^{n - 1} (\log_e (1 + y_i) - \log_e (1 + \tilde{y}_i) )^2,
$$
where $\log_e (x)$ stands for the natural logarithm of $x$. This error
estimate is best used when the targets exhibit exponential growth, such
as population counts, average sales of a commodity over a span of
years, etc.
Finally, another cost function is the Huber cost function used in robust regression.
The rationale behind this possible cost function is its reduced
sensitivity to outliers in the data set. In our discussions on
dimensionality reduction and normalization of data we will meet other
ways of dealing with outliers.
The Huber cost function is defined as
$$
H_{\delta}(a)=\begin{cases}\frac{1}{2}a^{2} & \text{for } |a|\leq \delta, \\ \delta\left(|a|-\frac{1}{2}\delta\right) & \text{otherwise.}\end{cases}
$$
Here $a=\boldsymbol{y} - \boldsymbol{\tilde{y}}$.
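A minimal NumPy sketch of the Huber function (with the threshold $\delta$ chosen arbitrarily) illustrates how it interpolates between the quadratic and the absolute-value costs.
```
import numpy as np

def huber(a, delta=1.0):
    # quadratic for small residuals, linear for large ones
    quad = 0.5*a**2
    lin = delta*(np.abs(a) - 0.5*delta)
    return np.where(np.abs(a) <= delta, quad, lin)

a = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])
print(huber(a))   # large residuals contribute linearly, not quadratically
```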
We will discuss in more
detail these and other functions in the various lectures. We conclude this part with another example. Instead of
a linear $x$-dependence we study now a cubic polynomial and use the polynomial regression analysis tools of scikit-learn.
```
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LinearRegression
x=np.linspace(0.02,0.98,200)
noise = np.asarray(random.sample((range(200)),200))
y=x**3*noise
yn=x**3*100
poly3 = PolynomialFeatures(degree=3)
X = poly3.fit_transform(x[:,np.newaxis])
clf3 = LinearRegression()
clf3.fit(X,y)
Xplot=poly3.fit_transform(x[:,np.newaxis])
poly3_plot=plt.plot(x, clf3.predict(Xplot), label='Cubic Fit')
plt.plot(x,yn, color='red', label="True Cubic")
plt.scatter(x, y, label='Data', color='orange', s=15)
plt.legend()
plt.show()
def error(y, yn):
    # mean relative deviation between the noisy data and the noise-free cubic
    err = (y-yn)/yn
    return abs(np.sum(err))/len(err)
print(error(y, yn))
```
### To our real data: nuclear binding energies. Brief reminder on masses and binding energies
Let us now dive into nuclear physics and remind ourselves briefly about some basic features about binding
energies. A basic quantity which can be measured for the ground
states of nuclei is the atomic mass $M(N, Z)$ of the neutral atom with
atomic mass number $A$ and charge $Z$. The number of neutrons is $N$. There are indeed several sophisticated experiments worldwide which allow us to measure this quantity to high precision (parts per million even).
Atomic masses are usually tabulated in terms of the mass excess defined by
$$
\Delta M(N, Z) = M(N, Z) - uA,
$$
where $u$ is the Atomic Mass Unit
$$
u = M(^{12}\mathrm{C})/12 = 931.4940954(57) \hspace{0.1cm} \mathrm{MeV}/c^2.
$$
The nucleon masses are
$$
m_p = 1.00727646693(9)u,
$$
and
$$
m_n = 939.56536(8)\hspace{0.1cm} \mathrm{MeV}/c^2 = 1.0086649156(6)u.
$$
In the [2016 mass evaluation by W.J.Huang, G.Audi, M.Wang, F.G.Kondev, S.Naimi and X.Xu](http://nuclearmasses.org/resources_folder/Wang_2017_Chinese_Phys_C_41_030003.pdf)
there are data on masses and decays of 3437 nuclei.
The nuclear binding energy is defined as the energy required to break
up a given nucleus into its constituent parts of $N$ neutrons and $Z$
protons. In terms of the atomic masses $M(N, Z)$ the binding energy is
defined by
$$
BE(N, Z) = ZM_H c^2 + Nm_n c^2 - M(N, Z)c^2 ,
$$
where $M_H$ is the mass of the hydrogen atom and $m_n$ is the mass of the neutron.
In terms of the mass excess the binding energy is given by
$$
BE(N, Z) = Z\Delta_H c^2 + N\Delta_n c^2 -\Delta(N, Z)c^2 ,
$$
where $\Delta_H c^2 = 7.2890$ MeV and $\Delta_n c^2 = 8.0713$ MeV.
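As a quick check of the last expression, we can compute the binding energy of the deuteron ($Z=1$, $N=1$) from the tabulated mass excesses; the mass excess of $^2$H is approximately $13.136$ MeV (a value quoted here only for illustration, so verify it against the mass table).
```
# Binding energy of the deuteron (Z = 1, N = 1) from mass excesses, all in MeV
Delta_H = 7.2890       # hydrogen atom
Delta_n = 8.0713       # neutron
Delta_2H = 13.136      # deuteron, approximate value for illustration

BE_deuteron = 1*Delta_H + 1*Delta_n - Delta_2H
print(BE_deuteron)     # roughly 2.22 MeV, the well-known deuteron binding energy
```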
A popular and physically intuitive model which can be used to parametrize
the experimental binding energies as function of $A$, is the so-called
**liquid drop model**. The ansatz is based on the following expression
$$
BE(N,Z) = a_1A-a_2A^{2/3}-a_3\frac{Z^2}{A^{1/3}}-a_4\frac{(N-Z)^2}{A},
$$
where $A$ stands for the number of nucleons and the $a_i$s are parameters which are determined by a fit
to the experimental data.
To arrive at the above expression we have made the following assumptions:
* There is a volume term $a_1A$ proportional with the number of nucleons (the energy is also an extensive quantity). When an assembly of nucleons of the same size is packed together into the smallest volume, each interior nucleon has a certain number of other nucleons in contact with it. This contribution is proportional to the volume.
* There is a surface energy term $a_2A^{2/3}$. The assumption here is that a nucleon at the surface of a nucleus interacts with fewer other nucleons than one in the interior of the nucleus and hence its binding energy is less. This surface energy term takes that into account and is therefore negative and is proportional to the surface area.
* There is a Coulomb energy term $a_3\frac{Z^2}{A^{1/3}}$. The electric repulsion between each pair of protons in a nucleus yields less binding.
* There is an asymmetry term $a_4\frac{(N-Z)^2}{A}$. This term is associated with the Pauli exclusion principle and reflects the fact that the proton-neutron interaction is more attractive on the average than the neutron-neutron and proton-proton interactions.
We could also add a so-called pairing term, which is a correction term that
arises from the tendency of proton pairs and neutron pairs to
occur. An even number of particles is more stable than an odd number.
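Before fitting the coefficients below, it can be instructive to evaluate the liquid drop formula with a set of typical textbook values for the $a_i$s (roughly $a_1\approx 15.5$, $a_2\approx 17.2$, $a_3\approx 0.72$ and $a_4\approx 23$ MeV; these numbers are only indicative and are not the result of the fit performed later).
```
def liquid_drop_BE(N, Z, a1=15.5, a2=17.2, a3=0.72, a4=23.0):
    """Liquid drop binding energy in MeV for given neutron and proton numbers."""
    A = N + Z
    return a1*A - a2*A**(2.0/3.0) - a3*Z**2/A**(1.0/3.0) - a4*(N-Z)**2/A

# Binding energy per nucleon for a few nuclei (coefficients are indicative only)
for N, Z in [(8, 8), (30, 26), (126, 82)]:
    A = N + Z
    print(f"A = {A:3d}  BE/A = {liquid_drop_BE(N, Z)/A:.2f} MeV")
```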
### Organizing our data
Let us start with reading and organizing our data.
We start with the compilation of masses and binding energies from 2016.
After having downloaded this file to our own computer, we are now ready to read the file and start structuring our data.
We start with preparing folders for storing our calculations and the data file over masses and binding energies. We import also various modules that we will find useful in order to present various Machine Learning methods. Here we focus mainly on the functionality of **scikit-learn**.
```
# Common imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn.linear_model as skl
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
import os
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
infile = open(data_path("MassEval2016.dat"),'r')
```
Before we proceed, we define also a function for making our plots. You can obviously avoid this and simply set up various **matplotlib** commands every time you need them. You may however find it convenient to collect all such commands in one function and simply call this function.
```
from pylab import plt, mpl
plt.style.use('seaborn')
mpl.rcParams['font.family'] = 'serif'
def MakePlot(x,y, styles, labels, axlabels):
plt.figure(figsize=(10,6))
for i in range(len(x)):
plt.plot(x[i], y[i], styles[i], label = labels[i])
plt.xlabel(axlabels[0])
plt.ylabel(axlabels[1])
plt.legend(loc=0)
```
Our next step is to read the data on experimental binding energies and
reorganize them as functions of the mass number $A$, the number of
protons $Z$ and neutrons $N$ using **pandas**. Before we do this it is
always useful (unless you have a binary file or other types of compressed
data) to actually open the file and simply take a look at it!
In particular, the program that outputs the final nuclear masses is written in Fortran with a specific format. It means that we need to figure out the format and which columns contain the data we are interested in. Pandas comes with a function that reads formatted output. After having admired the file, we are now ready to start massaging it with **pandas**. The file begins with some basic format information.
```
"""
This is taken from the data file of the mass 2016 evaluation.
All files are 3436 lines long with 124 character per line.
Headers are 39 lines long.
col 1 : Fortran character control: 1 = page feed 0 = line feed
format : a1,i3,i5,i5,i5,1x,a3,a4,1x,f13.5,f11.5,f11.3,f9.3,1x,a2,f11.3,f9.3,1x,i3,1x,f12.5,f11.5
These formats are reflected in the pandas widths variable below, see the statement
widths=(1,3,5,5,5,1,3,4,1,13,11,11,9,1,2,11,9,1,3,1,12,11,1),
Pandas has also a variable header, with length 39 in this case.
"""
```
The data we are interested in are in columns 2, 3, 4 and 11, giving us
the number of neutrons, protons, mass numbers and binding energies,
respectively. We also add, for the sake of completeness, the element name. The data are in fixed-width formatted lines and we will
convert them into the **pandas** DataFrame structure.
```
# Read the experimental data with Pandas
Masses = pd.read_fwf(infile, usecols=(2,3,4,6,11),
names=('N', 'Z', 'A', 'Element', 'Ebinding'),
widths=(1,3,5,5,5,1,3,4,1,13,11,11,9,1,2,11,9,1,3,1,12,11,1),
header=39,
index_col=False)
# Extrapolated values are indicated by '#' in place of the decimal place, so
# the Ebinding column won't be numeric. Coerce to float and drop these entries.
Masses['Ebinding'] = pd.to_numeric(Masses['Ebinding'], errors='coerce')
Masses = Masses.dropna()
# Convert from keV to MeV.
Masses['Ebinding'] /= 1000
# Group the DataFrame by nucleon number, A.
Masses = Masses.groupby('A')
# Find the rows of the grouped DataFrame with the maximum binding energy.
Masses = Masses.apply(lambda t: t[t.Ebinding==t.Ebinding.max()])
```
We have now read in the data and grouped them according to the variables we are interested in.
We see how easy it is to reorganize the data using **pandas**. If we
were to do these operations in C/C++ or Fortran, we would have had to
write various functions/subroutines which perform the above
reorganizations for us. Having reorganized the data, we can now start
to make some simple fits using both the functionalities in **numpy** and
**Scikit-Learn** afterwards.
Now we define five variables which contain
the number of nucleons $A$, the number of protons $Z$ and the number of neutrons $N$, the element name and finally the energies themselves.
```
A = Masses['A']
Z = Masses['Z']
N = Masses['N']
Element = Masses['Element']
Energies = Masses['Ebinding']
print(Masses)
```
The next step, and we will define this mathematically later, is to set up the so-called **design matrix**. We will throughout call this matrix $\boldsymbol{X}$.
It has dimensionality $n\times p$, where $n$ is the number of data points and $p$ is the number of so-called predictors. In our case here the predictors are given by the powers of $A$ we wish to include in the fit.
```
# Now we set up the design matrix X
X = np.zeros((len(A),5))
X[:,0] = 1
X[:,1] = A
X[:,2] = A**(2.0/3.0)
X[:,3] = A**(-1.0/3.0)
X[:,4] = A**(-1.0)
```
With **Scikit-Learn** we are now ready to use linear regression and fit our data.
```
clf = skl.LinearRegression().fit(X, Energies)
fity = clf.predict(X)
```
Pretty simple!
Now we can print measures of how our fit is doing, the coefficients from the fits and plot the final fit together with our data.
```
# The mean squared error
print("Mean squared error: %.2f" % mean_squared_error(Energies, fity))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(Energies, fity))
# Mean absolute error
print('Mean absolute error: %.2f' % mean_absolute_error(Energies, fity))
print(clf.coef_, clf.intercept_)
Masses['Eapprox'] = fity
# Generate a plot comparing the experimental values with the fitted values.
fig, ax = plt.subplots()
ax.set_xlabel(r'$A = N + Z$')
ax.set_ylabel(r'$E_\mathrm{bind}\,/\mathrm{MeV}$')
ax.plot(Masses['A'], Masses['Ebinding'], alpha=0.7, lw=2,
label='Ame2016')
ax.plot(Masses['A'], Masses['Eapprox'], alpha=0.7, lw=2, c='m',
label='Fit')
ax.legend()
save_fig("Masses2016")
plt.show()
```
### Seeing the wood for the trees
As a teaser, let us now see how we can do this with decision trees using **scikit-learn**. Later we will switch to so-called **random forests**!
```
#Decision Tree Regression
from sklearn.tree import DecisionTreeRegressor
regr_1=DecisionTreeRegressor(max_depth=5)
regr_2=DecisionTreeRegressor(max_depth=7)
regr_3=DecisionTreeRegressor(max_depth=9)
regr_1.fit(X, Energies)
regr_2.fit(X, Energies)
regr_3.fit(X, Energies)
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
y_3=regr_3.predict(X)
Masses['Eapprox'] = y_1
# Plot the results
plt.figure()
plt.plot(A, Energies, color="blue", label="Data", linewidth=2)
plt.plot(A, y_1, color="red", label="max_depth=5", linewidth=2)
plt.plot(A, y_2, color="green", label="max_depth=7", linewidth=2)
plt.plot(A, y_3, color="m", label="max_depth=9", linewidth=2)
plt.xlabel("$A$")
plt.ylabel("$E$[MeV]")
plt.title("Decision Tree Regression")
plt.legend()
save_fig("Masses2016Trees")
plt.show()
print(Masses)
print(np.mean( (Energies-y_1)**2))
```
### And what about using neural networks?
The **seaborn** package allows us to visualize data in an efficient way. Note that we use **scikit-learn**'s multi-layer perceptron (or feed forward neural network)
functionality.
```
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import accuracy_score
import seaborn as sns
X_train = X
Y_train = Energies
n_hidden_neurons = 100
epochs = 100
# store models for later use
eta_vals = np.logspace(-5, 1, 7)
lmbd_vals = np.logspace(-5, 1, 7)
# store the models for later use
DNN_scikit = np.zeros((len(eta_vals), len(lmbd_vals)), dtype=object)
train_accuracy = np.zeros((len(eta_vals), len(lmbd_vals)))
sns.set()
for i, eta in enumerate(eta_vals):
for j, lmbd in enumerate(lmbd_vals):
dnn = MLPRegressor(hidden_layer_sizes=(n_hidden_neurons), activation='logistic',
alpha=lmbd, learning_rate_init=eta, max_iter=epochs)
dnn.fit(X_train, Y_train)
DNN_scikit[i][j] = dnn
train_accuracy[i][j] = dnn.score(X_train, Y_train)
fig, ax = plt.subplots(figsize = (10, 10))
sns.heatmap(train_accuracy, annot=True, ax=ax, cmap="viridis")
ax.set_title("Training Accuracy")
ax.set_ylabel("$\eta$")
ax.set_xlabel("$\lambda$")
plt.show()
```
## A first summary
The aim behind these introductory words was to present to you various
Python libraries and their functionalities, in particular libraries like
**numpy**, **pandas**, **xarray** and **matplotlib**, and others that make our life much easier
in handling various data sets and visualizing data.
Furthermore,
**Scikit-Learn** allows us, with a few lines of code, to implement popular
Machine Learning algorithms for supervised learning. Later we will meet **Tensorflow**, a powerful library for deep learning.
Now it is time to dive more into the details of various methods. We will start with linear regression and try to take a deeper look at what it entails.
```
# Implementation of 1-out-of-2 OT
# Alice has 2 messages and Bob wants to learn one of them, with Alice not knowing which message Bob learns and Bob not learning the second of the two messages
import time
seed = int(round(time.time() * 10))
# Generator for random number generation
def gen_random_mod(k, modulus):
num = 0
while(num < k):
a = mod(gp.random(), modulus).lift()
while(gcd(a, modulus) != 1):
a = mod(gp.random(), modulus).lift()
yield a
num += 1
class Alice:
def __init__(self, modbits, seed):
set_random_seed(seed)
current_randstate().set_seed_gp()
# use gp.random() to get prng random number
self.p = next_prime(2^modbits + gp.random())
print("p : ", self.p)
self.q = next_prime(2^modbits + gp.random())
print("q : ", self.q)
if self.p == self.q:
print("Primes are same, please run again")
self.n = self.p*self.q
self.phi_n = (self.p-1)*(self.q-1)
# Lifting is necessary here, in order for inverse_mod to work correctly
self.e = mod(gp.random(), self.phi_n).lift()
while gcd(self.e, self.phi_n)!=1:
self.e = mod(gp.random(), self.phi_n).lift()
self.d = inverse_mod(self.e, self.phi_n)
# Alice generates m0 and m1 here.
self.mlist = list(gen_random_mod(2, self.n))
self.m0 = self.mlist[0]
self.m1 = self.mlist[1]
print("Alice's message m0 is : " + str(self.m0))
print("Alice's message m1 is : " + str(self.m1))
# Alice generates x0 and x1 here.
self.xlist = list(gen_random_mod(2, self.n))
def compute_kx(self, v):
klist = []
for i in range(2):
klist.append(mod((v-self.xlist[i]), self.n).lift().powermod(self.d, self.n))
#print(klist)
mprimelist = []
mprimelist.append(mod(self.m0 + klist[0], self.n).lift())
mprimelist.append(mod(self.m1 + klist[1], self.n).lift())
return mprimelist
class Bob:
def __init__(self, xlist, n, e):
self.x0 = xlist[0]
self.x1 = xlist[1]
self.xlist = xlist
self.n = n
self.e = e
temp = [i-1 for i in list(gen_random_mod(1, 3))]
self.b = temp[0]
temp = list(gen_random_mod(1, self.n))
self.k = temp[0]
#print(self.k)
self.v = mod(xlist[self.b], self.n)
self.v = self.v + self.k.powermod(self.e, self.n)
self.v = self.v.lift()
def revelation(self, mprimelist):
self.m = mprimelist[self.b] - self.k
print("Bob's bit (0 or 1) b is : "+str(self.b))
print("Bob could learn the value of m_b as : "+str(self.m))
class OT_Protocol:
def __init__(self, modbits, seed):
alice = Alice(modbits, seed)
bob = Bob(alice.xlist, alice.n, alice.e)
mprimelist = alice.compute_kx(bob.v)
bob.revelation(mprimelist)
if bob.m == alice.mlist[bob.b]:
print("Protocol Finished Correctly!")
else:
print("Error in protocol resuts. Aborted.")
modbits = 12
ot = OT_Protocol(modbits, seed)
```
```
import random
import copy
import logging
import sys
from run_tests_201204 import *
import os
import sys
import importlib
from collections import defaultdict
sys.path.insert(0, '/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc')
from tools_pattern import get_eucledean_dist
import compress_pickle
import my_plot
from my_plot import MyPlotData, my_box_plot
import seaborn as sns
script_n = 'plot_210404_across_scaled_noise_12k_1500'
db_path = '/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc/dimensionality_sim/' \
'batch_210404_across_noise/'
data_script = 'batch_210404_across_noise'
scaled_noise = 1
core_noise = 0
n_mfs = 1500
n_grcs = 12000
db = {}
model = 'data'
db[model] = compress_pickle.load(
db_path+f'{data_script}_{model}_{n_grcs}_{n_mfs}_scaled_noise_{scaled_noise}_core_noise_{core_noise}_0.3_512_10.gz')
# model = 'global_random'
# db[model] = compress_pickle.load(
# db_path+f'{data_script}_{model}_scaled_noise_{scaled_noise}_core_noise_{core_noise}_0.3_512_10.gz')
model = 'random'
db[model] = compress_pickle.load(
db_path+f'{data_script}_{model}_{n_grcs}_{n_mfs}_scaled_noise_{scaled_noise}_core_noise_{core_noise}_0.3_512_10.gz')
model = 'naive_random_21'
db[model] = compress_pickle.load(
db_path+f'{data_script}_{model}_{n_grcs}_{n_mfs}_scaled_noise_{scaled_noise}_core_noise_{core_noise}_0.3_512_10.gz')
model = 'naive_random_17'
db[model] = compress_pickle.load(
db_path+f'{data_script}_{model}_{n_grcs}_{n_mfs}_scaled_noise_{scaled_noise}_core_noise_{core_noise}_0.3_512_10.gz')
avg_grc_dim_list = defaultdict(list)
for ress in db['random']:
ress_tries = ress
for ress in ress_tries:
# print(ress)
for noise in ress:
res = ress[noise]
grc_dim = res['grc_dim']
avg_grc_dim_list[noise].append(grc_dim)
avg_grc_dim = {}
for noise in avg_grc_dim_list:
avg_grc_dim[noise] = sum(avg_grc_dim_list[noise])/len(avg_grc_dim_list[noise])
name_map = {
'data': "Observed",
'global_random': "Global Random",
'random': "Global Random",
'naive_random_17': "Local Random",
'naive_random_21': "Local Random",
}
palette = {
name_map['data']: sns.color_palette()[0],
name_map['global_random']: sns.color_palette()[1],
name_map['random']: sns.color_palette()[1],
name_map['naive_random_17']: sns.color_palette()[2],
name_map['naive_random_21']: sns.color_palette()[2],
}
mpd = MyPlotData()
ress_ref = db['naive_random_17'][0][0]
resss_ref2 = db['naive_random_17'][0]
for model_name in [
# 'global_random',
# 'naive_random_17',
'naive_random_21',
'random',
'data',
]:
ress = db[model_name]
# print(ress)
ress_tries = ress[0] # get the first element in tuple
# ress = ress[0] # get the first try
for n_try, ress in enumerate(ress_tries):
# print(resss_ref2[0])
# print(resss_ref2.keys())
if n_try >= len(resss_ref2):
print(n_try)
continue
ress_ref2 = resss_ref2[n_try]
for noise in ress:
# print(noise)
res = ress[noise]
# res_ref = ress_ref[noise]
res_ref2 = ress_ref2[noise]
# hamming_distance_norm = res['hamming_distance']/res['num_grcs']
mpd.add_data_point(
model=name_map[model_name],
noise=noise*100,
grc_dim=res['grc_dim'],
grc_dim_norm=res['grc_dim']/res_ref2['grc_dim'],
grc_dim_norm2=res['grc_dim']/avg_grc_dim[noise],
grc_by_mf_dim=res['grc_dim']/res['mf_dim'],
# grc_by_mf_dim_ref=res['grc_dim']/res_ref['mf_dim'],
num_grcs=res['num_grcs'],
num_mfs=res['num_mfs'],
voi=res['voi'],
grc_pop_corr=res['grc_pop_corr'],
grc_pop_corr_norm=res['grc_pop_corr']/res_ref2['grc_pop_corr'],
binary_similarity=res['binary_similarity'],
hamming_distance=res['hamming_distance'],
normalized_mse=res['normalized_mse'],
)
# importlib.reload(my_plot); my_plot.my_relplot(
# mpd,
# x='noise',
# y='grc_dim',
# hue='model',
# context='paper',
# palette=palette,
# linewidth=1,
# log_scale_y=True,
# width=10,
# # ylim=[0, None],
# y_axis_label='Dim. Expansion ($x$)',
# x_axis_label='MF Input Variation (%)',
# title='noise',
# save_filename=f'{script_n}_act_30.svg',
# show=True,
# )
importlib.reload(my_plot); my_plot.my_relplot(
mpd,
x='noise',
y='grc_dim_norm2',
hue='model',
context='paper',
palette=palette,
linewidth=1,
# log_scale_y=True,
width=10,
ylim=[.9, 1.1],
y_axis_label='Dim. Expansion ($x$)',
x_axis_label='MF Input Variation (%)',
title='noise',
save_filename=f'{script_n}_act_30.svg',
show=True,
)
importlib.reload(my_plot); my_plot.my_relplot(
mpd,
x='noise',
y='grc_dim_norm2',
hue='model',
context='paper',
palette=palette,
linewidth=2,
# log_scale_y=True,
# ci=68,
# ci='sd',
ci=68,
width=3.5,
# height=3,
height=1.5,
ylim=[.9, 1.05],
y_axis_label='Rel. Noise Dim. ($x$)',
x_axis_label='Graded Variation (%)',
# title='noise',
save_filename=f'{script_n}_graded_noise.svg',
show=True,
)
importlib.reload(my_plot); my_plot.my_relplot(
mpd,
x='noise',
y='grc_by_mf_dim',
hue='model',
context='paper',
palette=palette,
linewidth=2,
# log_scale_y=True,
# ci='sd',
ci=68,
width=3.5,
height=3,
# ylim=[.9, 1.1],
y_axis_label='Dim. Expansion ($x$)',
x_axis_label='Graded Variation (%)',
# title='noise',
save_filename=f'{script_n}_dim_expansion.svg',
show=True,
)
```
```
%config IPCompleter.greedy=True
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib as matplot
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import warnings
warnings.filterwarnings("ignore")
from keras.models import Model, load_model
from keras.layers import *
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras import regularizers
from sklearn.metrics import (confusion_matrix, precision_recall_curve, auc,
roc_curve, recall_score, classification_report, f1_score,
precision_recall_fscore_support,accuracy_score)
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, VotingClassifier, AdaBoostClassifier
from sklearn.model_selection import train_test_split, cross_val_score, learning_curve, validation_curve
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
from sklearn.neural_network import MLPClassifier
from logitboost import LogitBoost
from IPython.display import display, Image, SVG, Math, YouTubeVideo
from data_exploration import explore
from feature_engineering import transformation
import matplotlib as mpl
mpl.rcParams['xtick.labelsize'] = 22
mpl.rcParams['ytick.labelsize'] = 22
mpl.rcParams['figure.figsize'] = (10, 8)
mpl.rcParams['axes.facecolor'] = (0.9,0.9,0.9)
mpl.rcParams['lines.linewidth'] = 2
mpl.rcParams['axes.grid'] = True
mpl.rcParams['grid.color'] = 'w'
mpl.rcParams['xtick.top'] = True
mpl.rcParams['ytick.right'] = True
mpl.rcParams['grid.linestyle'] = '--'
mpl.rcParams['legend.fontsize'] = 22
mpl.rcParams['legend.facecolor'] = [1,1,1]
mpl.rcParams['legend.framealpha'] = 0.75
mpl.rcParams['axes.labelsize'] = 22
test = pd.read_csv('UNSW_NB15_training-set.csv')
train = pd.read_csv('UNSW_NB15_testing-set.csv')
total_1 = pd.concat([train, test]).drop(['id'],axis=1)
train = train.where(train['service'] == "http").dropna()
test = test.where(test['service'] == "http").dropna()
total = pd.concat([train, test]).drop(['id'],axis=1)
from pandas.api.types import is_datetime64_any_dtype as is_datetime
from pandas.api.types import is_categorical_dtype
def reduce_mem_usage(df, use_float16=False):
""" iterate through all the columns of a dataframe and modify the data type
to reduce memory usage.
"""
start_mem = df.memory_usage().sum() / 1024**2
print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
for col in df.columns:
if is_datetime(df[col]) or is_categorical_dtype(df[col]):
# skip datetime type or categorical type
continue
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if use_float16 and c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype('object')
end_mem = df.memory_usage().sum() / 1024**2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df
def standardize(df):
return (df-df.mean())/df.std()
def min_max(df):
return (df-df.min())/(df.max() - df.min())
def normalize(df):
return pd.DataFrame(preprocessing.normalize(df), columns=df.columns)
total_1 = reduce_mem_usage(total_1)
```
# Data
```
normal = train[train['label']==0]
anomaly = train[train['label']==1]
```
## Checking data types
```
train.head()
train.dtypes
```
* categorical: state, service, proto
* target = attack_cat, label
* integer but categorical = is_sm_ips_ports, ct_state_ttl, is_ftp_login
* integer = spkts, dpkts, sbytes, dbytes, sttl, dttl, sload, dload, sloss, dloss, swin, dwin, stcpb, dtcpb, smean, dmean, trans_depth, response_body_len, ct_srv_src, ct_state_ttl, ct_dst_ltm, ct_src_dport_ltm, ct_dst_sport_ltm, ct_dst_src_ltm, ct_ftp_cmd, ct_flw_http_mthd, ct_src_ltm, ct_srv_dst,
* decimal = dur, rate, sinpkt, dinpkt, sjit, djit, tcprtt, synack, ackdat
# Correlation matrix
Why checking correlation is important ? Check these links:
* [Why Feature Correlation Matters …. A Lot!](https://towardsdatascience.com/why-feature-correlation-matters-a-lot-847e8ba439c4) and
* [Feature selection — Correlation and P-value](https://towardsdatascience.com/feature-selection-correlation-and-p-value-da8921bfb3cf)
```
def show_correlation(data, method='pearson'):
correlation_matrix = data.corr(method=method) # 'pearson', 'kendall', 'spearman'
fig = plt.figure(figsize=(12,9))
sns.heatmap(correlation_matrix,vmax=0.8,square = True) # annot=True, if fig should show the correlation score too
plt.show()
return correlation_matrix
def top_correlations(correlations, limit=0.9):
columns = correlations.columns
for i in range(correlations.shape[0]):
for j in range(i+1, correlations.shape[0]):
if correlations.iloc[i,j] >= limit:
print(f"{columns[i]} {columns[j]} {correlations.iloc[i,j]}")
def print_correlations(correlations, col1=None, col2=None):
columns = correlations.columns
for i in range(correlations.shape[0]):
for j in range(i+1, correlations.shape[0]):
if (col1 == None or col1==columns[i]) and (col2 == None or col2==columns[j]):
print(f"{columns[i]} {columns[j]} {correlations.iloc[i,j]}")
return
elif (col1 == None or col1==columns[j]) and (col2 == None or col2==columns[i]):
print(f"{columns[i]} {columns[j]} {correlations.iloc[i,j]}")
return
def find_corr(df1, df2):
return pd.concat([df1, df2], axis=1).corr().iloc[0,1]
def corr(col1, col2='label', df=total):
return pd.concat([df[col1], df[col2]], axis=1).corr().iloc[0,1]
```
## Pearson
```
correlation_matrix = show_correlation(total_1)
top_correlations(correlation_matrix, limit=0.9)
```
## Spearman
```
correlation_matrix = show_correlation(train, method='spearman')
top_correlations(correlation_matrix, limit=0.9)
```
Most correlated features are :
* spkts, sbytes, sloss
* dpkts, dbytes, dloss
* sinpkt, is_sm_ips_ports
* swin, dwin
* tcprtt, synack
* ct_srv_src, ct_srv_dst, ct_dst_src_ltm, ct_src_dport_ltm, ct_dst_sport_ltm
* is_ftp_login ct_ftp_cmd
```
sns.pairplot(total[['spkts', 'sbytes', 'sloss']])
sns.pairplot(total[['dpkts', 'dbytes', 'dloss']])
sns.pairplot(total[['sinpkt', 'is_sm_ips_ports']])
sns.pairplot(total[['swin', 'dwin']])
```
# plot utils
```
def dual_plot(col, data1=normal, data2=anomaly, label1='normal', label2='anomaly', method=None):
if method != None:
sns.distplot(data1[col].apply(method), label=label1, hist=False, rug=True)
sns.distplot(data2[col].apply(method), label=label2, hist=False, rug=True)
else:
sns.distplot(data1[col], label=label1, hist=False, rug=True)
sns.distplot(data2[col], label=label2, hist=False, rug=True)
plt.legend()
def catplot(data, col):
ax = sns.catplot(x=col, hue="label", col="type",data=data, kind="count", height=5, legend=False, aspect=1.4)
ax.set_titles("{col_name}")
ax.add_legend(loc='upper right',labels=['normal','attack'])
plt.show(ax)
```
# Categorical
These four columns are categorical: 'attack_cat', 'state', 'service', 'proto'. Among them 'attack_cat' isn't a feature.
These features are categorical but in integer form : 'is_sm_ips_ports', 'ct_state_ttl', 'is_ftp_login'.
```
def create_count_df(col, data=total):
df = pd.DataFrame(data[col].value_counts().reset_index().values, columns = [col, 'count'])
df['percent'] = df['count'].values*100/data.shape[0]
return df.sort_values(by='percent', ascending=False)
```
## Label
0 for normal and 1 for attack records
```
create_count_df('label', train)
create_count_df('label', test)
```
So it seems the dataset is pretty balanced, unlike real-world data where attack scenarios are rare. Moreover, here attack connections outnumber normal connections.
## State
Indicates to the state and its dependent protocol, e.g. ACC, CLO, CON, ECO, ECR, FIN, INT, MAS, PAR, REQ, RST, TST, TXD, URH, URN, and (-) (if not used state)
```
col = 'state'
create_count_df(col, test)
```
## Service
http, ftp, smtp, ssh, dns, ftp-data, irc and (-) if not a much-used service. More than half of the service records fall into the - category.
```
col = 'service'
create_count_df(col, train)
total.loc[~total[col].isin(['-', 'dns', 'http', 'smtp', 'ftp-data', 'ftp', 'ssh', 'pop3']), col] = 'others'
```
## proto
Transaction protocol. Normal connections in the train data use only 5 protocols, whereas anomaly connections use 129. So we'll convert all other protocols into the same value.
```
col = 'proto'
create_count_df(col, normal)
create_count_df(col, anomaly)[:10]
# icmp and rtp columns are in test, but not in train data
total_1.loc[total_1[col].isin(['igmp', 'icmp', 'rtp']), col] = 'igmp_icmp_rtp'
total_1.loc[~total_1[col].isin(['tcp', 'udp', 'arp', 'ospf', 'igmp_icmp_rtp']), col] = 'others'
```
## is_sm_ips_ports
If the source and destination IP addresses are equal and the port numbers (sport/dport) are equal, this variable takes value 1, else 0. It seems that if it is 1, the connection is always normal. This feature is highly correlated with sinpkt (0.94131890073567).
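A quick cross-tabulation (a minimal sketch using plain pandas and the corr helper defined above) lets us check the claim that records with value 1 are essentially always normal:
```
col = 'is_sm_ips_ports'
# rows: feature value, columns: label (0 = normal, 1 = attack)
print(pd.crosstab(total[col], total['label']))
print(corr(col))   # correlation of the flag with the label
```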
## is_ftp_login
If the ftp session is accessed by user and password then 1, else 0. In most cases the session has no user and password. However, there are values 2 and 4 which should not be there.
This feature is totally correlated with ct_ftp_cmd, which counts the number of ftp commands, so dropping this column should be ok.
```
col = 'is_ftp_login'
print(corr('ct_ftp_cmd', col), corr('is_ftp_login', 'label'))
catplot(total, col)
total.drop([col], axis=1, inplace=True)
```
# Integer Features
## ct_state_ttl
No. for each state according to specific range of values for source/destination time to live (sttl/dttl).
```
col = 'ct_state_ttl'
catplot(total, col)
```
## ct_ftp_cmd
No. of flows that have a command in the ftp session. It has a very low correlation with the target. Also, is_ftp_login is highly correlated with it (0.9988554882922012).
```
catplot(total, 'ct_ftp_cmd')
corr('ct_ftp_cmd', 'label')
```
## ct_flw_http_mthd
No. of flows that have methods such as GET and POST in the http service. It seems 0 has more anomaly values; however, the correlation with the target is very small.
```
col = 'ct_flw_http_mthd'
catplot(total, col)
corr(col) # -0.012237160723
create_count_df(col, total_1)
```
## sbytes & dbytes
* sbytes: Source to destination transaction bytes
* dbytes: Destination to source transaction bytes
These two features are highly correlated with the number of packets sent (spkts & dpkts). Actually, spkts * smean = sbytes. They are also closely related to sloss and dloss, so we can drop these two here.
```
print(find_corr(total['spkts']*total['smean'], total['sbytes'])) # 0.999999
print(find_corr(total['dpkts']*total['dmean'], total['dbytes'])) # 0.99999
print(corr('sbytes', 'sloss'), corr('dbytes', 'dloss')) # 0.995771577240429, 0.9967111338305503
total.drop(['sbytes', 'dbytes'], axis=1, inplace=True)
```
## smean & dmean
Mean of the packet size transmitted. Is it just sbytes/spkts? The correlation says it is, so we already have this information from those other features.
```
dual_plot('smean')
dual_plot('dmean')
total['smean_log1p'] = total['smean'].apply(np.log1p)
total['dmean_log1p'] = total['dmean'].apply(np.log1p)
# -0.02837244879012871 -0.2951728296856902 -0.05807468815031313 -0.5111549621216057
print(corr('smean'), corr('dmean'), corr('smean_log1p'), corr('dmean_log1p'))
# So we have better correlation with label after applying log1p.
total.drop(['smean', 'dmean'], axis=1, inplace=True)
```
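The same apply-log1p, compare-correlations, drop-the-originals pattern repeats for several feature pairs below. A small helper along these lines (a hypothetical refactor, not part of the original notebook) would cut down the repetition:
```
def log1p_compare(cols, df=total):
    # add <col>_log1p, print correlation with the label before and after, drop the originals
    for c in cols:
        df[c + '_log1p'] = df[c].apply(np.log1p)
        print(c, corr(c, df=df), corr(c + '_log1p', df=df))
    df.drop(cols, axis=1, inplace=True)

# e.g. log1p_compare(['spkts', 'dpkts']) would replace the manual steps in the next cell
```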
## spkts and dpkts
* spkts : Source to destination packet count
* dpkts: Destination to source packet count
```
col = 'spkts'
dual_plot(col)
dual_plot(col, method=np.log1p)
total['spkts_log1p'] = total['spkts'].apply(np.log1p)
total['dpkts_log1p'] = total['dpkts'].apply(np.log1p)
# -0.043040466783819634 -0.09739388286233619 -0.3468819761209388 -0.45005074723539357
print(corr('spkts'), corr('dpkts'), corr('spkts_log1p'), corr('dpkts_log1p'))
# So we have better correlation with label after applying log1p.
total.drop(['spkts', 'dpkts'], axis=1, inplace=True)
```
## sttl & dttl
* sttl: Source to destination time to live value
* dttl: Destination to source time to live value
For sttl, most of the anomalies have time-to-live values around 65 and 250, and its correlation with the target is high.
For dttl, however, both classes have nearly the same distribution, so the correlation with the target is very low.
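A quick frequency check of the anomalous sttl values (illustrative sketch):
```
# most frequent sttl values among anomalous training connections
anomaly['sttl'].value_counts().head(10)
```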
```
col = 'sttl'
dual_plot(col) # 0.62408238, after applying log1p 0.61556952425
col = 'dttl'
dual_plot(col) # corr -0.09859087338578788
```
## sloss & dloss
* sloss: Source packets retransmitted or dropped
* dloss: Destination packets retransmitted or dropped
sloss is highly correlated with spkts and sbytes (more than 0.91), and similarly dloss is highly correlated with dpkts and dbytes.
However, although the number of packets sent is related to the number of packets lost, the relationship isn't as close to linear as the one between packet count and size, so we keep both for now.
Values mostly lie between 0 and 3, yet some exceed several thousand.
```
dual_plot('sloss')
# So log1p makes it easier to differentiate
dual_plot('sloss', method=np.log1p)
total['sloss_log1p'] = total['sloss'].apply(np.log1p)
total['dloss_log1p'] = total['dloss'].apply(np.log1p)
# 0.001828274080103508 -0.07596097807462938 -0.3454351103223904 -0.3701913238787703
print(corr('sloss'), corr('dloss'), corr('sloss_log1p'), corr('dloss_log1p') )
total.drop(['sloss', 'dloss'], axis=1, inplace= True)
```
## swin & dwin
TCP window advertisement value. Apart from 0 and 255, the other values (1-254) mostly occur only once, so we can bin them into 3 groups. After binning, their correlation with the target remains the same.
```
total['swin'].value_counts().loc[lambda x: x>1]
total['dwin'].value_counts().loc[lambda x: x>1]
print(corr('swin'), corr('dwin'))
dual_plot('swin')
selected = ['swin', 'dwin']
from sklearn import preprocessing  # needed for KBinsDiscretizer
kbins = preprocessing.KBinsDiscretizer(n_bins=[3, 3], encode='ordinal', strategy='uniform')
total[selected] = pd.DataFrame(kbins.fit_transform(total[selected]), columns=selected)
print(corr('swin'), corr('dwin'))
```
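If needed, the bin boundaries learned by the discretizer can be inspected (a minimal sketch):
```
# edges of the three uniform bins fitted for swin and dwin
print(kbins.bin_edges_)
```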
## stcpb & dtcpb
TCP base sequence number. It has a very wide range, from 0 to about 5e9, but anomalous connections mostly sit around 0.
```
col = 'stcpb'
dual_plot(col)
dual_plot(col, method=np.log1p)
total['stcpb_log1p'] = total['stcpb'].apply(np.log1p)
total['dtcpb_log1p'] = total['dtcpb'].apply(np.log1p)
# -0.2665849100492664 -0.2635428109654134 -0.33898970769021913 -0.33835676091281974
print(corr('stcpb'), corr('dtcpb'), corr('stcpb_log1p'), corr('dtcpb_log1p'))
total.drop(['stcpb', 'dtcpb'], axis=1, inplace= True)
```
## tcprtt & synack & ackdat
* tcprtt is the TCP connection setup round-trip time, the sum of ’synack’ and ’ackdat’.
* synack: TCP connection setup time, the time between the SYN and the SYN_ACK packets.
* ackdat : TCP connection setup time, the time between the SYN_ACK and the ACK packets.
As tcprtt is just the sum of the other two features, it doesn't add any extra information to our models, so we can drop it for now.
Preprocessing synack and ackdat didn't improve much. From the plots we can see that anomalous connections generally have values around 0.
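A quick sanity check of that sum relationship before the column is dropped (a minimal sketch; any residual difference would come from rounding in the recorded values):
```
# tcprtt should equal synack + ackdat (up to recording precision)
print(find_corr(total['synack'] + total['ackdat'], total['tcprtt']))
print((total['tcprtt'] - (total['synack'] + total['ackdat'])).abs().max())
```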
```
total.drop(['tcprtt'], axis=1, inplace=True)
dual_plot('synack')
dual_plot('ackdat')
```
## trans_depth
Represents the pipelined depth into the connection of the http request/response transaction. Depths from 5 up to 172 occur only rarely.
```
col = 'trans_depth'
print(corr(col)) # -0.0022256544
create_count_df(col, total)
```
## response_body_len
Actual uncompressed content size of the data transferred from the server’s http service.
The values range from 0 to 5.24M.
```
col = 'response_body_len'
dual_plot(col)
total["response_body_len_log1p"] = total["response_body_len"].apply(np.log1p)
# slight improve
# -0.018930127454048158 -0.03261972203078345
print(corr('response_body_len'), corr('response_body_len_log1p'))
total.drop(['response_body_len'], axis=1, inplace=True)
```
## ct_srv_src
No. of connections that contain the same service and source address in 100 connections according to the last time. Most normal connections have values within 10. It is highly correlated with ct_srv_dst.
```
col = 'ct_srv_src'
print(total[col].value_counts())
print(corr(col)) # 0.24659616767
dual_plot(col)
```
## ct_srv_dst
No. of connections that contain the same service and destination address in 100 connections according to the last time. It is highly correlated with ct_srv_src and has a slightly better correlation with the label, so ct_srv_src could be dropped to check for a possible improvement.
```
col = 'ct_srv_dst'
print(total[col].value_counts())
# graph is same as ct_srv_src
dual_plot(col)
# 0.2478122357. they are very correlated 0.97946681, need to check whether dropping one benefits
print(corr('ct_srv_dst'), corr('ct_srv_src', 'ct_srv_dst'))
```
## ct_src_ltm & ct_dst_ltm
No. of connections of the same source/destination address in 100 connections according to the last recorded time.
Values lie between 0 and 51, with very few above 48. The two features are quite correlated, but not to the point of dropping one.
```
col = 'ct_src_ltm'
print(corr(col))
create_count_df(col, total)
print(corr('ct_dst_ltm'))
create_count_df('ct_dst_ltm', total)
corr('ct_src_ltm', 'ct_dst_ltm')
```
## ct_src_dport_ltm & ct_dst_sport_ltm
* ct_src_dport_ltm: No. of connections of the same source address and the destination port in 100 connections according to the last time.
* ct_dst_sport_ltm: No. of connections of the same destination address and the source port in 100 connections according to the last time.
```
for col in ['ct_src_dport_ltm', 'ct_dst_sport_ltm']:
print(corr(col))
print(create_count_df(col, total))
corr('ct_src_dport_ltm', 'ct_dst_sport_ltm')
```
# Decimal Features
## dur
Recorded total duration. Normal connections mostly have values below 5. However, this feature has a poor correlation with the label.
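A quick quantile check of that claim (illustrative sketch):
```
# duration of normal connections at the median and upper tail
print(normal['dur'].quantile([0.5, 0.95, 0.99]))
```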
```
col = 'dur'
print(corr(col)) # 0.0290961170, correlation gets worse after log1p
dual_plot(col)
```
## rate
This feature isn't mentioned in the feature list. It has values up to 1M. Anomalous connections are mostly around 0.
```
col = 'rate'
print(corr(col))
dual_plot(col) # cor 0.3358, after applying log1p it becomes 0.31581108
```
## sinpkt & dinpkt
* sinpkt: Source interpacket arrival time (mSec)
* dinpkt: Destination interpacket arrival time (mSec)
sinpkt is highly correlated with is_sm_ips_ports (0.9421206). Would dropping one of them help?
```
col = 'sinpkt'
corr(col, 'is_sm_ips_ports')
print(corr(col)) # corr -0.1554536980863
dual_plot(col)
dual_plot(col, method=np.log1p)
dual_plot('dinpkt')
total['sinpkt_log1p'] = total['sinpkt'].apply(np.log1p)
total['dinpkt_log1p'] = total['dinpkt'].apply(np.log1p)
# slight improve in correlation
# -0.1554536980867726 -0.030136042428744566 -0.16119699304378052 -0.07408113676641241
print(corr('sinpkt'), corr('dinpkt'), corr('sinpkt_log1p'), corr('dinpkt_log1p'))
total.drop(['sinpkt', 'dinpkt'], axis=1, inplace= True)
```
## sload & dload
* sload: Source bits per second
* dload: Destination bits per second
The values are very large and are measured in bits.
```
dual_plot('sload')
dual_plot('dload')
total['sload_log1p'] = total['sload'].apply(np.log1p)
total['dload_log1p'] = total['dload'].apply(np.log1p)
# 0.16524867685764016 -0.35216880416636837 0.3397788822586144 -0.5919440288535992
print(corr('sload'), corr('dload'), corr('sload_log1p'), corr('dload_log1p'))
total.drop(['sload', 'dload'], axis=1, inplace=True)
```
## sjit & djit
Source and Destination jitter in mSec. Preprocessing didn't improve anything.
```
dual_plot('sjit')
dual_plot('djit')
```
```
import os
project_name = "reco-chef"; branch = "ml1m"; account = "sparsh-ai"
project_path = os.path.join('/content', project_name)
if not os.path.exists(project_path):
!pip install -U -q dvc dvc[gdrive]
!cp -r /content/drive/MyDrive/git_credentials/. ~
path = "/content/" + project_name;
!mkdir "{path}"
%cd "{path}"
!git init
!git remote add origin https://github.com/"{account}"/"{project_name}".git
!git pull origin "{branch}"
!git checkout "{branch}"
else:
%cd "{project_path}"
!git status
!dvc status
!git add . && git commit -m 'commit' && git push origin "{branch}"
!dvc pull ./data/bronze/ml-1m/*.dvc
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os,sys,inspect
import gc
from tqdm import tqdm
import random
import heapq
from tensorflow import keras
import tensorflow as tf
from tensorflow.keras import optimizers, callbacks, layers, losses
from tensorflow.keras.layers import Dense, Concatenate, Activation, Add, BatchNormalization, Dropout, Input, Embedding, Flatten, Multiply
from tensorflow.keras.models import Model, Sequential, load_model
from tensorflow.keras.regularizers import l2
from tensorflow.keras.utils import to_categorical
from sklearn.metrics.pairwise import cosine_similarity
SEED = 42
np.random.seed(SEED)
tf.random.set_seed(SEED)
os.environ['PYTHONHASHSEED']=str(SEED)
random.seed(SEED)
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError as e:
print(e)
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
def mish(x):
return x*tf.math.tanh(tf.math.softplus(x))
def leakyrelu(x, factor=0.2):
return tf.maximum(x, factor*x)
def load_data(filepath, threshold=0):
df = pd.read_csv(filepath,
sep="::",
header=None,
engine='python',
names=['userId', 'movieId', 'rating', 'time'])
df = df.drop('time', axis=1)
df['userId'] = df['userId'].astype(int)
df['movieId'] = df['movieId'].astype(int)
df['rating'] = df['rating'].astype(float)
df = df[['userId', 'movieId', 'rating']]
if threshold > 0:
df['rating'] = np.where(df['rating']>threshold, 1, 0)
else:
df['rating'] = 1.
m_codes = df['movieId'].astype('category').cat.codes
u_codes = df['userId'].astype('category').cat.codes
df['movieId'] = m_codes
df['userId'] = u_codes
return df
def add_negative(df, uiid, times=4):
df_ = df.copy()
user_id = df_['userId'].unique()
item_id = df_['movieId'].unique()
for i in tqdm(user_id):
cnt = 0
n = len(df_[df_['userId']==i])
n_negative = min(n*times, len(item_id)-n-1)
available_negative = list(set(uiid) - set(df[df['userId']==i]['movieId'].values))
new = np.random.choice(available_negative, n_negative, replace=False)
new = [[i, j, 0] for j in new]
df_ = df_.append(pd.DataFrame(new, columns=df.columns), ignore_index=True)
return df_
def extract_from_df(df, n_positive, n_negative):
df_ = df.copy()
rtd = []
user_id = df['userId'].unique()
for i in tqdm(user_id):
rtd += list(np.random.choice(df[df['userId']==i][df['rating']==1]['movieId'].index, n_positive, replace=False))
rtd += list(np.random.choice(df[df['userId']==i][df['rating']==0]['movieId'].index, n_negative, replace=False))
return rtd
def eval_hit(model, df, test, user_id, item_ids, top_k):
df = pd.concat([df, test])
items = list(set(item_ids) - set(df[df['userId']==user_id][df['rating']==1]['movieId'].values))
np.random.shuffle(items)
items = items[:99]
items.append(test[test['userId']==user_id]['movieId'].values[0])
items = np.array(items).reshape(-1, 1)
user = np.full(len(items), user_id).reshape(-1, 1)
preds = model.predict([user, items]).flatten()
item_to_pred = {item: pred for item, pred in zip(items.flatten(), preds)}
top_k = heapq.nlargest(top_k, item_to_pred, key=item_to_pred.get)
if items[-1][0] in top_k:
return 1
return 0
def eval_NDCG(model, df, test, user_id, item_ids, top_k):
df = pd.concat([df, test])
items = list(set(item_ids) - set(df[df['userId']==user_id][df['rating']==1]['movieId'].values))
np.random.shuffle(items)
items = items[:99]
items.append(test[test['userId']==user_id]['movieId'].values[0])
items = np.array(items).reshape(-1, 1)
user = np.full(len(items), user_id).reshape(-1, 1)
preds = model.predict([user, items]).flatten()
item_to_pred = {item: pred for item, pred in zip(items.flatten(), preds)}
top_k = heapq.nlargest(top_k, item_to_pred, key=item_to_pred.get)
for i, item in enumerate(top_k, 1):
if item == test[test['userId']==user_id]['movieId'].values:
return 1 / np.log2(i+1)
return 0
def eval_hit_wrapper(model, df, test, item_ids, top_k):
def f(user_id):
return eval_hit(model, df, test, user_id, item_ids, top_k)
return f
def eval_NDCG_wrapper(model, df, test, item_ids, top_k):
def f(user_id):
return eval_NDCG(model, df, test, user_id, item_ids, top_k)
return f
```
## CML
### Load data
```
df = load_data('./data/bronze/ml-1m/ratings.dat', threshold=0)
df.head()
```
### Preprocessing
```
uuid = df['userId'].unique()
uiid = df['movieId'].unique()
rtd = extract_from_df(df, 1, 0)
train = df.drop(rtd)
test = df.loc[rtd]
u_i = pd.pivot_table(train, index='userId', columns='movieId', values='rating').fillna(0)
u_i
groups = []
for i in range(len(u_i)):
groups.append(list(np.argwhere(u_i.values[i]).flatten()))
# groups = np.array(groups)
```
### Gensim model
```
from gensim.models import Word2Vec

# item2vec-style model: each user's list of watched movies is treated as one "sentence"
model = Word2Vec(
    vector_size=32,   # embedding dimension (gensim >= 4 API; wv.key_to_index below also requires gensim >= 4)
    window=10,
    min_count=1,
    sg=1,             # skip-gram
    negative=5)
model.build_vocab(groups)
model.train(groups,
            total_examples=model.corpus_count,
            epochs=100,
            compute_loss=True)
embedding_matrix = model.wv[model.wv.key_to_index.keys()]
embedding_matrix.shape
from sklearn.metrics.pairwise import cosine_similarity
def get_average(user_id, model=model, embedding=embedding_matrix):
seen_movies = train[train['userId']==user_id]['movieId'].values
kdx = []
for i in seen_movies:
kdx.append(model.wv.key_to_index[i])
vec = embedding_matrix[kdx]
vec = np.mean(vec, 0)
return vec
def top_n(user_id, k=10, uiid=uiid, model=model):
seen_movies = train[train['userId']==user_id]['movieId'].values
unseen_movies = list(set(uiid) - set(seen_movies))
user_vec = get_average(user_id)
kdx = []
for i in unseen_movies:
kdx.append(model.wv.key_to_index[i])
unseen_vec = embedding_matrix[kdx]
res = sorted(unseen_movies, key=lambda x: cosine_similarity([embedding_matrix[model.wv.key_to_index[x]]], [user_vec]), reverse=True)
return np.array(res[:k])
cnt = 0
for i in range(len(test)):
user, item, _ = test.values[i]
pred = top_n(user, 10)
if item in pred:
cnt += 1
cnt / len(test)
```
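For completeness, the same hold-out protocol can also give an NDCG@10 estimate for this embedding-based recommender (a hedged sketch mirroring the hit-rate loop above, not part of the original notebook):
```
ndcg = 0
for i in range(len(test)):
    user, item, _ = test.values[i]
    pred = top_n(user, 10)
    if item in pred:
        rank = np.where(pred == item)[0][0] + 1  # 1-based position in the top-10 list
        ndcg += 1 / np.log2(rank + 1)
ndcg / len(test)
```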
## Exploratory Data Analysis
```
import numpy as np
import pandas as pd
pd.set_option('max_columns', 150)
# matplotlib and seaborn for plotting
import matplotlib
matplotlib.rcParams['figure.dpi'] = 120 #resolution
matplotlib.rcParams['figure.figsize'] = (8,6) #figure size
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
color = sns.color_palette()
root = 'C:/Data/instacart-market-basket-analysis/'
```
## Data
```
aisles = pd.read_csv(root + 'aisles.csv')
departments = pd.read_csv(root + 'departments.csv')
orders = pd.read_csv(root + 'orders.csv')
order_products_prior = pd.read_csv(root + 'order_products__prior.csv')
order_products_train = pd.read_csv(root + 'order_products__train.csv')
products = pd.read_csv(root + 'products.csv')
```
Checking the dataframes loaded above
```
aisles.head()
departments.head()
products.head()
orders.head()
order_products_prior.head()
order_products_train.head()
```
## Data preparation
For the analysis, I combine order_products_prior and order_products_train into a single order_products dataframe, then merge in the products, aisles, departments and orders tables on product_id, aisle_id, department_id and order_id respectively.
```
order_products = order_products_prior.append(order_products_train)
order_products.shape
order_products = order_products.merge(products, on ='product_id', how='left')
order_products = order_products.merge(aisles, on ='aisle_id', how='left')
order_products = order_products.merge(departments, on ='department_id', how='left')
order_products = order_products.merge(orders, on='order_id', how='left')
order_products.shape
order_products.head()
order_products.tail()
order_products.info()
```
#### Reducing memory usage for faster analysis
```
def reduce_memory(df):
"""
This function reduce the dataframe memory usage by converting it's type for easier handling.
Parameters: Dataframe
Return: Dataframe
"""
start_mem_usg = df.memory_usage().sum() / 1024**2
print("Memory usage of properties dataframe is :",start_mem_usg," MB")
for col in df.columns:
if df[col].dtypes in ["int64", "int32", "int16"]:
cmin = df[col].min()
cmax = df[col].max()
if cmin > np.iinfo(np.int8).min and cmax < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif cmin > np.iinfo(np.int16).min and cmax < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif cmin > np.iinfo(np.int32).min and cmax < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
if df[col].dtypes in ["float64", "float32"]:
cmin = df[col].min()
cmax = df[col].max()
if cmin > np.finfo(np.float16).min and cmax < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif cmin > np.finfo(np.float32).min and cmax < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
print("")
print("___MEMORY USAGE AFTER COMPLETION:___")
mem_usg = df.memory_usage().sum() / 1024**2
print("Memory usage is: ",mem_usg," MB")
print("This is ",100*mem_usg/start_mem_usg,"% of the initial size")
return df
order_products = reduce_memory(order_products)
```
#### Deleting variables that I no longer need
```
del products, orders, order_products_prior, order_products_train, aisles, departments, reduce_memory, root
%whos
```
## Analysis
```
order_products.head()
```
### Aisles
```
temp_df = order_products.groupby("aisle")["reordered"].agg(['count', 'sum']).rename(columns = {'count':'total','sum':'reorders'})
temp_df = temp_df.sort_values('total', ascending=False).reset_index()
```
#### Total Orders and Reorders From Most Popular Aisles
```
fig, ax = plt.subplots(figsize = (15,8))
ax = sns.barplot(y = temp_df.aisle[0:20], x = temp_df.total[0:20], color=color[0], label = "total")
ax = sns.barplot(y = temp_df.aisle[0:20], x = temp_df.reorders[0:20], color=color[3], label = "reordered")
ax.set_ylabel("Aisle")
ax.set_xlabel("Orders Count")
ax.set_title("Total Orders and Reorders From Most Popular Aisles")
ax.legend(loc = 4, prop={'size': 12})
plt.show()
temp_df["reorder_ratio"] = temp_df.reorders/temp_df.total
temp_df = temp_df.sort_values("reorder_ratio", ascending=False).reset_index()
```
#### 20 Aisles with Highest Reorder Ratio
```
fig, ax = plt.subplots(figsize = (13,8))
ax = sns.barplot(y = temp_df.aisle[0:20], x = temp_df.reorder_ratio[0:20], color=color[0])
ax.set_ylabel("Aisles")
ax.set_xlabel("Reorder Ratio")
ax.set_title("Aisles with Highest Reorder Ratio")
ax.tick_params(axis = 'both', labelsize = 12)
plt.show()
```
#### 20 Aisles with Lowest Reorder Ratio
```
fig, ax = plt.subplots(figsize = (13,8))
ax = sns.barplot(y = temp_df.aisle[-21:], x = temp_df.reorder_ratio[-21:], color=color[0])
ax.set_ylabel("Aisles")
ax.set_xlabel("Reorder Ratio")
ax.set_title("Aisles with Lowest Reorder Ratio")
ax.tick_params(axis = 'both', labelsize = 12)
plt.show()
```
### Departments
```
temp_df = order_products.groupby("department")["reordered"].agg(['count', 'sum']).rename(columns = {'count':'total','sum':'reorders'})
temp_df = temp_df.sort_values('total', ascending=False).reset_index()
```
#### Total Orders and Reorders From Departments
```
fig, ax = plt.subplots(figsize = (15,8))
ax = sns.barplot(y = temp_df.department, x = temp_df["total"], color=color[0], label = "total")
ax = sns.barplot(y = temp_df.department, x = temp_df["reorders"], color=color[3], label = "reordered")
ax.set_ylabel("Department")
ax.set_xlabel("Frequency")
ax.legend(loc = 4, prop={'size': 12})
ax.set_title("Total Orders and Reorders From Departments")
plt.show()
temp_df["reorder_ratio"] = temp_df.reorders/temp_df.total
temp_df = temp_df.sort_values("reorder_ratio", ascending=False).reset_index()
```
#### Departments with Highest Reorder Ratio
```
fig, ax = plt.subplots(figsize = (13,8))
ax = sns.barplot(y = temp_df.department, x = temp_df.reorder_ratio, color=color[0])
ax.set_ylabel("Departments")
ax.set_xlabel("Reorder Ratio")
ax.set_title("Departments with Highest Reorder Ratio")
ax.tick_params(axis = 'both', labelsize = 12)
plt.show()
```
### Products
```
temp_df = order_products.groupby("product_name")["reordered"].agg(['count', 'sum']).rename(columns = {'count':'total','sum':'reorders'})
temp_df = temp_df.sort_values('total', ascending=False).reset_index()
```
#### Most Popular Products
```
fig, ax = plt.subplots(figsize = (10,7))
ax = sns.barplot(y = temp_df.product_name[0:20], x = temp_df.total[0:20], color=color[0], label = "total")
ax = sns.barplot(y = temp_df.product_name[0:20], x = temp_df.reorders[0:20], color=color[3], label = "reordered")
ax.set_ylabel("Product")
ax.set_xlabel("Total Orders")
ax.set_title("Most Popular Products")
ax.legend(loc = 4, prop={'size': 12})
plt.show()
temp_df["reorder_ratio"] = temp_df.reorders/temp_df.total
temp_df.sort_values("reorder_ratio", ascending=False).head(10)
```
#### Product Users
```
product_unique_users = order_products.groupby('product_name')['user_id'].nunique().reset_index().rename(columns={'user_id':'total_users'})
product_unique_users.sort_values('total_users', ascending = False).head(10)
product_unique_users = product_unique_users.merge(temp_df, on='product_name', how='left')
product_unique_users.sort_values("reorder_ratio", ascending=False).head(20)
```
#### Cumulative Sum of Unique Users Per Product
```
temp_df = product_unique_users.sort_values("total_users", ascending=False)
temp_df['cum_users'] = temp_df['total_users'].cumsum()
temp_df = temp_df.reset_index(drop=True)
temp_df.head()
fig, ax = plt.subplots(figsize=(15,8))
ax = sns.lineplot(x = temp_df.index, y=temp_df.cum_users)
ax.set_xlabel("Products", size = 9)
ax.set_ylabel("Cumulative Sum of Unique Users", size = 9)
ax.set_title("Cumulative Sum of Unique Users Per Product", size = 12)
plt.show()
```
#### Total Product Orders VS Total Unique Product Buyers
```
fig, ax = plt.subplots(figsize=(15,8))
ax = sns.scatterplot(y = product_unique_users.total, x = product_unique_users.total_users)
ax.set_xlabel("Product Buyers", size = 9)
ax.set_ylabel("Number of Product Purchased", size = 9)
ax.set_title("Total Product Orders VS Total Unique Product Buyers", size = 12)
plt.show()
```
#### Reorder Percentage VS Total Orders
```
fig, ax = plt.subplots(figsize=(10,5))
ax = sns.scatterplot(x = product_unique_users.total, y = product_unique_users.reorder_ratio, color = color[3])
ax.set_xlabel("Number of Products Purchased")
ax.set_ylabel("Reorder Percentage")
ax.set_title("Reorder Percentage VS Total Orders")
plt.show()
```
#### Reorder Percentage VS Total Unique Users
```
fig, ax = plt.subplots(figsize=(10,5))
ax = sns.scatterplot(x = product_unique_users.total_users, y = product_unique_users.reorder_ratio, color = color[0])
ax.set_xlabel("Total Unique Users")
ax.set_ylabel("Reorder Percentage")
ax.set_title("Reorder Percentage VS Total Unique Users")
plt.show()
```
#### Organic Vs Inorganic
```
product_unique_users['Organic'] = product_unique_users.product_name.str.contains("Organic")
product_unique_users.head()
fig, ax = plt.subplots(figsize = (5,5))
ax = sns.barplot(x = product_unique_users.groupby('Organic').size().index, y = product_unique_users.groupby('Organic').size().values)
ax.set_xlabel("Organic Product", size = 9)
ax.set_ylabel("Total Products", size = 9)
ax.set_title("Total Organic and Inorganic products", size = 10)
plt.show()
fig, ax = plt.subplots(figsize = (5,5))
ax = sns.barplot(x = product_unique_users.groupby('Organic')['reorder_ratio'].mean().index, y = product_unique_users.groupby('Organic')['reorder_ratio'].mean().values)
ax.set_xlabel("Organic Product", size = 9)
ax.set_ylabel("Mean reorder ratio", size = 9)
ax.set_title("Mean Reorder Ratio of Organic/Inorganic Products", size = 10)
plt.show()
```
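Note that `str.contains("Organic")` is case-sensitive; a quick check of whether a case-insensitive match would change the counts (illustrative sketch):
```
# case-sensitive vs. case-insensitive matches for "organic" in product names
print(product_unique_users.product_name.str.contains("Organic").sum())
print(product_unique_users.product_name.str.contains("organic", case=False).sum())
```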
#### Add to Cart Order VS Reorder
```
temp_df = order_products.groupby('add_to_cart_order')['reordered'].mean().reset_index()
temp_df.head()
fig, ax = plt.subplots(figsize=(13,6))
ax = sns.lineplot(x=temp_df.add_to_cart_order, y=temp_df.reordered, lw = 1, marker='o')
ax.set_xlabel("Add to Cart Order")
ax.set_ylabel("Reorder Ratio")
ax.set_title("Add to Cart Order VS Reorder Ratio")
plt.show()
```
#### Most Popular Products on different days
```
temp_df = order_products.groupby(['order_dow', 'product_name']).size().reset_index(name='counts')
temp_df = temp_df.sort_values(['order_dow', 'counts'], ascending=[True, False])
temp_df = temp_df.groupby('order_dow').head(5).reset_index(drop = True)
ax = sns.catplot(x="order_dow", y="counts", hue="product_name", data=temp_df, kind="bar", legend=False)
ax.add_legend(title="Product")
ax.set_axis_labels("Day of Week", "Total Orders of Most Frequent Products")
ax.fig.suptitle("Most Popular Products on different Days of Week", va="baseline", ha="center")
ax.savefig("Most Popular Products on Different Days of Week.png")
```
# Training Deep Neural Networks on a GPU with PyTorch
### Part 4 of "PyTorch: Zero to GANs"
This post is the fourth in a series of tutorials on building deep learning models with PyTorch, an open source neural networks library. Check out the full series:
1. [PyTorch Basics: Tensors & Gradients](https://jovian.ml/aakashns/01-pytorch-basics)
2. [Linear Regression & Gradient Descent](https://jovian.ml/aakashns/02-linear-regression)
3. [Image Classification using Logistic Regression](https://jovian.ml/aakashns/03-logistic-regression)
4. [Training Deep Neural Networks on a GPU](https://jovian.ml/aakashns/04-feedforward-nn)
5. Coming soon... (CNNs, RNNs, GANs, etc.)
In [the previous tutorial](https://jvn.io/aakashns/a1b40b04f5174a18bd05b17e3dffb0f0), we trained a logistic regression model to identify handwritten digits from the MNIST dataset with an accuracy of around 86%.

However, we also noticed that it's quite difficult to improve the accuracy beyond 87%, due to the limited power of the model. In this post, we'll try to improve upon it using a *feedforward neural network*.
## System Setup
If you want to follow along and run the code as you read, you can clone this notebook, install the required dependencies using conda, and start Jupyter by running the following commands on the terminal:
```
pip install jovian --upgrade # Install the jovian library
jovian clone fdaae0bf32cf4917a931ac415a5c31b0 # Download notebook
cd 04-feedforward-nn # Enter the created directory
jovian install # Install the dependencies
conda activate 04-feedfoward-nn # Activate virtual env
jupyter notebook # Start Jupyter
```
On older versions of conda, you might need to run `source activate 04-feedfoward-nn` to activate the virtual environment. For a more detailed explanation of the above steps, check out the System setup section in [the first notebook](https://jvn.io/aakashns/e5cfe043873f4f3c9287507016747ae5).
## Preparing the Data
The data preparation is identical to the [previous tutorial](https://jvn.io/aakashns/a1b40b04f5174a18bd05b17e3dffb0f0). We begin by importing the required modules & classes.
```
import torch
import numpy as np
import torchvision
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data.dataloader import DataLoader
```
We download the data and create a PyTorch dataset using the `MNIST` class from `torchvision.datasets`.
```
dataset = MNIST(root='data/',
download=True,
transform=ToTensor())
```
Next, we define and use a function `split_indices` to pick a random 20% fraction of the images for the validation set.
```
def split_indices(n, val_pct):
# Determine size of validation set
n_val = int(val_pct*n)
# Create random permutation of 0 to n-1
idxs = np.random.permutation(n)
# Pick first n_val indices for validation set
return idxs[n_val:], idxs[:n_val]
train_indices, val_indices = split_indices(len(dataset), val_pct=0.2)
print(len(train_indices), len(val_indices))
print('Sample val indices: ', val_indices[:20])
```
We can now create PyTorch data loaders for each of the subsets using a `SubsetRandomSampler`, which samples elements randomly from a given list of indices, while creating batches of data.
```
batch_size=100
# Training sampler and data loader
train_sampler = SubsetRandomSampler(train_indices)
train_dl = DataLoader(dataset,
batch_size,
sampler=train_sampler)
# Validation sampler and data loader
valid_sampler = SubsetRandomSampler(val_indices)
valid_dl = DataLoader(dataset,
batch_size,
sampler=valid_sampler)
```
## Model
To improve upon [logistic regression](https://jvn.io/aakashns/a1b40b04f5174a18bd05b17e3dffb0f0), we'll create a neural network with one **hidden layer**. Here's what this means:
* Instead of using a single `nn.Linear` object to transform a batch of inputs (pixel intensities) into a batch of outputs (class probabilities), we'll use two `nn.Linear` objects. Each of these is called a layer in the network.
* The first layer (also known as the hidden layer) will transform the input matrix of shape `batch_size x 784` into an intermediate output matrix of shape `batch_size x hidden_size`, where `hidden_size` is a preconfigured parameter (e.g. 32 or 64).
* The intermediate outputs are then passed into a non-linear *activation function*, which operates on individual elements of the output matrix.
* The result of the activation function, which is also of size `batch_size x hidden_size`, is passed into the second layer (also known as the output layer), which transforms it into a matrix of size `batch_size x 10`, identical to the output of the logistic regression model.
Introducing a hidden layer and an activation function allows the model to learn more complex, multi-layered and non-linear relationships between the inputs and the targets. Here's what it looks like visually:

The activation function we'll use here is called a **Rectified Linear Unit** or **ReLU**, and it has a really simple formula: `relu(x) = max(0,x)` i.e. if an element is negative, we replace it by 0, otherwise we leave it unchanged.
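As a quick sanity check (a small illustrative snippet, not one of the original notebook's cells), we can apply `F.relu` to a handful of values and confirm the `max(0, x)` behaviour:
```
import torch
import torch.nn.functional as F

# Negative elements are clamped to zero; non-negative elements pass through unchanged
x = torch.tensor([-2.0, -0.5, 0.0, 1.5, 3.0])
print(F.relu(x))  # tensor([0.0000, 0.0000, 0.0000, 1.5000, 3.0000])
```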
To define the model, we extend the `nn.Module` class, just as we did with logistic regression.
```
import torch.nn.functional as F
import torch.nn as nn
class MnistModel(nn.Module):
"""Feedfoward neural network with 1 hidden layer"""
def __init__(self, in_size, hidden_size, out_size):
super().__init__()
# hidden layer
self.linear1 = nn.Linear(in_size, hidden_size)
# output layer
self.linear2 = nn.Linear(hidden_size, out_size)
def forward(self, xb):
# Flatten the image tensors
xb = xb.view(xb.size(0), -1)
# Get intermediate outputs using hidden layer
out = self.linear1(xb)
# Apply activation function
out = F.relu(out)
# Get predictions using output layer
out = self.linear2(out)
return out
```
We'll create a model that contains a hidden layer with 32 activations.
```
input_size = 784
num_classes = 10
model = MnistModel(input_size, hidden_size=32,
out_size=num_classes)
```
Let's take a look at the model's parameters. We expect to see one weight and bias matrix for each of the layers.
```
for t in model.parameters():
print(t.shape)
```
Let's try and generate some outputs using our model. We'll take the first batch of 100 images from our dataset, and pass them into our model.
```
for images, labels in train_dl:
outputs = model(images)
loss = F.cross_entropy(outputs, labels)
print('Loss:', loss.item())
break
print('outputs.shape : ', outputs.shape)
print('Sample outputs :\n', outputs[:2].data)
```
## Using a GPU
As the sizes of our models and datasets increase, we need to use GPUs to train our models within a reasonable amount of time. GPUs contain hundreds of cores that are optimized for performing expensive matrix operations on floating point numbers in a short time, which makes them ideal for training deep neural networks with many layers. You can use GPUs for free on [Kaggle kernels](https://www.kaggle.com/kernels) or [Google Colab](https://colab.research.google.com/), or rent GPU-powered machines on services like [Google Cloud Platform](https://cloud.google.com/gpu/), [Amazon Web Services](https://docs.aws.amazon.com/dlami/latest/devguide/gpu.html) or [Paperspace](https://www.paperspace.com/).
We can check if a GPU is available and the required NVIDIA CUDA drivers are installed using `torch.cuda.is_available`.
```
torch.cuda.is_available()
```
Let's define a helper function to ensure that our code uses the GPU if available, and defaults to using the CPU if it isn't.
```
def get_default_device():
"""Pick GPU if available, else CPU"""
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
device = get_default_device()
device
```
Next, let's define a function that can move data and model to a chosen device.
```
def to_device(data, device):
"""Move tensor(s) to chosen device"""
if isinstance(data, (list,tuple)):
return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
for images, labels in train_dl:
print(images.shape)
images = to_device(images, device)
print(images.device)
break
```
Finally, we define a `DeviceDataLoader` class to wrap our existing data loaders and move data to the selected device, as batches are accessed. Interestingly, we don't need to extend an existing class to create a PyTorch dataloader. All we need is an `__iter__` method to retrieve batches of data, and an `__len__` method to get the number of batches.
```
class DeviceDataLoader():
"""Wrap a dataloader to move data to a device"""
def __init__(self, dl, device):
self.dl = dl
self.device = device
def __iter__(self):
"""Yield a batch of data after moving it to device"""
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
"""Number of batches"""
return len(self.dl)
```
We can now wrap our data loaders using `DeviceDataLoader`.
```
train_dl = DeviceDataLoader(train_dl, device)
valid_dl = DeviceDataLoader(valid_dl, device)
```
Tensors that have been moved to the GPU's RAM have a `device` property which includes the word `cuda`. Let's verify this by looking at a batch of data from `valid_dl`.
```
for xb, yb in valid_dl:
print('xb.device:', xb.device)
print('yb:', yb)
break
```
## Training the Model
As with logistic regression, we can use cross entropy as the loss function and accuracy as the evaluation metric for our model. The training loop is also identical, so we can reuse the `loss_batch`, `evaluate` and `fit` functions from the previous tutorial.
The `loss_batch` function calculates the loss and metric value for a batch of data, and optionally performs gradient descent if an optimizer is provided.
```
def loss_batch(model, loss_func, xb, yb, opt=None, metric=None):
# Generate predictions
preds = model(xb)
# Calculate loss
loss = loss_func(preds, yb)
if opt is not None:
# Compute gradients
loss.backward()
# Update parameters
opt.step()
# Reset gradients
opt.zero_grad()
metric_result = None
if metric is not None:
# Compute the metric
metric_result = metric(preds, yb)
return loss.item(), len(xb), metric_result
```
The `evaluate` function calculates the overall loss (and a metric, if provided) for the validation set.
```
def evaluate(model, loss_fn, valid_dl, metric=None):
with torch.no_grad():
# Pass each batch through the model
results = [loss_batch(model, loss_fn, xb, yb, metric=metric)
for xb,yb in valid_dl]
# Separate losses, counts and metrics
losses, nums, metrics = zip(*results)
# Total size of the dataset
total = np.sum(nums)
# Avg. loss across batches
avg_loss = np.sum(np.multiply(losses, nums)) / total
avg_metric = None
if metric is not None:
# Avg. of metric across batches
avg_metric = np.sum(np.multiply(metrics, nums)) / total
return avg_loss, total, avg_metric
```
The `fit` function contains the actual training loop, as defined in the previous tutorials. We'll make a couple more enhancements to the `fit` function:
* Instead of defining the optimizer manually, we'll pass in the learning rate and create an optimizer inside the `fit` function. This allows us to train the model with different learning rates, if required.
* We'll record the validation loss and accuracy at the end of every epoch, and return the history as the output of the `fit` function.
```
def fit(epochs, lr, model, loss_fn, train_dl,
valid_dl, metric=None, opt_fn=None):
losses, metrics = [], []
# Instantiate the optimizer
if opt_fn is None: opt_fn = torch.optim.SGD
opt = opt_fn(model.parameters(), lr=lr)
for epoch in range(epochs):
# Training
for xb,yb in train_dl:
loss,_,_ = loss_batch(model, loss_fn, xb, yb, opt)
# Evaluation
result = evaluate(model, loss_fn, valid_dl, metric)
val_loss, total, val_metric = result
# Record the loss & metric
losses.append(val_loss)
metrics.append(val_metric)
# Print progress
if metric is None:
print('Epoch [{}/{}], Loss: {:.4f}'
.format(epoch+1, epochs, val_loss))
else:
print('Epoch [{}/{}], Loss: {:.4f}, {}: {:.4f}'
.format(epoch+1, epochs, val_loss,
metric.__name__, val_metric))
return losses, metrics
```
We also define an `accuracy` function which calculates the overall accuracy of the model on an entire batch of outputs, so that we can use it as a metric in `fit`.
```
def accuracy(outputs, labels):
_, preds = torch.max(outputs, dim=1)
return torch.sum(preds == labels).item() / len(preds)
```
Before we train the model, we need to ensure that the data and the model's parameters (weights and biases) are on the same device (CPU or GPU). We can reuse the `to_device` function to move the model's parameters to the right device.
```
# Model (on GPU)
model = MnistModel(input_size, hidden_size=32, out_size=num_classes)
to_device(model, device)
```
Let's see how the model performs on the validation set with the initial set of weights and biases.
```
val_loss, total, val_acc = evaluate(model, F.cross_entropy,
valid_dl, metric=accuracy)
print('Loss: {:.4f}, Accuracy: {:.4f}'.format(val_loss, val_acc))
```
The initial accuracy is around 10%, which is what one might expect from a randomly initialized model (since it has a 1 in 10 chance of getting a label right by guessing randomly).
We are now ready to train the model. Let's train for 5 epochs and look at the results. We can use a relatively high learning rate of 0.5.
```
losses1, metrics1 = fit(5, 0.5, model, F.cross_entropy,
train_dl, valid_dl, accuracy)
```
95% is pretty good! Let's train the model for 5 more epochs at a lower learning rate of 0.1, to further improve the accuracy.
```
losses2, metrics2 = fit(5, 0.1, model, F.cross_entropy,
train_dl, valid_dl, accuracy)
```
We can now plot the accuracies to study how the model improves over time.
```
import matplotlib.pyplot as plt
# Replace these values with your results
accuracies = [val_acc] + metrics1 + metrics2
plt.plot(accuracies, '-x')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.title('Accuracy vs. No. of epochs');
```
Our current model outperforms the logistic regression model (which could only reach around 86% accuracy) by a huge margin! It quickly reaches an accuracy of 96%, but doesn't improve much beyond this. To improve the accuracy further, we need to make the model more powerful. As you can probably guess, this can be achieved by increasing the size of the hidden layer, or adding more hidden layers. I encourage you to try out both these approaches and see which one works better.
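For instance, here is one possible sketch (an illustrative variant with arbitrarily chosen layer sizes, not part of the benchmark above) of a model with two hidden layers that you could experiment with. It can be trained with the same `fit` function defined earlier.
```
import torch.nn as nn                # already imported above
import torch.nn.functional as F      # already imported above

class DeepMnistModel(nn.Module):
    """Feedforward neural network with 2 hidden layers (illustrative sketch)"""
    def __init__(self, in_size, hidden1, hidden2, out_size):
        super().__init__()
        self.linear1 = nn.Linear(in_size, hidden1)
        self.linear2 = nn.Linear(hidden1, hidden2)
        self.linear3 = nn.Linear(hidden2, out_size)

    def forward(self, xb):
        xb = xb.view(xb.size(0), -1)      # flatten the image tensors
        out = F.relu(self.linear1(xb))    # first hidden layer + activation
        out = F.relu(self.linear2(out))   # second hidden layer + activation
        return self.linear3(out)          # output layer (logits)

# e.g. deep_model = to_device(DeepMnistModel(784, 64, 32, 10), device)
```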
## Commit and upload the notebook
As a final step, we can save and commit our work using the jovian library.
```
!pip install jovian --upgrade -q
import jovian
jovian.commit()
```
## Summary and Further Reading
Here is a summary of the topics covered in this tutorial:
* We created a neural network with one hidden layer to improve upon the logistic regression model from the previous tutorial. We also used the ReLU activation function to introduce non-linearity into the model, allowing it to learn more complex relationships between the inputs (pixel densities) and outputs (class probabilities).
* We defined some utilities like `get_default_device`, `to_device` and `DeviceDataLoader` to leverage a GPU if available, by moving the input data and model parameters to the appropriate device.
* We were able to use the exact same training loop: the `fit` function we had defined earlier to train our model and evaluate it using the validation dataset.
There's a lot of scope to experiment here, and I encourage you to use the interactive nature of Jupyter to play around with the various parameters. Here are a few ideas:
* Try changing the size of the hidden layer, or add more hidden layers and see if you can achieve a higher accuracy.
* Try changing the batch size and learning rate to see if you can achieve the same accuracy in fewer epochs.
* Compare the training times on a CPU vs. GPU. Do you see a significant difference? How does it vary with the size of the dataset and the size of the model (no. of weights and parameters)?
* Try building a model for a different dataset, such as the [CIFAR10 or CIFAR100 datasets](https://www.cs.toronto.edu/~kriz/cifar.html) (a minimal loading sketch is included below).
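As a starting point for that last idea, here is a minimal sketch (assuming you keep the same `torchvision` workflow used for MNIST) of downloading CIFAR10. Note that its images are 3 x 32 x 32, so the flattened input size becomes 3072 rather than 784.
```
from torchvision.datasets import CIFAR10
from torchvision.transforms import ToTensor

# Downloads the training split of CIFAR10 into data/ and converts images to tensors
cifar_dataset = CIFAR10(root='data/', download=True, transform=ToTensor())
print(len(cifar_dataset), cifar_dataset[0][0].shape)  # 50000, torch.Size([3, 32, 32])
```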
Here are some references for further reading:
* [A visual proof that neural networks can compute any function](http://neuralnetworksanddeeplearning.com/chap4.html), also known as the Universal Approximation Theorem.
* [But what *is* a neural network?](https://www.youtube.com/watch?v=aircAruvnKk) - A visual and intuitive introduction to what neural networks are and what the intermediate layers represent
* [Stanford CS229 Lecture notes on Backpropagation](http://cs229.stanford.edu/notes/cs229-notes-backprop.pdf) - for a more mathematical treatment of how gradients are calculated and weights are updated for neural networks with multiple layers.
# Example 10 Backreaction
One of $\texttt{DarkHistory}$'s signature features is its ability to include the effects of backreaction on the thermal and ionization histories of the universe. In previous treatments, one assumed that the effects of dark matter energy injection were small enough that one could replace the hydrogen ionization level $x_\text{HII}$ and the IGM temperature $T_m$ by their baseline values when computed in the absence of dark matter energy injection $x_{\text{HII},0}$ and $T_{m,0}$. On the other hand, $\texttt{DarkHistory}$ can be used without this approximation. We will see in this notebook that there is a wide range of models for which this improved treatment is necessary.
## Import Modules
```
%load_ext autoreload
import sys
sys.path.append("..")
%matplotlib inline
%autoreload
%autoreload
import matplotlib
matplotlib.rc_file('matplotlibrc')
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy.interpolate import interp2d
import numpy as np
import darkhistory.physics as phys
from darkhistory.history.tla import get_history
import main
import config
from tqdm import tqdm_notebook as tqdm
```
## Load the maximum allowed $\langle \sigma v \rangle$ and minimum $\tau$
To assess the effects of backreaction, we will compute $T_m(z)$ with and without backreaction for the cases of dark matter decaying or annihilating into $\gamma \gamma$ or $e^+ e^-$, and we will sweep over a wide range of dark matter masses, $m_\chi$. The effects of backreaction are amplified as the lifetime $\tau$ is decreased or the velocity-averaged cross-section $\langle \sigma v \rangle$ is increased. We will therefore use the maximum $\langle \sigma v \rangle$ and minimum $\tau$ allowed by current CMB constraints [[1]](#cite_Tracy2015), [[2]](#cite_Chih_Liang), [[3]](#cite_PLANCK). We download these maximal values as a function of secondary particle injection energy $E_\text{inj}$. The CMB limit data has been included in the downloaded data from [*here*](https://doi.org/10.7910/DVN/DUOUWA).
Note: It is important when downloading the 'CMB_limits*' files that the 'Original Format' option is chosen to ensure that the two .csv files containing the decay limits are downloaded correctly.
```
f_elec_CMB_raw = np.loadtxt(config.data_path+'/CMB_limits_elec_swave.dat', delimiter=',')
log10eng_elec_CMB = f_elec_CMB_raw[0:2760:69, 0]
log10rs_elec_CMB = f_elec_CMB_raw[0:69, 1]
f_phot_CMB_raw = np.loadtxt(config.data_path+'/CMB_limits_phot_swave.dat', delimiter=',')
log10eng_phot_CMB = f_phot_CMB_raw[0:2800:70, 0]
log10rs_phot_CMB = f_phot_CMB_raw[0:70, 1]
f_elec_CMB_raw = np.transpose(np.reshape(f_elec_CMB_raw[:,2], (40,69)))
f_phot_CMB_raw = np.transpose(np.reshape(f_phot_CMB_raw[:,2], (40,70)))
f_elec_CMB = interp2d(log10eng_elec_CMB, log10rs_elec_CMB, f_elec_CMB_raw)
f_phot_CMB = interp2d(log10eng_phot_CMB, log10rs_phot_CMB, f_phot_CMB_raw)
decay_elec_CMB_raw = np.loadtxt(config.data_path+'/CMB_limits_elec_decay.csv', delimiter=',')
decay_phot_CMB_raw = np.loadtxt(config.data_path+'/CMB_limits_phot_decay.csv', delimiter=',')
decay_elec_CMB = interp1d(np.transpose(decay_elec_CMB_raw)[0,:], np.transpose(decay_elec_CMB_raw)[1,:])
decay_phot_CMB = interp1d(np.transpose(decay_phot_CMB_raw)[0,:], np.transpose(decay_phot_CMB_raw)[1,:])
#Derived from Planck 2018 cosmological parameters
p_ann = 3.5e-28
def xsec_bound_elec_CMB(mDM, DM_process):
if DM_process == 'swave':
return p_ann*(mDM*1e-9)/f_elec_CMB(np.log10(mDM-phys.me), np.log10(601))[0]
elif DM_process == 'decay':
return np.array([decay_elec_CMB(mDM*1e-9)])[0]
def xsec_bound_phot_CMB(mDM, DM_process):
if DM_process == 'swave':
return p_ann*(mDM*1e-9)/f_phot_CMB(np.log10(mDM), np.log10(601))[0]
elif DM_process == 'decay':
return np.array([decay_phot_CMB(mDM*1e-9)])[0]
```
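For reference (this is our reading of the code above, not a statement from the original notebook), the s-wave branches of these helper functions simply re-arrange the CMB annihilation constraint for the cross-section,

$$\langle \sigma v \rangle_{\max} = \frac{p_\text{ann} \, m_\chi}{f_\text{eff}(m_\chi)},$$

where $p_\text{ann} = 3.5 \times 10^{-28} \, \text{cm}^3 \, \text{s}^{-1} \, \text{GeV}^{-1}$ is the value set above and $f_\text{eff}$ is the interpolated deposition efficiency, evaluated here near $1+z \simeq 600$. The decay branches read the minimum lifetime directly off the interpolated decay limit curves.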
## Comparison to Fig. 15 of [[4]](#cite_Hongwan)
To get a feel for the calculation and to check $\texttt{DarkHistory}$, we will try to reproduce Fig. 15 of [[4]](#cite_Hongwan). First we establish the baseline (black dashed curve) without dark matter energy injection. This can be done by passing a redshift vector to [*tla.get_history()*](https://darkhistory.readthedocs.io/en/master/_autosummary/darkhistory/history/tla/darkhistory.history.tla.get_history.html).
```
rs_vec = 3000*np.exp(-.004*np.arange(1595))
baseline = get_history(rs_vec)
```
Now we calculate $T_m(z)$ assuming no backreaction. To do so, we will use tabulated values for $f_c(z)$ and use them as inputs for [*tla.get_history()*](https://darkhistory.readthedocs.io/en/master/_autosummary/darkhistory/history/tla/darkhistory.history.tla.get_history.html). We can do this by inputting `baseline_f = True`.
```
no_br = get_history(
rs_vec, baseline_f = True,
inj_particle = 'elec', DM_process = 'decay',
mDM=100e6, lifetime = 3e25,
)
```
Finally, we calculate $T_m(z)$ including backreaction by using [*main.evolve()*](https://darkhistory.readthedocs.io/en/master/_autosummary/main/main.evolve.html). This will take a while the first time it is run because we must download the transfer functions.
```
br_data = main.evolve(
primary='elec_delta',
DM_process='decay', mDM=100e6, lifetime=3e25,
start_rs = 3000,
coarsen_factor=16, backreaction=True
)
T_br = br_data['Tm']/phys.kB
```
We can now re-make Fig. 15.
```
fig_BR_example = plt.figure(figsize=(15,6.5))
ax = plt.subplot(1,2,1)
ax = plt.gca()
ax.loglog()
plt.xlabel('Redshift $(1+z)$')
plt.ylabel('Matter Temperature $T_m$ [K]')
plt.title(r'\bf{Backreaction Comparison}')
plt.axis([5, 3e3, 1, 1e5])
plot_std, = plt.plot(rs_vec, baseline[:,0]/phys.kB, 'k--', label='Baseline')
plot_no_BR, = plt.plot(rs_vec, no_br[:,0]/phys.kB, label='Without Backreaction', color='C0')
plot_BR, = plt.plot(br_data['rs'], br_data['Tm']/phys.kB, label='With Backreaction', color='C1')
plt.text(0.06, 0.9, r'$\chi \to e^+e^-$, $m_\chi$ = 100 MeV', transform=ax.transAxes, fontsize=20)
plt.text(0.06, 0.84, r'$\tau = 3 \times 10^{25}$ s', transform=ax.transAxes, fontsize=20)
legend = plt.legend(
handles=[plot_no_BR, plot_BR, plot_std], loc='lower right'
)
ax = plt.subplot(1,2,2)
ax = plt.gca()
ax.loglog()
plt.xlabel('Redshift $(1+z)$')
plt.ylabel('Hydrogen Ionization Fraction $n_\mathrm{HII}/n_\mathrm{H}$')
plt.title(r'\bf{Backreaction Comparison}')
plt.axis([5, 3e3, 7e-6, 2])
plot_std, = plt.plot(rs_vec, baseline[:,1], 'k--', label='Baseline')
plot_no_BR, = plt.plot(rs_vec, no_br[:,1], label='Without Backreaction', color='C0')
plot_BR, = plt.plot(br_data['rs'], br_data['x'][:,0], label='With Backreaction', color='C1')
plt.text(0.06, 0.9, r'$\chi \to e^+e^-$, $m_\chi$ = 100 MeV', transform=ax.transAxes, fontsize=20)
plt.text(0.06, 0.84, r'$\tau = 3 \times 10^{25}$ s', transform=ax.transAxes, fontsize=20)
legend = plt.legend(
handles=[plot_no_BR, plot_BR, plot_std], loc='lower right'
)
```
## Calculate With and Without back-reaction
Now we perform the same calculation, except now over a grid of energy injections. We first specify these energies, then we define the function `get_T()` to easily calculate $T_m$ for a given mass and injection model with and without backreaction.
```
log10_Einj_arr = np.arange(4.01, 12.51, 0.25)
```
When computing s-wave annihilation models, we must remember that structure formation now boosts the annihilation rate at late times. We can load one of DarkHistory's default structure formation boost factors that can be obtained using the function [*physics.struct_boost_func()*](https://darkhistory.readthedocs.io/en/master/_autosummary/darkhistory/physics/darkhistory.physics.struct_boost_func.html). The first thing we need to do is obtain the function that returns the boost:
```
struct_boost_einasto_subs = phys.struct_boost_func(model='einasto_subs')
%autoreload
def get_T(mDM, DM_process, br, pri):
"""Gets the temperature history, decays.
Parameters
----------
mDM : float
The mass of the dark matter in eV.
tau : float
The decay lifetime in s.
br : bool
Whether to use backreaction or not.
pri : {'elec', 'phot'}
The primary particles that DM decays to.
Returns
--------
float
The temperature history in K.
"""
if pri == 'elec':
pri_str = 'elec_delta'
cf = 12
param = xsec_bound_elec_CMB(mDM, DM_process)
rs_vec = 3000*np.exp(-.012*np.arange(552))
if pri == 'phot':
pri_str = 'phot_delta'
cf = 4
param = xsec_bound_phot_CMB(mDM, DM_process)
rs_vec = 3000*np.exp(-.004*np.arange(1655))
if DM_process == 'swave':
struct_boost = phys.struct_boost_func(model='einasto_subs')
else:
struct_boost = None
if br:
result = main.evolve(
primary=pri_str,
DM_process=DM_process, mDM=mDM, lifetime=param, sigmav = param,
start_rs = 3000, end_rs=4.004,
coarsen_factor=cf, backreaction=True,
struct_boost = struct_boost
)
return result['Tm']/phys.kB
else:
if pri == 'phot' and DM_process == 'swave':
# Use main.evolve() here because the differential is so small that
# using f_std leads to undesirable interpolation errors.
result = main.evolve(
primary=pri_str,
DM_process=DM_process, mDM=mDM, lifetime=param, sigmav = param,
start_rs = 3000, end_rs=4.004,
coarsen_factor=cf, backreaction=False,
struct_boost = struct_boost
)
return result['Tm']/phys.kB
else:
result = get_history(
rs_vec, baseline_f = True,
inj_particle = pri, DM_process = DM_process,
mDM=mDM, lifetime = param, sigmav = param
)
return result[:,0]/phys.kB
```
We now loop over all injection energies and calculate $T_m$ for decay or annihilation into photons or $e^+e^-$.
```
Tm_no_br = np.array([None for m in log10_Einj_arr])
Tm_br = np.zeros_like(Tm_no_br)
%autoreload
for i, log10Einj in enumerate(tqdm(log10_Einj_arr)):
Einj = 10**log10Einj
print('****** log10(mDM): ', log10Einj, ' ******')
Tm_no_br[i] = {
'phot_decay': get_T(2*Einj, 'decay', br=False, pri='phot'),
# We include this little fudge factor here because python's ode solver will sometimes guess negative
# values for the redshift at certain values of Einj
'phot_swave': get_T(Einj+1e-4, 'swave', br=False, pri='phot'),
'elec_swave': get_T(Einj+phys.me, 'swave', br=False, pri='elec'),
'elec_decay': get_T(2*(Einj+phys.me), 'decay', br=False, pri='elec')
}
print('###### Calculation Complete! ######')
```
And then we calculate with backreaction.
```
%autoreload
for i, log10Einj in enumerate(tqdm(log10_Einj_arr)):
Einj = 10**log10Einj
print('****** log10(mDM): ', log10Einj, ' ******')
Tm_br[i] = {
'phot_decay': get_T(2*Einj, 'decay', br=True, pri='phot'),
'phot_swave': get_T(Einj, 'swave', br=True, pri='phot'),
'elec_swave': get_T(Einj+phys.me, 'swave', br=True, pri='elec'),
'elec_decay': get_T(2*(Einj+phys.me), 'decay', br=True, pri='elec')
}
print('###### Calculation Complete! ######')
```
## Heat Plots of the Effects of Backreaction
Now we save the data we just generated, or load data that we've previously generated. `direc` should be set to the directory where the data was (or will be) saved, e.g. `direc=/directory/where/the/data/gets/saved/`.
```
import pickle
save_data = False
direc = '/foo/bar/'
if save_data:
pickle.dump(Tm_no_br, open(direc+'Tm_no_br.p','wb'))
pickle.dump(Tm_br, open(direc+'Tm_br.p','wb'))
else:
Tm_no_br = pickle.load(open(direc+'Tm_no_br.p','rb'))
Tm_br = pickle.load(open(direc+'Tm_br.p','rb'))
```
And then we plot our data
```
inj_part = 'phot'
inj_type = 'swave'
log10_Einj_arr = np.arange(4.01, 12.51, 0.25)
log10_Einj_elec_decay_arr = np.arange(4.01, 12.51, 0.25)
log10_E = log10_Einj_arr
diff_list = np.array([Tm_br[k][inj_part+'_'+inj_type]/Tm_no_br[k][inj_part+'_'+inj_type]-1. for k in np.arange(len(log10_Einj_arr))])
diff_list[diff_list<=0]=.001
if inj_part == 'phot':
cf=4
if inj_type == 'decay':
conv_fac = 2
conv_add = 0
else:
conv_fac = 1
conv_add = 0
else:
if inj_type == 'swave':
conv_fac = 1
conv_add = phys.me
else:
log10_E = log10_Einj_elec_decay_arr
conv_fac = 2
conv_add = phys.me
cf=12
rs_list = np.exp(np.arange(np.log(3000), np.log(4.004), step=-0.001*cf))
plt.figure(figsize=(7.7, 6.2))
ax = plt.gca()
ax.loglog()
# Specify the proper contour levels and label them
if inj_part == 'phot':
if inj_type == 'decay':
frac_change_levels = [1e-4, 5, 10, 20, 50, 100, 200]
frac_change_labels = ['0\%', '5\%', '10\%', '20\%', '50\%', '100\%', '200\%']
else:
frac_change_levels = [1e-5, .1, .5, 1, 2, 5, 10, 20, 40]
frac_change_labels = ['0\%','0.1\%', '0.5\%', '1\%', '2\%', '5\%', '10\%', '20\%', '40\%']
else:
if inj_type == 'decay':
frac_change_levels = [0.001, 5, 10, 50, 100, 200, 500, 900]
frac_change_labels = ['0\%', '5\%', '10\%', '50\%', '100\%', '200\%', '500\%', '900\%']
else:
frac_change_levels = [0.001, 5, 10, 20, 50, 100, 200]
frac_change_labels = ['0\%', '5\%', '10\%', '20\%', '50\%', '100\%', '200\%']
plt_heating = plt.contour(
(10**log10_E + conv_add)*conv_fac, rs_list,
np.transpose(np.abs(diff_list)*100),
levels=frac_change_levels,
linewidths=0.5,
colors='k'
)
cntr1 = ax.contourf((10**log10_E + conv_add)*conv_fac,
rs_list,
np.transpose(np.abs(diff_list)*100),
frac_change_levels,
norm=matplotlib.colors.LogNorm(),
cmap=matplotlib.cm.inferno
)
cbar = plt.colorbar(
cntr1, ax=ax, ticks=frac_change_levels,
)
cbar.set_ticklabels(frac_change_labels)
cbar.set_label(r'$T_m$ Fractional Change $\delta T_m/ T_{m,0}$', labelpad=20)
plt.axis([(10**log10_E[0] + conv_add)*conv_fac, (10**log10_E[-1] + conv_add)*conv_fac, 5.1, 201.61653821924926])
if inj_part == 'elec':
if inj_type == 'decay':
label1_txt = r'$\chi \to e^+e^-$'
label2_txt = r'$\tau_{\min}$ from CMB'
label1_dx = -0.05
label2_dx = +0.12
else:
label1_txt = r'$\chi \chi \to e^+e^-$'
label2_txt = r'$\langle \sigma v \rangle_{\max}$ from CMB'
label1_dx = -0.1
label2_dx = +0.02
else:
if inj_type == 'decay':
label1_txt = r'$\chi \to \gamma \gamma$'
label2_txt = r'$\tau_{\min}$ from CMB'
label1_dx = 0.
label2_dx = +0.12
else:
label1_txt = r'$\chi \chi \to \gamma \gamma$'
label2_txt = r'$\langle \sigma v \rangle_{\max}$ from CMB'
label1_dx = -0.04
label2_dx = +0.03
plt.text(0.77+label1_dx, 0.91, label1_txt,transform=ax.transAxes, color='w', fontsize=20)
plt.text(0.46+label2_dx, 0.83, label2_txt,transform=ax.transAxes, color='w', fontsize=20)
plt.title(r'\bf{Backreaction $T_m$ Comparison}')
plt.ylabel(r'Redshift ($1+z$)')
plt.xlabel(r'Dark Matter Mass $m_\chi$ [eV]')
```
## Bibliography
[1]<a id='cite_Tracy2015'></a> T. R. Slatyer "Indirect dark matter signatures in the cosmic dark ages. I. Generalizing the bound on s-wave dark matter annihilation from Planck results," arXiv:1506.03811, Phys. Rev. D 93, no. 2, 023527 (2016).
[2]<a id='cite_Chih_Liang'></a> T. R. Slatyer and C. Wu, “General Constraints on Dark Matter Decay from the Cosmic Microwave Background,” arXiv:1610.06933, Phys. Rev. D95, 023010 (2017).
[3]<a id='cite_PLANCK'></a> N. Aghanim et al. (Planck), “Planck 2018 results. VI. Cosmological parameters,” (2018), arXiv:1807.06209.
[4]<a id='cite_Hongwan'></a> Hongwan Liu, Tracy R. Slatyer, and Jesús Zavala, “Contributions to cosmic reionization from dark matter annihilation and decay,” Phys. Rev. D94, 063507 (2016),
arXiv:1604.02457 [astro-ph.CO].
# Intro
In this notebook, we will learn about constructing and interpreting precision-recall (PR) and receiver operating characteristic (ROC) curves. These curves are typically used to judge the performances of probabilistic classifiers beyond the confusion matrix and other performance measures that are derived from the confusion matrix. Note that the confusion matrix primarily judges categorical decisions, while PR and ROC curves are assessing probabilistic decisions. If a classifier does indeed make only categorical decisions (e.g. True or False, 1 or 0, yes or no, etc..), it may be sufficient to stick with a confusion matrix (although it is possible to turn categorical decisions to probabilistic ones, see [isotonic regression](https://en.wikipedia.org/wiki/Isotonic_regression)).
However, oftentimes, a classifier makes probabilistic predictions (e.g. 80% True and 20% False, 75% 1 and 25% 0, 33% yes and 67% no, etc...) and the tragedy of judging classifier performance starts by choosing an arbitrary threshold (typically, > 50%) to categorize the prediction. For example, if a probabilistic classifier predicts True at 80%, one might say that the prediction is categorically True (since 80% > 50%); if a prediction is True at 49%, one might say that the prediction is categorically False (since 49% < 50%). The 50% cut-off is arbitrary, and one may choose any cut-off or threshold to impact the confusion matrix. With PR and ROC curves, the idea is to vary the cut-off from $[0, 1]$ and measure the performance over all these thresholds. In fact, after varying the cut-off, one may use PR and ROC curves to choose an optimal threshold to balance for trade-offs between precision vs recall or true positive rate (TPR) vs false positive rate (FPR).
To understand PR and ROC curves, first understand the confusion matrix. Assume that there are only 2 classes to predict: True and False. Then a 2 x 2 matrix may be created such that the rows represent the predictions and the columns represent the truth.
```
+-------+------+-------+
| | True | False |
+-------+------+-------+
| True | tp | fp |
+-------+------+-------+
| False | fn | tn |
+-------+------+-------+
```
Each element in the confusion matrix stands for something.
* tp is the number of true positive (model predicts the observation as true when the observation is really true)
* fp is the number of false positive (model predicts the observation as true when the observation is really false)
* fn is the number of false negative (model predicts the observation as false when the observation is really true)
* tn is the number of true negative (model predicts the observation as false when the observation is really false)
From these simple counts, a wonderful list of performance measures may be created (refer to the [Wikipedia site](https://en.wikipedia.org/wiki/Confusion_matrix)). Here, we only focus on precision, recall, TPR and FPR, which are defined as follows.
* $\text{precision}=\frac{\text{TP}}{\text{TP} + \text{FP}}$
* $\text{recall}=\frac{\text{TP}}{\text{TP} + \text{FN}}$
* $\text{TPR}=\frac{\text{TP}}{\text{TP} + \text{FN}}$
* $\text{FPR}=\frac{\text{FP}}{\text{FP} + \text{TN}}$
Note that recall and TPR are the same thing and also referred to by many other names (e.g. sensitivity).
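As a quick worked example (with made-up counts, not drawn from any data in this notebook), here is how the four measures fall out of a single confusion matrix:
```
# Hypothetical confusion-matrix counts
tp, fp, fn, tn = 40, 10, 20, 30

precision = tp / (tp + fp)  # 40 / 50 = 0.8
recall    = tp / (tp + fn)  # 40 / 60 ~ 0.667 (identical to TPR)
tpr       = tp / (tp + fn)  # 0.667
fpr       = fp / (fp + tn)  # 10 / 40 = 0.25

print(precision, recall, tpr, fpr)
```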
If a classifier is a probabilistic one, we may choose an arbitrary threshold, above which we will categorically say the prediction is True, to induce a confusion matrix and hence precision, recall, TPR and FPR. Changing the threshold will change the confusion matrix and hence, precision, recall, TPR and FPR (typically). We can `cheat` and choose one threshold to produce a set of desired precision, recall, TPR and FPR and report out that our classifier performs really well. However, we know better that we should look at these performance measures over all possible thresholds and report the aggregate (typically average) performance. The PR and ROC curves are visualizations of this operation, and the area under these curves (integrating over these curves) are the expected performance across all thresholds.
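To make the effect of the cut-off concrete, here is a tiny illustration (with made-up probabilities) of how the same probabilistic predictions turn into different categorical decisions, and hence different confusion matrices, at two different thresholds:
```
import numpy as np

y_prob = np.array([0.95, 0.80, 0.60, 0.49, 0.30, 0.05])  # hypothetical predicted P(True)

for t in [0.5, 0.7]:
    y_hat = (y_prob >= t).astype(int)
    print('threshold = {:.1f} -> predictions = {}'.format(t, y_hat))
# threshold = 0.5 -> predictions = [1 1 1 0 0 0]
# threshold = 0.7 -> predictions = [1 1 0 0 0 0]
```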
The PR curve plots precision vs recall and we may observe the trade-off between the two; the ROC curve plots TPR vs FPR and we also observe the trade-off between the two. Note that precision, recall, TPR and FPR are scalar values that all lie in the range of $[0, 1]$. As such, the PR and ROC curves' domains in the x and y axis are also in the range $[0, 1]$. Meaning, the whole area is a square and a square with side length of 1 has an area of 1. This geometric understanding is important because when we draw the curve and integrate, the integration value will always be $[0, 1]$ where a lower value indicates `bad` performance and a higher value indicates `good` performance. The integration of the curve is often called the area under the curve (AUC), and the AUC for PR is denoted as AUC-PR and for ROC is AUC-ROC.
# Simulate data
Let's start out by simulating some data. We have 2 classes, 0 and 1, and we sample from 2 different multivariate gaussians.
* $\mu_0 = [1.5, 2.5, 3.3]$, $\Sigma_0 = \begin{bmatrix} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 1 \end{bmatrix}$
* $\mu_1 = [1.7, 2.9, 3.0]$, $\Sigma_1 = \begin{bmatrix} 1.0 & 0.5 & 0.2 \\ 0.5 & 1.0 & 2.0 \\ 0.2 & 2.0 & 1.0 \end{bmatrix}$
We purposefully make the means between the two models very similar to give the classifier problems deciding. We take 10,000 samples from each (total 20,000 samples) for training, `T`, and 25 samples from each (total 50 samples) for validation, `V`.
```
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from collections import namedtuple
from scipy.stats import multivariate_normal
import warnings
warnings.filterwarnings('ignore')
np.random.seed(37)
MVN = namedtuple('MVN', 'mean cov')
DATA = namedtuple('DATA', 'X, y')
mvn0 = MVN(np.array([1.5, 2.5, 3.3]), np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]))
mvn1 = MVN(np.array([1.7, 2.9, 3.0]), np.array([[1.0, 0.5, 0.2], [0.5, 1.0, 2.0], [0.2, 2.0, 1.0]]))
N = 10000
X0 = np.array([multivariate_normal.rvs(mvn0.mean, mvn0.cov) for _ in range(N)])
X1 = np.array([multivariate_normal.rvs(mvn1.mean, mvn1.cov) for _ in range(N)])
y0 = np.full((N, 1), 0, dtype=np.int32)
y1 = np.full((N, 1), 1, dtype=np.int32)
X = np.vstack([X0, X1])
y = np.vstack([y0, y1])
T = DATA(X, y)
N = 25
X0 = np.array([multivariate_normal.rvs(mvn0.mean, mvn0.cov) for _ in range(N)])
X1 = np.array([multivariate_normal.rvs(mvn1.mean, mvn1.cov) for _ in range(N)])
y0 = np.full((N, 1), 0, dtype=np.int32)
y1 = np.full((N, 1), 1, dtype=np.int32)
X = np.vstack([X0, X1])
y = np.vstack([y0, y1])
V = DATA(X, y)
```
# Learn a classifier
After we have our training `T` and validation `V` data, we plug the data into a random forest classifier. We use the `predict_proba` function to retrieve the prediction probabilities of the validation examples being 1 (or True). Below, `outcomes` is a list of tuples, where each tuple is a pair composed of the true label `y_t` and the predicted probability `y_p`.
```
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier(random_state=37)
rf_model.fit(T.X, T.y)
y_pred = rf_model.predict_proba(V.X)[:,1]
outcomes = sorted([(y_t, y_p) for y_t, y_p in zip(V.y[:,0], y_pred)],
key=lambda tup: (-tup[1], -tup[0]))
```
# Construct the precision-recall curve
Now with `y_t, y_p`, we create count tp, fp and fn and compute precision and recall. Note that `get_pr()` computes the precision and recall for one threshold `t`, and that `get_prs` returns a list of precisions, recalls, and thresholds. To make a precision-recall curve, we use the `step` function. We make two precision-recall curves; one where the precision and recall values are computed from Scikit and another one where these values are computed using our own code. This side-by-side comparison is just to show that we know how to compute and graph these precision-recall curves ourselves.
```
from sklearn.metrics import precision_recall_curve
def get_pr(outcomes, t):
tp = 0
fp = 0
fn = 0
for y_t, y_p in outcomes:
clazz_t = int(y_t)
clazz_p = 1 if y_p >= t else 0
tp = tp + (1 if clazz_t == 1 and clazz_p == 1 else 0)
fp = fp + (1 if clazz_t == 0 and clazz_p == 1 else 0)
fn = fn + (1 if clazz_t == 1 and clazz_p == 0 else 0)
pre = tp / (tp + fp)
rec = tp / (tp + fn)
return pre, rec
def get_prs(outcomes, thresholds=np.linspace(0.2, 1, 9)):
pres = []
recs = []
thrs = []
for t in thresholds:
pre, rec = get_pr(outcomes, t)
pres.append(pre)
recs.append(rec)
thrs.append(t)
pres.append(1.0)
recs.append(0.0)
return np.array(pres), np.array(recs), np.array(thrs)
pre_m, rec_m, _ = get_prs(outcomes, thresholds=np.linspace(0.2, 1, 100))
pre_s, rec_s, _ = precision_recall_curve(V.y, y_pred)
baseline = np.sum(V.y) / len(V.y)
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
ax[0].step(rec_s, pre_s, color='b', alpha=0.5, where='post', label='PR curve')
ax[0].set_xlabel('recall')
ax[0].set_ylabel('precision')
ax[0].set_title('Scikit Precision-Recall Curve')
ax[0].plot((0, 1), (baseline, baseline), 'r--', alpha=0.3, label='baseline')
ax[0].legend()
ax[1].step(rec_m, pre_m, color='b', alpha=0.5, where='post', label='PR curve')
ax[1].set_xlabel('recall')
ax[1].set_ylabel('precision')
ax[1].set_title('Manual Precision-Recall Curve')
ax[1].plot((0, 1), (baseline, baseline), 'r--', alpha=0.3, label='baseline')
ax[1].legend()
plt.tight_layout()
```
# Integrate the precision-recall curve (using trapezoid method)
Now we integrate over these curves.
* `apr` is the average precision value computed from Scikit's API
* `apr_s` is the average precision value computed from our code (precision and recall from Scikit)
* `apr_m` is the average precision value computed from our code (precision and recall from our code)
Notice how they differ: our code uses trapezoidal integration, which is optimistic.
```
from sklearn.metrics import average_precision_score
def get_apr(pre, rec):
x = np.flip(rec)
y = np.flip(pre)
return np.trapz(y, x)
apr = average_precision_score(V.y, y_pred)
apr_s = get_apr(pre_s, rec_s)
apr_m = get_apr(pre_m, rec_m)
print('apr = {:.5f}, apr_s = {:.5f}, apr_m = {:.5f}'.format(apr, apr_s, apr_m))
```
# Integrate the precision-recall curve (using weighted average)
Here, we use the conservative weighted-average integration approach (summing the precision at each threshold weighted by the increase in recall, which is what Scikit-Learn's `average_precision_score` does), and now all of the average precision values agree.
```
def get_apr(pre, rec):
x = np.flip(rec)
y = np.flip(pre)
total = 0
for i in range(len(x)):
r_c = x[i]
r_p = x[i if i - 1 < 0 else i - 1]
p_i = y[i]
a = (r_c - r_p) * p_i
total = total + a
return total
apr_s = get_apr(pre_s, rec_s)
apr_m = get_apr(pre_m, rec_m)
print('apr = {:.5f}, apr_s = {:.5f}, apr_m = {:.5f}'.format(apr, apr_s, apr_m))
```
# Construct the receiver operating characteristic (ROC) curve
We turn our attention to the ROC curve now. The algorithms for generating the data for PR and ROC curves are nearly identical and differ only in what we return. In PR, we return the precision and recall, but in ROC, we return TPR and FPR.
Note that here, we pass in the thresholds in descending order and prepend a 2 to the thresholds in $[0, 1]$. The prepended 2 is easy to explain: no predicted probability can reach it, so that threshold classifies everything as negative and pins the start of the curve at $(\text{FPR}, \text{TPR}) = (0, 0)$ (we did a similar thing when constructing the PR curve, although there we appended to the precision and recall vectors instead of to the thresholds). The descending order of the thresholds makes the FPR vector come out in increasing order.
```
from sklearn.metrics import roc_curve
def get_fpr_tpr(outcomes, t):
tp = 0
tn = 0
fp = 0
fn = 0
for y_t, y_p in outcomes:
clazz_t = int(y_t)
clazz_p = 1 if y_p >= t else 0
tp = tp + (1 if clazz_t == 1 and clazz_p == 1 else 0)
fp = fp + (1 if clazz_t == 0 and clazz_p == 1 else 0)
fn = fn + (1 if clazz_t == 1 and clazz_p == 0 else 0)
tn = tn + (1 if clazz_t == 0 and clazz_p == 0 else 0)
fpr = fp / (fp + tn)
tpr = tp / (tp + fn)
return fpr, tpr
def get_all_fpr_tpr(outcomes, thresholds=np.flip(np.append(np.linspace(0, 1, 11), [2]))):
fprs = []
tprs = []
thrs = []
for t in thresholds:
fpr, tpr = get_fpr_tpr(outcomes, t)
fprs.append(fpr)
tprs.append(tpr)
thrs.append(t)
return np.array(fprs), np.array(tprs), np.array(thrs)
fpr_m, tpr_m, t_m = get_all_fpr_tpr(outcomes, thresholds=np.flip(np.append(np.linspace(0, 1, 100), [2])))
fpr_s, tpr_s, t_s = roc_curve(V.y, y_pred)
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
ax[0].step(fpr_s, tpr_s, color='b', alpha=0.5, where='post', label='ROC curve')
ax[0].set_xlabel('fpr')
ax[0].set_ylabel('tpr')
ax[0].set_title('Scikit ROC')
ax[0].plot((0, 1), (0, 1), 'r--', alpha=0.2, label='baseline')
ax[0].legend()
ax[1].step(fpr_m, tpr_m, color='b', alpha=0.5, where='post', label='ROC curve')
ax[1].set_xlabel('fpr')
ax[1].set_ylabel('tpr')
ax[1].set_title('Manual ROC')
ax[1].plot((0, 1), (0, 1), 'r--', alpha=0.2, label='baseline')
ax[1].legend()
plt.tight_layout()
```
# Integrate the ROC curve (using trapezoid method)
Here we use trapezoid integration over the ROC curves and observe that our manual approach agrees with Scikit's.
```
from sklearn.metrics import roc_auc_score
def get_auc(tpr, fpr):
return np.trapz(tpr, fpr)
auc = roc_auc_score(V.y, y_pred)
auc_s = get_auc(tpr_s, fpr_s)
auc_m = get_auc(tpr_m, fpr_m)
print('auc = {:.5f}, auc_s = {:.5f}, auc_m = {:.5f}'.format(auc, auc_s, auc_m))
```
# Interpretation of area under the curve (AUC)
The interpretations of AUC-PR and AUC-ROC are different.
## AUC-ROC
The `baseline` curve in a ROC curve plot is the diagonal (bottom left to top right) line. The baseline diagonal splits the square into two equal halves and integrating over it will be 0.5. A ROC curve that aligns with this baseline curve is interpreted as doing no better than chance guessing. A ROC curve that `dominates` (is greater than) the baseline curve is interpreted as doing better than chance guessing. Finally, a ROC curve that is below the baseline curve does worse than chance guessing.
An AUC-ROC equal to 0.5 indicates classification performance that is no better than chance guessing. A `perfect` ROC curve integrates to 1.0 and the classification performance is perfect (never wrong). If AUC-ROC is less than 0.5, then the classification performance is worse than chance guessing. It is interesting to note that in the case of a very low AUC-ROC, if we simply reverse the direction of the decision, a really bad classifier can become a really good one. For example, if the area under the curve is 0.1, we may reverse our decision (e.g. turn True to False and False to True), and we should observe an AUC of 0.9!
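To make the reversed-decision point concrete, here is a small sketch (reusing `y_pred` and the validation labels `V.y` from the running example; the reuse of these names is our assumption) that computes AUC-ROC for the original scores and for the reversed scores. The two areas are complementary and sum to 1.
```
from sklearn.metrics import roc_auc_score

# AUC-ROC for the original scores and for the "reversed decision" scores
auc_original = roc_auc_score(V.y, y_pred)
auc_reversed = roc_auc_score(V.y, 1 - y_pred)

# reversing the scores reverses the ranking, so the two AUCs are complementary
print('original = {:.5f}, reversed = {:.5f}, sum = {:.5f}'.format(
    auc_original, auc_reversed, auc_original + auc_reversed))
```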
## AUC-PR
The `baseline` curve in a PR curve plot is a horizontal line (as opposed to the diagonal in the ROC curve). Note that this PR baseline curve (horizontal line) does **NOT** necessarily split the plot area into equal halves. The y-intercept of the PR baseline curve is determined by
$b=\frac{P}{P + N}$,
where
* $P$ is the number of positive examples,
* $N$ is the number of negative examples, and
* $b$ denotes the baseline (percentage/proportion of positive examples).
Only when $P = N$ does the PR baseline curve split the plot area into equal halves. In the running classification example, note that $P = N$ as we had 25 positive and 25 negative examples. Also note that the integration over the PR baseline curve is always equal to $b$.
AUC-PR is the expected probability of the model retrieving a relevant observation. When
* AUC-PR $ = b$, then the classifier does no better than randomly retrieving relevant examples,
* AUC-PR $ > b$, then the classifier does better than randomly retrieving relevant examples, and
* AUC-PR $ < b$, then the classifier does worse than randomly retrieving relevant examples.
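As a quick sanity check of this baseline interpretation (our own hedged illustration, reusing `V.y` from the running example), a classifier that assigns uniformly random scores should achieve an average precision close to $b$, up to sampling noise with only 50 validation points.
```
import numpy as np
from sklearn.metrics import average_precision_score

# random scores simulate randomly retrieving relevant examples
rand_scores = np.random.uniform(size=len(V.y))
ap_random = average_precision_score(V.y, rand_scores)
b = np.sum(V.y) / len(V.y)
print('b = {:.5f}, AP with random scores = {:.5f}'.format(b, ap_random))
```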
## AUC-ROC and AUC-PR
Here are some closing thoughts on AUC-ROC and AUC-PR.
* Typically, both PR and ROC curves are plotted together; however, PR curves are very useful when there is `data imbalance` or `data skew` (very few examples of the positive class during training) or when you want to emphasize and characterize the classifier's performance for predicting the positive class. For example, if there are very few cases of a disease and you are building a classifier to predict the presence of that disease in a patient, you might want to emphasize the PR curve over the ROC curve.
* The PR curve does not care about TN (true negatives); it only uses the counts from the confusion matrix that involve the positive class (TP, FP, FN). On the other hand, the ROC curve considers all the counts (TP, FP, FN, TN) from the confusion matrix.
* The AUC-ROC may be manipulated to appear better if one increases TN (puts a lot of negative examples into the sample).
* Note that the ROC baseline curve is diagonal and always the same across ROC plots, but the PR baseline curve is horizontal and differs across PR plots; this observation suggests that you may compare AUC-ROC for two different classifiers, each trained with different data, but you should not do so for AUC-PR (unless the baselines are the same).
* Sometimes, one may wish to find an optimal cut-off/threshold from the PR or ROC curves. One typical way is to simply grab the threshold closest to the corner. For a PR curve, since it is decreasing from left to right, the threshold closest to the top right corner might be the optimal threshold. For a ROC curve, since it is increasing from left to right, the threshold closest to the top left corner might be the optimal threshold. Remember, thresholds selected off the PR and ROC curves do not optimize accuracy; they optimize the trade-off between precision and recall, or between TPR and FPR.
* There are faster ways of constructing the PR and ROC curves. Here, we construct the curves over predefined thresholds, and for $T$ thresholds we have to pass over the data $T$ times; with $N$ data points, this means $TN$ operations. If we sort the predictions descendingly by their probabilities of being in the positive class, we can use those distinct probabilities as thresholds and, at every unique probability, update a (cumulative) confusion matrix. This approach requires only a single pass over the sorted data, plus the sort itself (which is really not a problem, as the best comparison sorts run in $O(N \log N)$ worst-case time); a minimal sketch of this sort-based construction follows this list. This approach is akin to finding [concordant pairs](https://en.wikipedia.org/wiki/Concordant_pair), hence the concordance interpretation of AUC-ROC (why not a concordance interpretation of AUC-PR then?).
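Here is a minimal sketch of that sort-based construction (our own illustration, not code from Scikit): since `outcomes` is already sorted by descending predicted probability, a single sweep that accumulates TP and FP counts and emits a point at every distinct score reproduces the ROC curve.
```
import numpy as np

def get_roc_points(outcomes):
    # outcomes must be sorted by descending predicted probability
    n_pos = sum(1 for y_t, _ in outcomes if int(y_t) == 1)
    n_neg = len(outcomes) - n_pos
    tp, fp = 0, 0
    fprs, tprs = [0.0], [0.0]
    for i, (y_t, y_p) in enumerate(outcomes):
        tp = tp + (1 if int(y_t) == 1 else 0)
        fp = fp + (1 if int(y_t) == 0 else 0)
        # emit a point only after the last of a run of tied scores
        if i == len(outcomes) - 1 or outcomes[i + 1][1] != y_p:
            fprs.append(fp / n_neg)
            tprs.append(tp / n_pos)
    return np.array(fprs), np.array(tprs)

fpr_fast, tpr_fast = get_roc_points(outcomes)
print('auc_fast = {:.5f}'.format(np.trapz(tpr_fast, fpr_fast)))
```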
[A useful way to interpret AUC-ROC is with concordance](https://stats.stackexchange.com/questions/190216/why-is-roc-auc-equivalent-to-the-probability-that-two-randomly-selected-samples). AUC-ROC is the expected probability that the classifier will `rank` a randomly selected positive example higher than a randomly selected negative one (i.e., assign it a higher probability of belonging to the positive class).
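A hedged Monte Carlo sketch of the concordance interpretation (again reusing `V.y` and `y_pred`): repeatedly draw one positive and one negative validation example and count how often the positive one receives the higher score, counting ties as one half. The estimate should land near the AUC-ROC reported above.
```
import numpy as np
from sklearn.metrics import roc_auc_score

pos_scores = y_pred[V.y[:, 0] == 1]
neg_scores = y_pred[V.y[:, 0] == 0]

# sample random positive/negative pairs and check how often the positive ranks higher
p = np.random.choice(pos_scores, 100000)
n = np.random.choice(neg_scores, 100000)
concordance = np.mean((p > n) + 0.5 * (p == n))
print('concordance = {:.5f}, auc = {:.5f}'.format(concordance, roc_auc_score(V.y, y_pred)))
```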
[A useful way to interpret AUC-PR](http://cs229.stanford.edu/section/evaluation_metrics.pdf) is that it is the expected precision when randomly selecting a threshold.
These last two interpretations are fascinating. In the AUC-ROC case, we randomly select samples; in the AUC-PR case, we randomly select the threshold, which is a probability. This situation is not unlike the frequentist versus Bayesian views of models, parameters, and data, where the former says the parameter stays fixed but the data changes, and the latter says the parameter changes but the data stays fixed. It might be an analogical stretch, but it is food for thought when interpreting AUC for PR and ROC.
We take the point of view that it is most useful to judge AUC-PR and AUC-ROC with respect to baseline. In AUC-ROC, since baseline is always 0.5, an AUC-ROC significantly greater than 0.5 should be `good` (at least the classifier is not guessing). In AUC-PR, the baseline changes from data to data, and so if we see an AUC-PR of 0.5, we cannot say it is only as good as guessing (without knowing the baseline). If the baseline was 0.1 and AUC-PR was 0.5, then AUC-PR is way better than guessing and `good`. Additionally, if the baseline was 0.9 and AUC-PR was 0.9, then the classifier is only as good as guessing (a baseline of 0.9 means that 90% of the data belongs to the positive class, which then should make detecting the negative class harder).
Furthermore, just because AUC-PR or AUC-ROC is better than baseline does not necessarily mean the classifier is `good`. Let's say the baseline is 0.5 and AUC-PR and AUC-ROC are both 0.85. We might say the classifier is `good` with respect to the baseline and stop there. But what if, in the field in which you are learning the classifier and trying to judge its performance, researchers/practitioners are reporting AUC performances of 95% for other classifiers? In this situation, the classifier is better than baseline but not `good` in the sense that, compared to other classifiers, it is well below par.
# Links
* https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html
* https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html
* https://bmcmedresmethodol.biomedcentral.com/articles/10.1186/1471-2288-12-82
* https://stats.stackexchange.com/questions/251175/what-is-baseline-in-precision-recall-curve
* https://stats.stackexchange.com/questions/7207/roc-vs-precision-and-recall-curves
* https://stats.stackexchange.com/questions/123360/what-is-auc-of-pr-curve
* https://stats.stackexchange.com/questions/90779/area-under-the-roc-curve-or-area-under-the-pr-curve-for-imbalanced-data
* https://stats.stackexchange.com/questions/272314/how-does-auc-of-roc-equal-concordance-probability
* http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_DavisG06.pdf
### Create a distance matrix and compare to UMAP building its own distance matrix
```
%load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from tqdm.autonotebook import tqdm
from joblib import Parallel, delayed
import umap
import pandas as pd
from avgn.utils.paths import DATA_DIR, most_recent_subdirectory, ensure_dir
from avgn.signalprocessing.create_spectrogram_dataset import flatten_spectrograms
from avgn.visualization.spectrogram import draw_spec_set
from avgn.visualization.quickplots import draw_projection_plots
# scatter_spec is used for the projection plots below; assumed to live in avgn.visualization.projections
from avgn.visualization.projections import scatter_spec
```
### Collect data
```
DATASET_ID = 'BIRD_DB_Vireo_cassinii'
df_loc = DATA_DIR / 'syllable_dfs' / DATASET_ID / 'cassins.pickle'
df_loc
syllable_df = pd.read_pickle(df_loc)
syllable_df["syll_len"] = syllable_df.end_time - syllable_df.start_time
len(syllable_df)
syllable_df[:3]
syllable_df['nsamp'] = [len(row.audio) for idx, row in tqdm(syllable_df.iterrows(), total=len(syllable_df))]
top_labels = (
pd.DataFrame(
{i: [np.sum(syllable_df.labels.values == i)] for i in syllable_df.labels.unique()}
)
.T.sort_values(by=0, ascending=False)[:20]
.T
)
top_labels
subset_df= syllable_df[syllable_df.labels.isin(top_labels.columns)]
subset_df[:3]
subset_df = subset_df[:5000]
len(subset_df)
```
### Make a distance matrix
```
from scipy.spatial.distance import squareform, pdist
specs = list(subset_df.spectrogram.values)
specs = [i/np.max(i) for i in tqdm(specs)]
specs_flattened = flatten_spectrograms(specs)
np.shape(specs_flattened)
pairs = [[a, specs_flattened[bi]] for ai, a in tqdm(enumerate(specs_flattened), total=len(specs_flattened)) for bi in np.arange(ai+1, len(specs_flattened))]
#dists = [dtw_mse(i.T, j.T) for i, j in tqdm(pairs)]
from joblib import Parallel, delayed
from joblib import parallel_backend
from scipy.spatial import distance
distance.euclidean(specs_flattened[0], specs_flattened[1])
with parallel_backend('multiprocessing', n_jobs=-1):
with Parallel(verbose=0) as parallel:
dists = parallel(
delayed(np.correlate)(a, b) for a,b in tqdm(pairs))
dists_square = squareform(np.array(dists).flatten())
ensure_dir(DATA_DIR / 'scratch' )
np.save(DATA_DIR / 'scratch' / 'cassins_dists.npy', dists_square)
import umap
u = umap.UMAP(metric="precomputed")
z = u.fit_transform(X=dists_square)
np.shape(dists_square)
scatter_spec(
z,
specs,
column_size=8,
#x_range = [-4.5,4],
#y_range = [-4.5,5.5],
pal_color="hls",
color_points=False,
enlarge_points=0,
figsize=(10, 10),
range_pad = 0.15,
scatter_kwargs = {
'labels': subset_df.labels.values,
'alpha':1,
's': 3,
'show_legend': False,
"color_palette": 'tab20',
},
matshow_kwargs = {
'cmap': plt.cm.Greys
},
line_kwargs = {
'lw':3,
'ls':"dashed",
'alpha':0.25,
},
draw_lines=True,
n_subset= 1000,
border_line_width = 3,
);
#ensure_dir(FIGURE_DIR / 'cassins-specs-vs-feats')
#save_fig(FIGURE_DIR / 'cassins-specs-vs-feats' / 'UMAP_feats', dpi=300, save_jpg=True)
```
### UMAP on DTW
```
import umap
from avgn.metrics.dtw_mse import build_dtw_mse
np.shape(specs_flattened)
plt.matshow(specs[0][:15,:])
specs_T = np.array([i.T for i in specs])
dtw_metric = build_dtw_mse(specs_T[0].shape)
z_umap = umap.UMAP(metric=dtw_metric).fit_transform(specs_T.reshape(len(specs_T), -1))
subset_df['z_dtw_umap'] = list(z_umap)
subset_df.to_pickle(DATA_DIR / 'syllable_dfs' / DATASET_ID / 'cassins_small_dtw_umap.pickle')
scatter_spec(
z_umap,
specs,
column_size=8,
#x_range = [-4.5,4],
#y_range = [-4.5,5.5],
pal_color="hls",
color_points=False,
enlarge_points=0,
figsize=(10, 10),
range_pad = 0.15,
scatter_kwargs = {
'labels': subset_df.labels.values,
'alpha':1,
's': 3,
'show_legend': False,
"color_palette": 'tab20',
},
matshow_kwargs = {
'cmap': plt.cm.Greys
},
line_kwargs = {
'lw':3,
'ls':"dashed",
'alpha':0.25,
},
draw_lines=True,
n_subset= 1000,
border_line_width = 3,
);
#ensure_dir(FIGURE_DIR / 'cassins-specs-vs-feats')
#save_fig(FIGURE_DIR / 'cassins-specs-vs-feats' / 'UMAP_feats', dpi=300, save_jpg=True)
```
### DTW with a precomputed distance matrix
```
from avgn.metrics.dtw_mse import dtw_mse_single
pairs = [[a.T, specs[bi].T] for ai, a in tqdm(enumerate(specs), total=len(specs)) for bi in np.arange(ai+1, len(specs))]
with Parallel(n_jobs=-1, verbose=0, prefer="threads") as parallel:
dists = parallel(
delayed(dtw_mse_single)(i, j)
for i, j in tqdm(pairs)
)
dists_square = squareform(np.array(dists).flatten())
plt.matshow(dists_square)
import umap
u = umap.UMAP(metric="precomputed")
z_precomputed = u.fit_transform(X=dists_square)
scatter_spec(
z_precomputed,
specs,
column_size=8,
#x_range = [-4.5,4],
#y_range = [-4.5,5.5],
pal_color="hls",
color_points=False,
enlarge_points=0,
figsize=(10, 10),
range_pad = 0.15,
scatter_kwargs = {
'labels': subset_df.labels.values,
'alpha':1,
's': 3,
'show_legend': False,
"color_palette": 'tab20',
},
matshow_kwargs = {
'cmap': plt.cm.Greys
},
line_kwargs = {
'lw':3,
'ls':"dashed",
'alpha':0.25,
},
draw_lines=True,
n_subset= 1000,
border_line_width = 3,
);
#ensure_dir(FIGURE_DIR / 'cassins-specs-vs-feats')
#save_fig(FIGURE_DIR / 'cassins-specs-vs-feats' / 'UMAP_feats', dpi=300, save_jpg=True)
```
# Testing some assumptions
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data = pd.read_csv('../Data/restaurant_1_2018-12-09.csv')
data = data.iloc[:, 1:]
data.head()
data['promised delivery time'] = pd.to_datetime(data['promised delivery time'])
data['ordering time'] = pd.to_datetime(data['ordering time'])
data['ordering hour'] = data['ordering time'].dt.hour
data.head()
hours = data['ordering hour'].unique()
data_hour = {i:[] for i in hours}
for h in hours:
df = data.loc[data['ordering hour'] == h]
data_hour[h].append(df)
data_hour[6][0].info()
data_hour_6 = data_hour[6][0]
data_hour_6.head()
data_hour_6['delivery time (m)'].unique()
plt.scatter(data_hour_6['delivery time (s)'], data_hour_6['distance (m)'])
# correlation
corr = data_hour_6['delivery time (s)'].corr(data_hour_6['distance (m)'])
corr
data_hour_6[['delivery time (s)', 'distance (m)']]
```
## Test the average promised delivery time by hour
```
data_res_1 = pd.read_csv('../Data/restaurant_1.csv')
data_res_1 = data_res_1.iloc[:, 1:]
data_res_1.head()
import datetime
# transfer to datetime
data_res_1['promised delivery time'] = pd.to_datetime(data_res_1['promised delivery time'])
data_res_1['ordering time'] = pd.to_datetime(data_res_1['ordering time'])
dates = [i for i in data_res_1['ordering time'].dt.date.unique()]
# add new column 'hour' represent the order hour
data_res_1['ordering hour'] = data_res_1['ordering time'].dt.hour
data_res_1['ordering date'] = data_res_1['ordering time'].dt.date
# number of orders for each hour
hours_1 = data_res_1['ordering hour'].unique()
orders_per_day_hour = {(i.strftime('%Y-%m-%d'), j):0 for i in dates for j in hours_1}
for d in dates:
for h in hours_1:
df = data_res_1.loc[(data_res_1['ordering hour'] == h) & (data_res_1['ordering date'] == d)]
orders_per_day_hour[(d.strftime('%Y-%m-%d'), h)] = df.shape[0]
orders_per_day_hour
out_df = pd.Series(orders_per_day_hour).reset_index()
out_df.columns = ['Date', 'Hour', 'Num of Orders']
out_df
out_df.to_csv('../Data/restaurant_1_orders_per_day_hour.csv')
hour_order_dict = {k:0 for k in range(6, 22)}
for h in range(6, 22):
print(h, out_df.loc[out_df['Hour'] == h]['Num of Orders'].mean())
hour_order_dict[h] = int(out_df.loc[out_df['Hour'] == h]['Num of Orders'].mean())
hour_order_dict
plt.bar(range(6, 22), list(hour_order_dict.values()))
hour_delivery_time_dict = {i:0 for i in range(6, 22)}
for h in range(6, 22):
print(h, data_res_1.loc[data_res_1['ordering hour'] == h]['delivery time (m)'].mean())
hour_delivery_time_dict[h] = int(data_res_1.loc[data_res_1['ordering hour'] == h]['delivery time (m)'].mean())
hour_delivery_time_dict
plt.bar(range(6, 22), list(hour_delivery_time_dict.values()))
```
```
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import fetch_lfw_people
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.svm import SVC
print(__doc__)
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
# load the Labeled Faces in the Wild (LFW) dataset, keeping only people with at least 70 images
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
n_samples, h, w = lfw_people.images.shape
x = lfw_people.data
n_features = x.shape[1]
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
X_train, X_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=42)
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = PCA(n_components=n_components, svd_solver='randomized',
whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
```